diff --git a/.coderabbit.yaml b/.coderabbit.yaml deleted file mode 100644 index 2220649caa28..000000000000 --- a/.coderabbit.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json -# Disable CodeRabbit auto-review to prevent verbose comments on PRs. -# When enabled: false, CodeRabbit won't attempt reviews and won't post -# "Review skipped" or other automated comments. -reviews: - auto_review: - enabled: false - review_status: false - high_level_summary: false - poem: false - sequence_diagrams: false - changed_files_summary: false - tools: - github-checks: - enabled: false -chat: - art: false - auto_reply: false diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 763c5f27ee6b..000000000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1,17 +0,0 @@ -# Pull requests concerning the listed files will automatically invite the respective maintainers as reviewers. -# This file is not used for denoting any kind of ownership, but is merely a tool for handling notifications. -# -# Merge permissions are required for maintaining an entry in this file. 
-# For documentation on this mechanism, see https://help.github.com/articles/about-codeowners/ - -# Default reviewers if nothing else matches -* @edolstra - -# This file -.github/CODEOWNERS @edolstra - -# Documentation of built-in functions -src/libexpr/primops.cc @roberth - -# Libstore layer -/src/libstore @ericson2314 diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index af94c3e9e5bb..08a5851748d4 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,10 +1,9 @@ --- name: Bug report about: Report unexpected or incorrect behaviour -title: '' +title: "" labels: bug -assignees: '' - +assignees: "" --- ## Describe the bug @@ -32,7 +31,9 @@ assignees: '' ## Metadata - + + + ## Additional context @@ -42,13 +43,9 @@ assignees: '' -- [ ] checked [latest Nix manual] \([source]) +- [ ] checked [latest Determinate Nix manual] \([source]) - [ ] checked [open bug issues and pull requests] for possible duplicates -[latest Nix manual]: https://nix.dev/manual/nix/development/ -[source]: https://github.com/NixOS/nix/tree/master/doc/manual/source -[open bug issues and pull requests]: https://github.com/NixOS/nix/labels/bug - ---- - -Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc). 
+[latest Determinate Nix manual]: https://manual.determinate.systems/ +[source]: https://github.com/DeterminateSystems/nix-src/tree/main/doc/manual/source +[open bug issues and pull requests]: https://github.com/DeterminateSystems/nix-src/labels/bug diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index fe9f9dd209d4..b88e10937988 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -1,10 +1,9 @@ --- name: Feature request about: Suggest a new feature -title: '' +title: "" labels: feature -assignees: '' - +assignees: "" --- ## Is your feature request related to a problem? @@ -27,13 +26,9 @@ assignees: '' -- [ ] checked [latest Nix manual] \([source]) -- [ ] checked [open feature issues and pull requests] for possible duplicates - -[latest Nix manual]: https://nix.dev/manual/nix/development/ -[source]: https://github.com/NixOS/nix/tree/master/doc/manual/source -[open feature issues and pull requests]: https://github.com/NixOS/nix/labels/feature - ---- +- [ ] checked [latest Determinate Nix manual] \([source]) +- [ ] checked [open bug issues and pull requests] for possible duplicates -Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc). 
+[latest Determinate Nix manual]: https://manual.determinate.systems/ +[source]: https://github.com/DeterminateSystems/nix-src/tree/main/doc/manual/source +[open bug issues and pull requests]: https://github.com/DeterminateSystems/nix-src/labels/bug diff --git a/.github/ISSUE_TEMPLATE/installer.md b/.github/ISSUE_TEMPLATE/installer.md index 070e0bd9b25b..430bef971aac 100644 --- a/.github/ISSUE_TEMPLATE/installer.md +++ b/.github/ISSUE_TEMPLATE/installer.md @@ -1,18 +1,17 @@ --- name: Installer issue about: Report problems with installation -title: '' +title: "" labels: installer -assignees: '' - +assignees: "" --- ## Platform - + -- [ ] Linux: - [ ] macOS +- [ ] Linux: - [ ] WSL ## Additional information @@ -35,13 +34,9 @@ assignees: '' -- [ ] checked [latest Nix manual] \([source]) -- [ ] checked [open installer issues and pull requests] for possible duplicates - -[latest Nix manual]: https://nix.dev/manual/nix/development/ -[source]: https://github.com/NixOS/nix/tree/master/doc/manual/source -[open installer issues and pull requests]: https://github.com/NixOS/nix/labels/installer - ---- +- [ ] checked [latest Determinate Nix manual] \([source]) +- [ ] checked [open bug issues and pull requests] for possible duplicates -Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc). 
+[latest Determinate Nix manual]: https://manual.determinate.systems/ +[source]: https://github.com/DeterminateSystems/nix-src/tree/main/doc/manual/source +[open bug issues and pull requests]: https://github.com/DeterminateSystems/nix-src/labels/bug diff --git a/.github/ISSUE_TEMPLATE/missing_documentation.md b/.github/ISSUE_TEMPLATE/missing_documentation.md index 4e05b626d398..fcdd0d20135e 100644 --- a/.github/ISSUE_TEMPLATE/missing_documentation.md +++ b/.github/ISSUE_TEMPLATE/missing_documentation.md @@ -1,10 +1,9 @@ --- name: Missing or incorrect documentation about: Help us improve the reference manual -title: '' +title: "" labels: documentation -assignees: '' - +assignees: "" --- ## Problem @@ -19,13 +18,9 @@ assignees: '' -- [ ] checked [latest Nix manual] \([source]) -- [ ] checked [open documentation issues and pull requests] for possible duplicates - -[latest Nix manual]: https://nix.dev/manual/nix/development/ -[source]: https://github.com/NixOS/nix/tree/master/doc/manual/source -[open documentation issues and pull requests]: https://github.com/NixOS/nix/labels/documentation - ---- +- [ ] checked [latest Determinate Nix manual] \([source]) +- [ ] checked [open bug issues and pull requests] for possible duplicates -Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc). +[latest Determinate Nix manual]: https://manual.determinate.systems/ +[source]: https://github.com/DeterminateSystems/nix-src/tree/main/doc/manual/source +[open bug issues and pull requests]: https://github.com/DeterminateSystems/nix-src/labels/bug diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index c155bf8bfa4f..d3e1f8177364 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,26 +1,3 @@ - - ## Motivation @@ -34,9 +11,3 @@ PR stuck in review? 
We have two Nix team meetings per week online that are open - ---- - -Add :+1: to [pull requests you find important](https://github.com/NixOS/nix/pulls?q=is%3Aopen+sort%3Areactions-%2B1-desc). - -The Nix maintainer team uses a [GitHub project board](https://github.com/orgs/NixOS/projects/19) to [schedule and track reviews](https://github.com/NixOS/nix/tree/master/maintainers#project-board-protocol). diff --git a/.github/STALE-BOT.md b/.github/STALE-BOT.md index bc0005413f1a..281d0f79a8b7 100644 --- a/.github/STALE-BOT.md +++ b/.github/STALE-BOT.md @@ -2,34 +2,21 @@ - Thanks for your contribution! - To remove the stale label, just leave a new comment. -- _How to find the right people to ping?_ → [`git blame`](https://git-scm.com/docs/git-blame) to the rescue! (or GitHub's history and blame buttons.) -- You can always ask for help on [our Discourse Forum](https://discourse.nixos.org/) or on [Matrix - #users:nixos.org](https://matrix.to/#/#users:nixos.org). +- You can always ask for help on [Discord](https://determinate.systems/discord). ## Suggestions for PRs -1. GitHub sometimes doesn't notify people who commented / reviewed a PR previously, when you (force) push commits. If you have addressed the reviews you can [officially ask for a review](https://docs.github.com/en/free-pro-team@latest/github/collaborating-with-issues-and-pull-requests/requesting-a-pull-request-review) from those who commented to you or anyone else. -2. If it is unfinished but you plan to finish it, please mark it as a draft. -3. If you don't expect to work on it any time soon, closing it with a short comment may encourage someone else to pick up your work. -4. To get things rolling again, rebase the PR against the target branch and address valid comments. -5. If you need a review to move forward, ask in [the Discourse thread for PRs that need help](https://discourse.nixos.org/t/prs-in-distress/3604). -6. 
If all you need is a merge, check the git history to find and [request reviews](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/requesting-a-pull-request-review) from people who usually merge related contributions. +1. If it is unfinished but you plan to finish it, please mark it as a draft. +1. If you don't expect to work on it any time soon, closing it with a short comment may encourage someone else to pick up your work. +1. To get things rolling again, rebase the PR against the target branch and address valid comments. +1. If you need a review to move forward, ask in [Discord](https://determinate.systems/discord). ## Suggestions for issues 1. If it is resolved (either for you personally, or in general), please consider closing it. 2. If this might still be an issue, but you are not interested in promoting its resolution, please consider closing it while encouraging others to take over and reopen an issue if they care enough. -3. If you still have interest in resolving it, try to ping somebody who you believe might have an interest in the topic. Consider discussing the problem in [our Discourse Forum](https://discourse.nixos.org/). -4. As with all open source projects, your best option is to submit a Pull Request that addresses this issue. We :heart: this attitude! +3. If you still have interest in resolving it, try to ping somebody who you believe might have an interest in the topic. Consider discussing the problem in [Discord](https://determinate.systems/discord). **Memorandum on closing issues** Don't be afraid to close an issue that holds valuable information. Closed issues stay in the system for people to search, read, cross-reference, or even reopen--nothing is lost! Closing obsolete issues is an important way to help maintainers focus their time and effort. 
- -## Useful GitHub search queries - -- [Open PRs with any stale-bot interaction](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+commenter%3Aapp%2Fstale+) -- [Open PRs with any stale-bot interaction and `stale`](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+commenter%3Aapp%2Fstale+label%3A%22stale%22) -- [Open PRs with any stale-bot interaction and NOT `stale`](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+commenter%3Aapp%2Fstale+-label%3A%22stale%22+) -- [Open Issues with any stale-bot interaction](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+commenter%3Aapp%2Fstale+) -- [Open Issues with any stale-bot interaction and `stale`](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+commenter%3Aapp%2Fstale+label%3A%22stale%22+) -- [Open Issues with any stale-bot interaction and NOT `stale`](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+commenter%3Aapp%2Fstale+-label%3A%22stale%22+) diff --git a/.github/release-notes.sh b/.github/release-notes.sh new file mode 100755 index 000000000000..f641e146d2e8 --- /dev/null +++ b/.github/release-notes.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +# SC2002 disables "useless cat" warnings. +# I prefer pipelines that start with an explicit input, and go from there. +# Overly fussy. +# shellcheck disable=SC2002 + +scratch=$(mktemp -d -t tmp.XXXXXXXXXX) +finish() { + rm -rf "$scratch" +} +trap finish EXIT + +DATE=$(date +%Y-%m-%d) +DETERMINATE_NIX_VERSION=$(cat .version-determinate) +TAG_NAME="v${DETERMINATE_NIX_VERSION}" +NIX_VERSION=$(cat .version) +NIX_VERSION_MAJOR_MINOR=$(echo "$NIX_VERSION" | cut -d. 
-f1,2) +GITHUB_REPOSITORY="${GITHUB_REPOSITORY:-DeterminateSystems/nix-src}" + +gh api "/repos/${GITHUB_REPOSITORY}/releases/generate-notes" \ + -f "tag_name=${TAG_NAME}" > "$scratch/notes.json" + +trim_trailing_newlines() { + local text + text="$(cat)" + echo -n "${text}" +} + +linkify_gh() { + sed \ + -e 's!\(https://github.com/DeterminateSystems/nix-src/\(pull\|issue\)/\([[:digit:]]\+\)\)![DeterminateSystems/nix-src#\3](\1)!' \ + -e 's#\(https://github.com/DeterminateSystems/nix-src/compare/\([^ ]\+\)\)#[\2](\1)#' +} + +( + cat doc/manual/source/release-notes-determinate/changes.md \ + | sed 's/^.*\(\)$/This section lists the differences between upstream Nix '"$NIX_VERSION_MAJOR_MINOR"' and Determinate Nix '"$DETERMINATE_NIX_VERSION"'.\1/' \ + + printf "\n\n" "$DETERMINATE_NIX_VERSION" + cat "$scratch/notes.json" \ + | jq -r .body \ + | grep -v '^#' \ + | grep -v "Full Changelog" \ + | trim_trailing_newlines \ + | sed -e 's/^\* /\n* /' \ + | linkify_gh + echo "" # final newline +) > "$scratch/changes.md" + +( + printf "# Release %s (%s)\n\n" \ + "$DETERMINATE_NIX_VERSION" \ + "$DATE" + printf "* Based on [upstream Nix %s](../release-notes/rl-%s.md).\n\n" \ + "$NIX_VERSION" \ + "$NIX_VERSION_MAJOR_MINOR" + + cat "$scratch/notes.json" | jq -r .body | linkify_gh +) > "$scratch/rl.md" + +( + cat doc/manual/source/SUMMARY.md.in \ + | sed 's/\(\)$/\1\n - [Release '"$DETERMINATE_NIX_VERSION"' ('"$DATE"')](release-notes-determinate\/'"$TAG_NAME"'.md)/' +) > "$scratch/summary.md" + +mv "$scratch/changes.md" doc/manual/source/release-notes-determinate/changes.md +mv "$scratch/rl.md" "doc/manual/source/release-notes-determinate/v${DETERMINATE_NIX_VERSION}.md" +mv "$scratch/summary.md" doc/manual/source/SUMMARY.md.in diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml deleted file mode 100644 index 90dbb9305223..000000000000 --- a/.github/workflows/backport.yml +++ /dev/null @@ -1,37 +0,0 @@ -name: Backport -on: - pull_request_target: - types: 
[closed, labeled] -permissions: - contents: read -jobs: - backport: - name: Backport Pull Request - permissions: - # for korthout/backport-action - contents: write - pull-requests: write - if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith('backport', github.event.label.name)) - runs-on: ubuntu-24.04-arm - steps: - - name: Generate GitHub App token - id: generate-token - uses: actions/create-github-app-token@v2 - with: - app-id: ${{ vars.CI_APP_ID }} - private-key: ${{ secrets.CI_APP_PRIVATE_KEY }} - - uses: actions/checkout@v6 - with: - ref: ${{ github.event.pull_request.head.sha }} - # required to find all branches - fetch-depth: 0 - - name: Create backport PRs - uses: korthout/backport-action@c656f5d5851037b2b38fb5db2691a03fa229e3b2 # v4.0.1 - id: backport - with: - # Config README: https://github.com/korthout/backport-action#backport-action - github_token: ${{ steps.generate-token.outputs.token }} - github_workspace: ${{ github.workspace }} - auto_merge_enabled: true - pull_description: |- - Automatic backport to `${target_branch}`, triggered by a label in #${pull_number}. 
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 000000000000..b43a8067d79c --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,274 @@ +on: + workflow_call: + inputs: + system: + required: true + type: string + runner: + required: true + type: string + runner_for_virt: + required: true + type: string + runner_small: + required: true + type: string + if: + required: false + default: true + type: boolean + run_tests: + required: false + default: true + type: boolean + run_vm_tests: + required: false + default: false + type: boolean + run_regression_tests: + required: false + default: false + type: boolean + publish_manual: + required: false + default: false + type: boolean + secrets: + manual_netlify_auth_token: + required: false + manual_netlify_site_id: + required: false + +jobs: + build: + if: ${{ inputs.if }} + strategy: + fail-fast: false + runs-on: ${{ inputs.runner }} + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - run: nix build .#packages.${{ inputs.system }}.default .#packages.${{ inputs.system }}.binaryTarball --no-link -L + - run: nix build .#packages.${{ inputs.system }}.binaryTarball --out-link tarball + - run: nix build .#packages.${{ inputs.system }}.nix-cli-static --no-link -L + - uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.system }} + path: ./tarball/*.xz + + test: + if: ${{ inputs.if && inputs.run_tests}} + needs: build + strategy: + fail-fast: false + runs-on: ${{ inputs.runner }} + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - run: nix flake check -L --system ${{ inputs.system }} + + vm_tests_smoke: + if: inputs.run_vm_tests && github.event_name != 'merge_group' + needs: build + runs-on: ${{ inputs.runner_for_virt }} + 
steps: + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - run: | + nix build -L \ + .#hydraJobs.tests.functional_user \ + .#hydraJobs.tests.githubFlakes \ + .#hydraJobs.tests.nix-docker \ + .#hydraJobs.tests.tarballFlakes \ + ; + + vm_tests_all: + if: inputs.run_vm_tests && github.event_name == 'merge_group' + needs: build + runs-on: ${{ inputs.runner_for_virt }} + steps: + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - run: | + cmd() { + nix build -L --keep-going --timeout 600 \ + $(nix flake show --json \ + | jq -r ' + .hydraJobs.tests + | with_entries(select(.value.type == "derivation")) + | keys[] + | ".#hydraJobs.tests." + .') + } + + if ! cmd; then + echo "failed, retrying once ..." + printf "\n\n\n\n\n\n\n\n" + cmd + fi + + flake_regressions: + if: | + (inputs.run_regression_tests && github.event_name == 'merge_group') + || ( + inputs.run_regression_tests + && github.event.pull_request.head.repo.full_name == 'DeterminateSystems/nix-src' + && ( + (github.event.action == 'labeled' && github.event.label.name == 'flake-regression-test') + || (github.event.action != 'labeled' && contains(github.event.pull_request.labels.*.name, 'flake-regression-test')) + ) + ) + needs: build + runs-on: ${{ inputs.runner }} + strategy: + matrix: + nix_config: + - "lazy-trees = true" + - "lazy-trees = false" + - "eval-cores = 24" + glob: + - "[0]*" + - "[1]*" + - "[2]*" + - "[3]*" + - "[4]*" + - "[5]*" + - "[6]*" + - "[7]*" + - "[8]*" + - "[9]*" + - "[a]*" + - "[b]*" + - "[c]*" + - "[d]*" + - "[e]*" + - "[f]*" + - "[g]*" + - "[h]*" + - "[i]*" + - "[j]*" + - "[k]*" + - "[l]*" + - "[m]*" + - "[n]*" + - "[o]*" + - "[p]*" + - "[q]*" + - "[r]*" + - "[s]*" + - "[t]*" + - "[u]*" + - "[v]*" + - "[w]*" + - "[x]*" + - "[y]*" + - "[z]*" + + steps: + - name: Checkout nix + uses: actions/checkout@v4 + - 
name: Checkout flake-regressions + uses: actions/checkout@v4 + with: + repository: NixOS/flake-regressions + path: flake-regressions + - name: Checkout flake-regressions-data + uses: actions/checkout@v4 + with: + repository: NixOS/flake-regressions-data + path: flake-regressions/tests + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - name: Run flake regression tests + env: + #PARALLEL: ${{ !contains(matrix.nix_config, 'eval-cores') && '-P 50%' || '-P 1' }} + PARALLEL: '-P 1' + FLAKE_REGRESSION_GLOB: ${{ matrix.glob }} + NIX_CONFIG: ${{ matrix.nix_config }} + PREFETCH: "1" + USE_NIX_FLAKE_SHOW: "1" + run: | + set -x + echo "PARALLEL: $PARALLEL" + echo "NIX_CONFIG: $NIX_CONFIG" + if [ ! -z "${NSC_CACHE_PATH:-}" ]; then + mkdir -p "${NSC_CACHE_PATH}/nix/xdg-cache" + export XDG_CACHE_HOME="${NSC_CACHE_PATH}/nix/xdg-cache" + fi + nix build -L --out-link ./new-nix + export PATH=$(pwd)/new-nix/bin:$PATH + [[ $(type -p nix) = $(pwd)/new-nix/bin/nix ]] + + nix config show lazy-trees + nix config show eval-cores + lscpu + nproc + + if ! 
flake-regressions/eval-all.sh; then + echo "Some failed, trying again" + printf "\n\n\n\n\n\n\n\n" + NIX_REMOTE=/tmp/nix flake-regressions/eval-all.sh + fi + + manual: + if: github.event_name != 'merge_group' + needs: build + runs-on: ${{ inputs.runner_small }} + permissions: + id-token: "write" + contents: "read" + pull-requests: "write" + statuses: "write" + deployments: "write" + steps: + - name: Checkout nix + uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-cache-action@main + - name: Build manual + if: inputs.system == 'x86_64-linux' + run: nix build .#hydraJobs.manual + - uses: nwtgck/actions-netlify@v3.0 + if: inputs.publish_manual && inputs.system == 'x86_64-linux' + with: + publish-dir: "./result/share/doc/nix/manual" + production-branch: main + github-token: ${{ secrets.GITHUB_TOKEN }} + deploy-message: "Deploy from GitHub Actions" + # NOTE(cole-h): We have a perpetual PR displaying our changes against upstream open, but + # its conversation is locked, so this PR comment can never be posted. 
+ # https://github.com/DeterminateSystems/nix-src/pull/165 + enable-pull-request-comment: ${{ github.event.pull_request.number != 165 }} + enable-commit-comment: true + enable-commit-status: true + overwrites-pull-request-comment: true + env: + NETLIFY_AUTH_TOKEN: ${{ secrets.manual_netlify_auth_token }} + NETLIFY_SITE_ID: ${{ secrets.manual_netlify_site_id }} + + success: + needs: + - build + - test + - vm_tests_smoke + - vm_tests_all + - flake_regressions + - manual + if: ${{ always() }} + runs-on: ubuntu-latest + steps: + - run: "true" + - run: | + echo "A dependent in the build matrix failed:" + echo "$needs" + exit 1 + env: + needs: ${{ toJSON(needs) }} + if: | + contains(needs.*.result, 'failure') || + contains(needs.*.result, 'cancelled') diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2347bf9107d1..08000ac4c871 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,266 +2,158 @@ name: "CI" on: pull_request: - merge_group: push: branches: + # NOTE: make sure any branches here are also valid directory names, + # otherwise creating the directory and uploading to s3 will fail + - main - master - workflow_dispatch: - inputs: - dogfood: - description: 'Use dogfood Nix build' - required: false - default: true - type: boolean - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true + merge_group: + release: + types: + - published -permissions: read-all +permissions: + id-token: "write" + contents: "read" + pull-requests: "write" + statuses: "write" + deployments: "write" jobs: eval: - runs-on: ubuntu-24.04 + runs-on: UbuntuLatest32Cores128G steps: - - uses: actions/checkout@v6 - with: - fetch-depth: 0 - - uses: ./.github/actions/install-nix-action - with: - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - extra_nix_config: - experimental-features = nix-command flakes - 
github_token: ${{ secrets.GITHUB_TOKEN }} - use_cache: false - - run: nix flake show --all-systems --json - - pre-commit-checks: - name: pre-commit checks - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v6 - - uses: ./.github/actions/install-nix-action + - uses: actions/checkout@v4 with: - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - extra_nix_config: experimental-features = nix-command flakes - github_token: ${{ secrets.GITHUB_TOKEN }} - - run: ./ci/gha/tests/pre-commit-checks + fetch-depth: 0 + - uses: DeterminateSystems/determinate-nix-action@main + - run: nix flake show --all-systems --json + + build_x86_64-linux: + uses: ./.github/workflows/build.yml + with: + system: x86_64-linux + runner: namespace-profile-linuxamd32c64g-cache + runner_for_virt: UbuntuLatest32Cores128G + runner_small: ubuntu-latest + run_tests: true + run_vm_tests: true + run_regression_tests: true + publish_manual: true + secrets: + manual_netlify_auth_token: ${{ secrets.NETLIFY_AUTH_TOKEN }} + manual_netlify_site_id: ${{ secrets.NETLIFY_SITE_ID }} + + build_aarch64-linux: + uses: ./.github/workflows/build.yml + with: + if: ${{ + github.event_name != 'pull_request' + || ( + github.event.pull_request.head.repo.full_name == 'DeterminateSystems/nix-src' + && ( + (github.event.action == 'labeled' && github.event.label.name == 'upload to s3') + || (github.event.action != 'labeled' && contains(github.event.pull_request.labels.*.name, 'upload to s3')) + ) + ) + }} + system: aarch64-linux + runner: UbuntuLatest32Cores128GArm + runner_for_virt: UbuntuLatest32Cores128GArm + runner_small: UbuntuLatest32Cores128GArm - basic-checks: - name: aggregate basic checks + build_aarch64-darwin: + uses: ./.github/workflows/build.yml + with: + system: aarch64-darwin + runner: namespace-profile-mac-m2-12c28g + runner_for_virt: namespace-profile-mac-m2-12c28g + runner_small: macos-latest-xlarge + + success: + runs-on: 
ubuntu-latest + needs: + - eval + - build_x86_64-linux + - build_aarch64-linux + - build_aarch64-darwin if: ${{ always() }} - runs-on: ubuntu-24.04 - needs: [pre-commit-checks, eval] steps: - - name: Exit with any errors - if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }} - run: | + - run: "true" + - run: | + echo "A dependent in the build matrix failed:" + echo "$needs" exit 1 + env: + needs: ${{ toJSON(needs) }} + if: | + contains(needs.*.result, 'failure') || + contains(needs.*.result, 'cancelled') - tests: - needs: basic-checks - strategy: - fail-fast: false - matrix: - include: - - scenario: on ubuntu - runs-on: ubuntu-24.04 - os: linux - instrumented: false - primary: true - stdenv: stdenv - - scenario: on macos - runs-on: macos-14 - os: darwin - instrumented: false - primary: true - stdenv: stdenv - - scenario: on ubuntu (with sanitizers / coverage) - runs-on: ubuntu-24.04 - os: linux - instrumented: true - primary: false - stdenv: clangStdenv - name: tests ${{ matrix.scenario }} - runs-on: ${{ matrix.runs-on }} - timeout-minutes: 60 - steps: - - uses: actions/checkout@v6 - with: - fetch-depth: 0 - - uses: ./.github/actions/install-nix-action - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - # The sandbox would otherwise be disabled by default on Darwin - extra_nix_config: "sandbox = true" - # Since ubuntu 22.30, unprivileged usernamespaces are no longer allowed to map to the root user: - # https://ubuntu.com/blog/ubuntu-23-10-restricted-unprivileged-user-namespaces - - run: sudo sysctl -w kernel.apparmor_restrict_unprivileged_userns=0 - if: matrix.os == 'linux' - - name: Run component tests - run: | - nix build --file ci/gha/tests/wrapper.nix componentTests -L \ - --arg withInstrumentation ${{ matrix.instrumented }} \ - --argstr stdenv "${{ matrix.stdenv }}" - - name: Run VM tests - run: | - nix 
build --file ci/gha/tests/wrapper.nix vmTests -L \ - --arg withInstrumentation ${{ matrix.instrumented }} \ - --argstr stdenv "${{ matrix.stdenv }}" - if: ${{ matrix.os == 'linux' }} - - name: Run flake checks and prepare the installer tarball - run: | - ci/gha/tests/build-checks - ci/gha/tests/prepare-installer-for-github-actions - if: ${{ matrix.primary }} - - name: Collect code coverage - run: | - nix build --file ci/gha/tests/wrapper.nix codeCoverage.coverageReports -L \ - --arg withInstrumentation ${{ matrix.instrumented }} \ - --argstr stdenv "${{ matrix.stdenv }}" \ - --out-link coverage-reports - cat coverage-reports/index.txt >> $GITHUB_STEP_SUMMARY - if: ${{ matrix.instrumented }} - - name: Upload coverage reports - uses: actions/upload-artifact@v5 - with: - name: coverage-reports - path: coverage-reports/ - if: ${{ matrix.instrumented }} - - name: Upload installer tarball - uses: actions/upload-artifact@v5 - with: - name: installer-${{matrix.os}} - path: out/* - if: ${{ matrix.primary }} - - installer_test: - needs: [tests] - strategy: - fail-fast: false - matrix: - include: - - scenario: on ubuntu - runs-on: ubuntu-24.04 - os: linux - experimental-installer: false - - scenario: on macos - runs-on: macos-14 - os: darwin - experimental-installer: false - - scenario: on ubuntu (experimental) - runs-on: ubuntu-24.04 - os: linux - experimental-installer: true - - scenario: on macos (experimental) - runs-on: macos-14 - os: darwin - experimental-installer: true - name: installer test ${{ matrix.scenario }} - runs-on: ${{ matrix.runs-on }} - steps: - - uses: actions/checkout@v6 - - name: Download installer tarball - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 - with: - name: installer-${{matrix.os}} - path: out - - name: Looking up the installer tarball URL - id: installer-tarball-url - run: | - echo "installer-url=file://$GITHUB_WORKSPACE/out" >> "$GITHUB_OUTPUT" - TARBALL_PATH="$(find "$GITHUB_WORKSPACE/out" -name 
'nix*.tar.xz' -print | head -n 1)" - echo "tarball-path=file://$TARBALL_PATH" >> "$GITHUB_OUTPUT" - - uses: cachix/install-nix-action@0b0e072294b088b73964f1d72dfdac0951439dbd # v31.8.4 - if: ${{ !matrix.experimental-installer }} - with: - install_url: ${{ format('{0}/install', steps.installer-tarball-url.outputs.installer-url) }} - install_options: ${{ format('--tarball-url-prefix {0}', steps.installer-tarball-url.outputs.installer-url) }} - - uses: ./.github/actions/install-nix-action - if: ${{ matrix.experimental-installer }} - with: - dogfood: false - experimental-installer: true - tarball_url: ${{ steps.installer-tarball-url.outputs.tarball-path }} - github_token: ${{ secrets.GITHUB_TOKEN }} - - run: sudo apt install fish zsh - if: matrix.os == 'linux' - - run: brew install fish - if: matrix.os == 'darwin' - - run: exec bash -c "nix-instantiate -E 'builtins.currentTime' --eval" - - run: exec sh -c "nix-instantiate -E 'builtins.currentTime' --eval" - - run: exec zsh -c "nix-instantiate -E 'builtins.currentTime' --eval" - - run: exec fish -c "nix-instantiate -E 'builtins.currentTime' --eval" - - run: exec bash -c "nix-channel --add https://releases.nixos.org/nixos/unstable/nixos-23.05pre466020.60c1d71f2ba nixpkgs" - - run: exec bash -c "nix-channel --update && nix-env -iA nixpkgs.hello && hello" + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main - docker_push_image: - name: Push docker image to DockerHub and GHCR - needs: [flake_regressions, installer_test] - if: github.event_name == 'push' && github.ref_name == 'master' - uses: ./.github/workflows/docker-push.yml - with: - ref: ${{ github.sha }} - is_master: true - permissions: - contents: read - packages: write - secrets: - DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} - DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Create artifacts directory + run: mkdir -p ./artifacts - flake_regressions: - needs: tests - runs-on: ubuntu-24.04 - steps: - - name: Checkout 
nix - uses: actions/checkout@v6 - - name: Checkout flake-regressions - uses: actions/checkout@v6 - with: - repository: NixOS/flake-regressions - path: flake-regressions - - name: Checkout flake-regressions-data - uses: actions/checkout@v6 + - name: Fetch artifacts + uses: actions/download-artifact@v4 with: - repository: NixOS/flake-regressions-data - path: flake-regressions/tests - - name: Download installer tarball - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 - with: - name: installer-linux - path: out - - name: Looking up the installer tarball URL - id: installer-tarball-url + path: downloaded + - name: Move downloaded artifacts to artifacts directory + run: | + for dir in ./downloaded/*; do + arch="$(basename "$dir")" + mv "$dir"/*.xz ./artifacts/"${arch}" + done + + - name: Build fallback-paths.nix + if: ${{ + github.event_name != 'pull_request' + || ( + github.event.pull_request.head.repo.full_name == 'DeterminateSystems/nix-src' + && ( + (github.event.action == 'labeled' && github.event.label.name == 'upload to s3') + || (github.event.action != 'labeled' && contains(github.event.pull_request.labels.*.name, 'upload to s3')) + ) + ) + }} run: | - echo "installer-url=file://$GITHUB_WORKSPACE/out" >> "$GITHUB_OUTPUT" - - uses: cachix/install-nix-action@4e002c8ec80594ecd40e759629461e26c8abed15 # v31.9.0 + nix build .#fallbackPathsNix --out-link fallback + cat fallback > ./artifacts/fallback-paths.nix + + - uses: DeterminateSystems/push-artifact-ids@main with: - install_url: ${{ format('{0}/install', steps.installer-tarball-url.outputs.installer-url) }} - install_options: ${{ format('--tarball-url-prefix {0}', steps.installer-tarball-url.outputs.installer-url) }} - - name: Run flake regressions tests - run: MAX_FLAKES=25 flake-regressions/eval-all.sh + s3_upload_role: ${{ secrets.AWS_S3_UPLOAD_ROLE_ARN }} + bucket: ${{ secrets.AWS_S3_UPLOAD_BUCKET_NAME }} + directory: ./artifacts + ids_project_name: determinate-nix + 
ids_binary_prefix: determinate-nix + skip_acl: true + allowed_branches: '["main"]' - profile_build: - needs: tests - runs-on: ubuntu-24.04 - timeout-minutes: 60 - if: >- - github.event_name == 'push' && - github.ref_name == 'master' + publish: + needs: + - success + if: (!github.repository.fork && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) || startsWith(github.ref, 'refs/tags/'))) + environment: ${{ github.event_name == 'release' && 'production' || '' }} + runs-on: ubuntu-latest + permissions: + contents: write + id-token: write steps: - - uses: actions/checkout@v6 - with: - fetch-depth: 0 - - uses: ./.github/actions/install-nix-action - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - dogfood: ${{ github.event_name == 'workflow_dispatch' && inputs.dogfood || github.event_name != 'workflow_dispatch' }} - extra_nix_config: | - experimental-features = flakes nix-command ca-derivations impure-derivations - max-jobs = 1 - - run: | - nix build -L --file ./ci/gha/profile-build buildTimeReport --out-link build-time-report.md - cat build-time-report.md >> $GITHUB_STEP_SUMMARY + - uses: actions/checkout@v4 + - uses: DeterminateSystems/determinate-nix-action@main + - uses: DeterminateSystems/flakehub-push@main + with: + rolling: ${{ github.ref == format('refs/heads/{0}', github.event.repository.default_branch) }} + visibility: "public" + tag: "${{ github.ref_name }}" + - name: Update the release notes + if: startsWith(github.ref, 'refs/tags/') + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + TAG_NAME: ${{ github.ref_name }} + run: | + gh release edit "$TAG_NAME" --notes-file doc/manual/source/release-notes-determinate/"$TAG_NAME".md || true diff --git a/.github/workflows/docker-push.yml b/.github/workflows/docker-push.yml deleted file mode 100644 index c4ccd1fe0d0f..000000000000 --- a/.github/workflows/docker-push.yml +++ /dev/null @@ -1,101 +0,0 @@ -name: "Push Docker Image" - -on: - workflow_call: - inputs: - ref: - description: 
"Git ref to build the docker image from" - required: true - type: string - is_master: - description: "Whether run from master branch" - required: true - type: boolean - secrets: - DOCKERHUB_USERNAME: - required: true - DOCKERHUB_TOKEN: - required: true - -permissions: {} - -jobs: - # Steps to test CI automation in your own fork. - # 1. Sign-up for https://hub.docker.com/ - # 2. Store your dockerhub username as DOCKERHUB_USERNAME in "Repository secrets" of your fork repository settings (https://github.com/$githubuser/nix/settings/secrets/actions) - # 3. Create an access token in https://hub.docker.com/settings/security and store it as DOCKERHUB_TOKEN in "Repository secrets" of your fork - check_secrets: - permissions: - contents: none - name: Check presence of secrets - runs-on: ubuntu-24.04 - outputs: - docker: ${{ steps.secret.outputs.docker }} - steps: - - name: Check for DockerHub secrets - id: secret - env: - _DOCKER_SECRETS: ${{ secrets.DOCKERHUB_USERNAME }}${{ secrets.DOCKERHUB_TOKEN }} - run: | - echo "docker=${{ env._DOCKER_SECRETS != '' }}" >> $GITHUB_OUTPUT - - push: - name: Push docker image to DockerHub and GHCR - needs: [check_secrets] - permissions: - contents: read - packages: write - if: needs.check_secrets.outputs.docker == 'true' - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - ref: ${{ inputs.ref }} - - uses: ./.github/actions/install-nix-action - with: - dogfood: false - extra_nix_config: | - experimental-features = flakes nix-command - - run: echo NIX_VERSION="$(nix eval .\#nix.version | tr -d \")" >> $GITHUB_ENV - - run: nix build .#dockerImage -L - - run: docker load -i ./result/image.tar.gz - # We'll deploy the newly built image to both Docker Hub and Github Container Registry. 
- # - # Push to Docker Hub first - - name: Login to Docker Hub - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Push to Docker Hub - env: - IS_MASTER: ${{ inputs.is_master }} - DOCKERHUB_REPO: ${{ secrets.DOCKERHUB_USERNAME }}/nix - run: | - docker tag nix:$NIX_VERSION $DOCKERHUB_REPO:$NIX_VERSION - docker push $DOCKERHUB_REPO:$NIX_VERSION - if [ "$IS_MASTER" = "true" ]; then - docker tag nix:$NIX_VERSION $DOCKERHUB_REPO:master - docker push $DOCKERHUB_REPO:master - fi - # Push to GitHub Container Registry as well - - name: Login to GitHub Container Registry - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Push to GHCR - env: - IS_MASTER: ${{ inputs.is_master }} - run: | - IMAGE_ID=ghcr.io/${{ github.repository_owner }}/nix - IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]') - - docker tag nix:$NIX_VERSION $IMAGE_ID:$NIX_VERSION - docker push $IMAGE_ID:$NIX_VERSION - if [ "$IS_MASTER" = "true" ]; then - docker tag nix:$NIX_VERSION $IMAGE_ID:master - docker push $IMAGE_ID:master - fi diff --git a/.github/workflows/propose-release.yml b/.github/workflows/propose-release.yml new file mode 100644 index 000000000000..ea01e4b7afec --- /dev/null +++ b/.github/workflows/propose-release.yml @@ -0,0 +1,32 @@ +on: + workflow_dispatch: + inputs: + reference-id: + type: string + required: true + version: + type: string + required: true + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +jobs: + propose-release: + uses: DeterminateSystems/propose-release/.github/workflows/workflow.yml@main + permissions: + id-token: write + contents: write + pull-requests: write + with: + update-flake: false + reference-id: ${{ inputs.reference-id }} + version: ${{ inputs.version }} + 
extra-commands-early: | + echo ${{ inputs.version }} > .version-determinate + git add .version-determinate + git commit -m "Set .version-determinate to ${{ inputs.version }}" || true + ./.github/release-notes.sh + git add doc + git commit -m "Generate release notes for ${{ inputs.version }}" || true diff --git a/.github/workflows/upload-release.yml b/.github/workflows/upload-release.yml deleted file mode 100644 index 82ce0c40b74d..000000000000 --- a/.github/workflows/upload-release.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: Upload Release -on: - workflow_dispatch: - inputs: - eval_id: - description: "Hydra evaluation ID" - required: true - type: number - is_latest: - description: "Mark as latest release" - required: false - type: boolean - default: false -permissions: - contents: read - id-token: write - packages: write -jobs: - release: - runs-on: ubuntu-24.04 - environment: releases - steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - - uses: ./.github/actions/install-nix-action - with: - dogfood: false # Use stable version - use_cache: false # Don't want any cache injection shenanigans - extra_nix_config: | - experimental-features = nix-command flakes - - name: Set NIX_PATH from flake input - run: | - NIXPKGS_PATH=$(nix build --inputs-from .# nixpkgs#path --print-out-paths --no-link) - # Shebangs with perl have issues. Pin nixpkgs this way. nix shell should maybe - # get the same uberhack that nix-shell has to support it. 
- echo "NIX_PATH=nixpkgs=$NIXPKGS_PATH" >> "$GITHUB_ENV" - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@61815dcd50bd041e203e49132bacad1fd04d2708 # v5.1.1 - with: - role-to-assume: "arn:aws:iam::080433136561:role/nix-release" - role-session-name: nix-release-oidc-${{ github.run_id }} - aws-region: eu-west-1 - - name: Login to Docker Hub - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Login to GitHub Container Registry - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Upload release - run: | - ./maintainers/upload-release.pl \ - ${{ inputs.eval_id }} \ - --skip-git - env: - IS_LATEST: ${{ inputs.is_latest && '1' || '' }} - - name: Push to GHCR - run: | - DOCKER_OWNER="ghcr.io/$(echo '${{ github.repository_owner }}' | tr '[A-Z]' '[a-z]')/nix" - ./maintainers/upload-release.pl \ - ${{ inputs.eval_id }} \ - --skip-git \ - --skip-s3 \ - --docker-owner "$DOCKER_OWNER" - env: - IS_LATEST: ${{ inputs.is_latest && '1' || '' }} diff --git a/.version-determinate b/.version-determinate new file mode 100644 index 000000000000..3f67e25cea13 --- /dev/null +++ b/.version-determinate @@ -0,0 +1 @@ +3.17.0 diff --git a/README.md b/README.md index 02498944cdb7..c5cbcbed21bb 100644 --- a/README.md +++ b/README.md @@ -1,38 +1,111 @@ -# Nix +

+ +

+

+  Discord  +  Bluesky  +  Mastodon  +  Twitter  +  LinkedIn  +

-[![Open Collective supporters](https://opencollective.com/nixos/tiers/supporter/badge.svg?label=Supporters&color=brightgreen)](https://opencollective.com/nixos) -[![CI](https://github.com/NixOS/nix/workflows/CI/badge.svg)](https://github.com/NixOS/nix/actions/workflows/ci.yml) +# The Determinate Nix CLI -Nix is a powerful package manager for Linux and other Unix systems that makes package -management reliable and reproducible. Please refer to the [Nix manual](https://nix.dev/reference/nix-manual) -for more details. +[![CI](https://github.com/DeterminateSystems/nix-src/workflows/CI/badge.svg)](https://github.com/DeterminateSystems/nix-src/actions/workflows/ci.yml) -## Installation and first steps +**Nix** is a powerful [language], [package manager][package-management], and [build tool][cli] for [macOS](#macos), [Linux](#linux), and other Unix systems. +It enables you to create fully reproducible [development environments][envs], to build [packages] in sandboxed environments, to build entire Linux systems using [NixOS], and much more. -Visit [nix.dev](https://nix.dev) for [installation instructions](https://nix.dev/tutorials/install-nix) and [beginner tutorials](https://nix.dev/tutorials/first-steps). +[**Determinate Nix**][det-nix] is a downstream distribution of [Nix][upstream] created and maintained by [Determinate Systems][detsys]. +It has two components: -Full reference documentation can be found in the [Nix manual](https://nix.dev/reference/nix-manual). +- The Determinate Nix CLI, a distribution of the Nix CLI built from this repository. + It's based on the [upstream Nix CLI][upstream] and continuously rebased against it, but adds a wide variety of [features] and [improvements][changelog]. +- [Determinate Nixd][dnixd] is a useful daemon for Linux and macOS that handles vital tasks like configuration and enterprise certificate management. 
-## Building and developing +Determinate Nix is built on SOC-2-Type-II-compliant infrastructure using [Determinate Secure Packages][secure-packages], released via a carefully orchestrated process, and, for Determinate Systems customers, backed by formal security response SLAs that meet stringent compliance standards. -Follow instructions in the Nix reference manual to [set up a development environment and build Nix from source](https://nix.dev/manual/nix/development/development/building.html). +> [!NOTE] +> Determinate Nix, by definition, consists of _both_ the components listed above. +> While it's possible to use the code in this repository to run just our downstream Nix CLI, we do _not_ officially support this experience and provide none of the guarantees or SLAs that we provide for Determinate Nix proper. -## Contributing +Determinate Nix is part of the [Determinate platform][determinate], which also includes [FlakeHub], a secure flake repository with features like [FlakeHub Cache][cache], [private flakes][private-flakes], and [semantic versioning][semver] (SemVer) for [flakes]. + +## Installing Determinate Nix + +You can install Determinate Nix on [macOS](#macos), non-NixOS [Linux](#linux) and WSL, and [NixOS](#nixos). + +### macOS + +On macOS, we recommend using the graphical installer from Determinate Systems. +Click [here][gui] to download and run it. + +### Linux + +On Linux, including Windows Subsystem for Linux (WSL), we recommend installing Determinate Nix using [Determinate Nix Installer][installer]: + +```shell +curl -fsSL https://install.determinate.systems/nix | sh -s -- install +``` -Check the [contributing guide](./CONTRIBUTING.md) if you want to get involved with developing Nix. +### NixOS -## Additional resources +On [NixOS], we recommend following our [dedicated installation guide][nixos-install]. +We also provide both [Amazon Machine Images][amis] (AMIs) and [ISOs] for using Determinate on NixOS. 
-Nix was created by Eelco Dolstra and developed as the subject of his PhD thesis [The Purely Functional Software Deployment Model](https://edolstra.github.io/pubs/phd-thesis.pdf), published 2006. -Today, a world-wide developer community contributes to Nix and the ecosystem that has grown around it. +## Other resources -- [The Nix, Nixpkgs, NixOS Community on nixos.org](https://nixos.org/) -- [Official documentation on nix.dev](https://nix.dev) -- [Nixpkgs](https://github.com/NixOS/nixpkgs) is [the largest, most up-to-date free software repository in the world](https://repology.org/repositories/graphs) -- [NixOS](https://github.com/NixOS/nixpkgs/tree/master/nixos) is a Linux distribution that can be configured fully declaratively -- [Discourse](https://discourse.nixos.org/) -- Matrix: [#users:nixos.org](https://matrix.to/#/#users:nixos.org) for user support and [#nix-dev:nixos.org](https://matrix.to/#/#nix-dev:nixos.org) for development +Nix was created by [Eelco Dolstra][eelco] and developed as the subject of his 2006 PhD thesis, [The Purely Functional Software Deployment Model][thesis]. +Today, a worldwide developer community contributes to Nix and the ecosystem that has grown around it. + +- [Zero to Nix][z2n], Determinate Systems' guide to Nix and [flakes] for beginners +- [Nixpkgs], a collection of well over 100,000 software packages that you can build and manage using Nix +- [NixOS] is a Linux distribution that can be configured fully declaratively +- The Nix, Nixpkgs, and NixOS community on [nixos.org][website] + +## Reference + +The primary documentation for Determinate and Determinate Nix is available at [docs.determinate.systems][determinate]. +For deeply technical reference material, see the [Determinate Nix manual][manual] which is based on the upstream Nix manual. ## License -Nix is released under the [LGPL v2.1](./COPYING). +[Upstream Nix][upstream] is released under the [LGPL v2.1][license] license. 
+[Determinate Nix][det-nix] is also released under LGPL v2.1 in accordance with the terms of the upstream license. + +## Contributing + +Check the [contributing guide][contributing] if you want to get involved with developing Nix. + +[amis]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html +[cache]: https://docs.determinate.systems/flakehub/cache +[changelog]: https://determinate.systems/blog/categories/changelog +[cli]: https://manual.determinate.systems/command-ref/new-cli/nix.html +[contributing]: ./CONTRIBUTING.md +[det-nix]: https://docs.determinate.systems/determinate-nix +[determinate]: https://docs.determinate.systems +[detsys]: https://determinate.systems +[dnixd]: https://docs.determinate.systems/determinate-nix#determinate-nixd +[eelco]: https://determinate.systems/people/eelco-dolstra +[envs]: https://zero-to-nix.com/concepts/dev-env +[features]: https://docs.determinate.systems/determinate-nix/#special-features +[flakehub]: https://flakehub.com +[flakes]: https://zero-to-nix.com/concepts/flakes +[gui]: https://install.determinate.systems/determinate-pkg/stable/Universal +[installer]: https://github.com/DeterminateSystems/nix-installer +[isos]: https://github.com/DeterminateSystems/nixos-iso +[language]: https://zero-to-nix.com/concepts/nix-language +[license]: ./COPYING +[manual]: https://manual.determinate.systems +[nixpkgs]: https://github.com/NixOS/nixpkgs +[nixos]: https://github.com/NixOS/nixpkgs/tree/master/nixos +[nixos-install]: https://docs.determinate.systems/guides/advanced-installation#nixos +[packages]: https://zero-to-nix.com/concepts/packages +[package-management]: https://zero-to-nix.com/concepts/package-management +[private-flakes]: https://docs.determinate.systems/flakehub/private-flakes +[secure-packages]: https://determinate.systems/secure-packages +[semver]: https://docs.determinate.systems/flakehub/concepts/semver +[thesis]: https://edolstra.github.io/pubs/phd-thesis.pdf +[upstream]: https://github.com/NixOS/nix 
+[website]: https://nixos.org +[z2n]: https://zero-to-nix.com diff --git a/ci/gha/tests/default.nix b/ci/gha/tests/default.nix index 6100f2f4172e..4a30a96a475d 100644 --- a/ci/gha/tests/default.nix +++ b/ci/gha/tests/default.nix @@ -76,13 +76,17 @@ rec { */ topLevel = { installerScriptForGHA = hydraJobs.installerScriptForGHA.${system}; - installTests = hydraJobs.installTests.${system}; nixpkgsLibTests = hydraJobs.tests.nixpkgsLibTests.${system}; rl-next = pkgs.buildPackages.runCommand "test-rl-next-release-notes" { } '' LANG=C.UTF-8 ${pkgs.changelog-d}/bin/changelog-d ${../../../doc/manual/rl-next} >$out ''; repl-completion = pkgs.callPackage ../../../tests/repl-completion.nix { inherit (packages') nix; }; + lazyTrees = nixComponents.nix-functional-tests.override { + pname = "nix-lazy-trees-tests"; + lazyTrees = true; + }; + /** Checks for our packaging expressions. This shouldn't build anything significant; just check that things diff --git a/default.nix b/default.nix deleted file mode 100644 index 6466507b7140..000000000000 --- a/default.nix +++ /dev/null @@ -1,9 +0,0 @@ -(import ( - let - lock = builtins.fromJSON (builtins.readFile ./flake.lock); - in - fetchTarball { - url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz"; - sha256 = lock.nodes.flake-compat.locked.narHash; - } -) { src = ./.; }).defaultNix diff --git a/doc/manual/book.toml.in b/doc/manual/book.toml.in index c798afc4a8c0..11efca75f110 100644 --- a/doc/manual/book.toml.in +++ b/doc/manual/book.toml.in @@ -1,12 +1,12 @@ [book] -title = "Nix @version@ Reference Manual" +title = "Determinate Nix @version@ Reference Manual" src = "source" [output.html] additional-css = ["custom.css"] additional-js = ["redirects.js"] -edit-url-template = "https://github.com/NixOS/nix/tree/master/doc/manual/{path}" -git-repository-url = "https://github.com/NixOS/nix" +edit-url-template = "https://github.com/DeterminateSystems/nix-src/tree/master/doc/manual/{path}" 
+git-repository-url = "https://github.com/DeterminateSystems/nix-src" mathjax-support = true # Handles replacing @docroot@ with a path to ./source relative to that markdown file, diff --git a/doc/manual/custom.css b/doc/manual/custom.css index 7af150be391b..119c6d125430 100644 --- a/doc/manual/custom.css +++ b/doc/manual/custom.css @@ -1,5 +1,5 @@ :root { - --sidebar-width: 23em; + --sidebar-width: 23em; } h1.menu-title::before { @@ -7,11 +7,10 @@ h1.menu-title::before { background-image: url("./favicon.svg"); padding: 1.25em; background-position: center center; - background-size: 2em; + background-size: 1.5em; background-repeat: no-repeat; } - .menu-bar { padding: 0.5em 0em; } @@ -21,13 +20,13 @@ h1.menu-title::before { } h1:not(:first-of-type) { - margin-top: 1.3em; + margin-top: 1.3em; } h2 { - margin-top: 1em; + margin-top: 1em; } .hljs-meta { - user-select: none; + user-select: none; } diff --git a/doc/manual/generate-manpage.nix b/doc/manual/generate-manpage.nix index 31e74e17d264..292cb283d3d6 100644 --- a/doc/manual/generate-manpage.nix +++ b/doc/manual/generate-manpage.nix @@ -42,11 +42,6 @@ let let result = '' - > **Warning** \ - > This program is - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) - > and its interface is subject to change. 
- # Name `${command}` - ${details.description} diff --git a/doc/manual/meson.build b/doc/manual/meson.build index 3c3e79541139..1b9a325df2ac 100644 --- a/doc/manual/meson.build +++ b/doc/manual/meson.build @@ -5,19 +5,9 @@ project( license : 'LGPL-2.1-or-later', ) -# Compute documentation URL based on version and release type -version = meson.project_version() -official_release = get_option('official-release') +fs = import('fs') -if official_release - # For official releases, use versioned URL (dropping patch version) - version_parts = version.split('.') - major_minor = '@0@.@1@'.format(version_parts[0], version_parts[1]) - doc_url = 'https://nix.dev/manual/nix/@0@'.format(major_minor) -else - # For development builds, use /latest - doc_url = 'https://nix.dev/manual/nix/latest' -endif +doc_url = 'https://manual.determinate.systems/' nix = find_program('nix', native : true) @@ -40,7 +30,7 @@ nix_env_for_docs = { 'NIX_CONFIG' : 'cores = 0', } -nix_for_docs = [ nix, '--experimental-features', 'nix-command' ] +nix_for_docs = [ nix ] nix_eval_for_docs_common = nix_for_docs + [ 'eval', '-I', @@ -137,7 +127,7 @@ if get_option('html-manual') python.full_path(), mdbook.full_path(), meson.current_build_dir(), - meson.project_version(), + fs.read('../../.version-determinate').strip(), rsync.full_path(), ), ], diff --git a/doc/manual/package.nix b/doc/manual/package.nix index 3a90a0faf8a0..0b3d8ca940a2 100644 --- a/doc/manual/package.nix +++ b/doc/manual/package.nix @@ -34,7 +34,7 @@ let in mkMesonDerivation (finalAttrs: { - pname = "nix-manual"; + pname = "determinate-nix-manual"; inherit version; workDir = ./.; @@ -42,6 +42,7 @@ mkMesonDerivation (finalAttrs: { fileset.difference (fileset.unions [ ../../.version + ../../.version-determinate # For example JSON ../../src/libutil-tests/data/memory-source-accessor ../../src/libutil-tests/data/hash diff --git a/doc/manual/redirects.json b/doc/manual/redirects.json index 0a6c71508006..07a6f36627ff 100644 --- 
a/doc/manual/redirects.json +++ b/doc/manual/redirects.json @@ -243,29 +243,11 @@ "gloss-validity": "glossary.html#gloss-validity", "part-glossary": "glossary.html", "sec-building-source": "installation/building-source.html", - "ch-env-variables": "installation/env-variables.html", - "sec-installer-proxy-settings": "installation/env-variables.html#proxy-environment-variables", - "sec-nix-ssl-cert-file": "installation/env-variables.html#nix_ssl_cert_file", - "sec-nix-ssl-cert-file-with-nix-daemon-and-macos": "installation/env-variables.html#nix_ssl_cert_file", "chap-installation": "installation/index.html", - "ch-installing-binary": "installation/installing-binary.html", - "sect-macos-installation": "installation/installing-binary.html#macos-installation", - "sect-macos-installation-change-store-prefix": "installation/installing-binary.html#macos-installation", - "sect-macos-installation-encrypted-volume": "installation/installing-binary.html#macos-installation", - "sect-macos-installation-recommended-notes": "installation/installing-binary.html#macos-installation", - "sect-macos-installation-symlink": "installation/installing-binary.html#macos-installation", - "sect-multi-user-installation": "installation/installing-binary.html#multi-user-installation", - "sect-nix-install-binary-tarball": "installation/installing-binary.html#installing-from-a-binary-tarball", - "sect-nix-install-pinned-version-url": - "installation/installing-binary.html#installing-a-pinned-nix-version-from-a-url", - "sect-single-user-installation": "installation/installing-binary.html#single-user-installation", "ch-installing-source": "installation/installing-source.html", - "ssec-multi-user": "installation/multi-user.html", "ch-nix-security": "installation/nix-security.html", "sec-obtaining-source": "installation/obtaining-source.html", "sec-prerequisites-source": "installation/prerequisites-source.html", - "sec-single-user": "installation/single-user.html", - "ch-supported-platforms": 
"installation/supported-platforms.html", "ch-upgrading-nix": "installation/upgrading.html", "ch-about-nix": "introduction.html", "chap-introduction": "introduction.html", @@ -287,43 +269,7 @@ "sec-sharing-packages": "package-management/sharing-packages.html", "ssec-ssh-substituter": "package-management/ssh-substituter.html", "chap-quick-start": "quick-start.html", - "sec-relnotes": "release-notes/index.html", - "ch-relnotes-0.10.1": "release-notes/rl-0.10.1.html", - "ch-relnotes-0.10": "release-notes/rl-0.10.html", - "ssec-relnotes-0.11": "release-notes/rl-0.11.html", - "ssec-relnotes-0.12": "release-notes/rl-0.12.html", - "ssec-relnotes-0.13": "release-notes/rl-0.13.html", - "ssec-relnotes-0.14": "release-notes/rl-0.14.html", - "ssec-relnotes-0.15": "release-notes/rl-0.15.html", - "ssec-relnotes-0.16": "release-notes/rl-0.16.html", - "ch-relnotes-0.5": "release-notes/rl-0.5.html", - "ch-relnotes-0.6": "release-notes/rl-0.6.html", - "ch-relnotes-0.7": "release-notes/rl-0.7.html", - "ch-relnotes-0.8.1": "release-notes/rl-0.8.1.html", - "ch-relnotes-0.8": "release-notes/rl-0.8.html", - "ch-relnotes-0.9.1": "release-notes/rl-0.9.1.html", - "ch-relnotes-0.9.2": "release-notes/rl-0.9.2.html", - "ch-relnotes-0.9": "release-notes/rl-0.9.html", - "ssec-relnotes-1.0": "release-notes/rl-1.0.html", - "ssec-relnotes-1.1": "release-notes/rl-1.1.html", - "ssec-relnotes-1.10": "release-notes/rl-1.10.html", - "ssec-relnotes-1.11.10": "release-notes/rl-1.11.10.html", - "ssec-relnotes-1.11": "release-notes/rl-1.11.html", - "ssec-relnotes-1.2": "release-notes/rl-1.2.html", - "ssec-relnotes-1.3": "release-notes/rl-1.3.html", - "ssec-relnotes-1.4": "release-notes/rl-1.4.html", - "ssec-relnotes-1.5.1": "release-notes/rl-1.5.html", - "ssec-relnotes-1.5.2": "release-notes/rl-1.5.2.html", - "ssec-relnotes-1.5": "release-notes/rl-1.5.html", - "ssec-relnotes-1.6.1": "release-notes/rl-1.6.1.html", - "ssec-relnotes-1.6.0": "release-notes/rl-1.6.html", - "ssec-relnotes-1.7": 
"release-notes/rl-1.7.html", - "ssec-relnotes-1.8": "release-notes/rl-1.8.html", - "ssec-relnotes-1.9": "release-notes/rl-1.9.html", - "ssec-relnotes-2.0": "release-notes/rl-2.0.html", - "ssec-relnotes-2.1": "release-notes/rl-2.1.html", - "ssec-relnotes-2.2": "release-notes/rl-2.2.html", - "ssec-relnotes-2.3": "release-notes/rl-2.3.html" + "sec-relnotes": "release-notes/index.html" }, "language/types.html": { "simple-values": "#primitives", @@ -340,12 +286,10 @@ "builder-execution": "../store/building.html#builder-execution" }, "installation/installing-binary.html": { - "linux": "uninstall.html#linux", - "macos": "uninstall.html#macos", "uninstalling": "uninstall.html" }, "development/building.html": { - "nix-with-flakes": "#building-nix-with-flakes", + "nix-with-flakes": "#building-nix", "classic-nix": "#building-nix", "running-tests": "testing.html#running-tests", "unit-tests": "testing.html#unit-tests", diff --git a/doc/manual/rl-next/c-api-new-store-methods.md b/doc/manual/rl-next/c-api-new-store-methods.md new file mode 100644 index 000000000000..28792e7cc42d --- /dev/null +++ b/doc/manual/rl-next/c-api-new-store-methods.md @@ -0,0 +1,9 @@ +--- +synopsis: "C API: New store API methods" +prs: [14766] +--- + +The C API now includes additional methods: + +- `nix_store_query_path_from_hash_part()` - Get the full store path given its hash part +- `nix_store_copy_path()` - Copy a single store path between two stores, allows repairs and configuring signature checking diff --git a/doc/manual/rl-next/shorter-build-dir-names.md b/doc/manual/rl-next/shorter-build-dir-names.md new file mode 100644 index 000000000000..e87fa5d04fb8 --- /dev/null +++ b/doc/manual/rl-next/shorter-build-dir-names.md @@ -0,0 +1,6 @@ +--- +synopsis: "Temporary build directories no longer include derivation names" +prs: [13839] +--- + +Temporary build directories created during derivation builds no longer include the derivation name in their path to avoid build failures when the derivation name 
is too long. This change ensures predictable prefix lengths for build directories under `/nix/var/nix/builds`. \ No newline at end of file diff --git a/doc/manual/source/SUMMARY.md.in b/doc/manual/source/SUMMARY.md.in index 8b6b29f6a7ff..df8f10dcda7f 100644 --- a/doc/manual/source/SUMMARY.md.in +++ b/doc/manual/source/SUMMARY.md.in @@ -3,17 +3,12 @@ - [Introduction](introduction.md) - [Quick Start](quick-start.md) - [Installation](installation/index.md) - - [Supported Platforms](installation/supported-platforms.md) - - [Installing a Binary Distribution](installation/installing-binary.md) - [Installing Nix from Source](installation/installing-source.md) - [Prerequisites](installation/prerequisites-source.md) - [Obtaining a Source Distribution](installation/obtaining-source.md) - [Building Nix from Source](installation/building-source.md) - [Using Nix within Docker](installation/installing-docker.md) - [Security](installation/nix-security.md) - - [Single-User Mode](installation/single-user.md) - - [Multi-User Mode](installation/multi-user.md) - - [Environment Variables](installation/env-variables.md) - [Upgrading Nix](installation/upgrading.md) - [Uninstalling Nix](installation/uninstall.md) - [Nix Store](store/index.md) @@ -65,8 +60,11 @@ - [Command Reference](command-ref/index.md) - [Common Options](command-ref/opt-common.md) - [Common Environment Variables](command-ref/env-common.md) - - [Main Commands](command-ref/main-commands.md) + - [Subcommands](command-ref/subcommands.md) +{{#include ./command-ref/new-cli/SUMMARY.md}} + - [Deprecated Commands](command-ref/main-commands.md) - [nix-build](command-ref/nix-build.md) + - [nix-channel](command-ref/nix-channel.md) - [nix-shell](command-ref/nix-shell.md) - [nix-store](command-ref/nix-store.md) - [nix-store --add-fixed](command-ref/nix-store/add-fixed.md) @@ -102,22 +100,17 @@ - [nix-env --uninstall](command-ref/nix-env/uninstall.md) - [nix-env --upgrade](command-ref/nix-env/upgrade.md) - 
[Utilities](command-ref/utilities.md) - - [nix-channel](command-ref/nix-channel.md) - [nix-collect-garbage](command-ref/nix-collect-garbage.md) - [nix-copy-closure](command-ref/nix-copy-closure.md) - [nix-daemon](command-ref/nix-daemon.md) - [nix-hash](command-ref/nix-hash.md) - [nix-instantiate](command-ref/nix-instantiate.md) - [nix-prefetch-url](command-ref/nix-prefetch-url.md) - - [Experimental Commands](command-ref/experimental-commands.md) -{{#include ./command-ref/new-cli/SUMMARY.md}} - [Files](command-ref/files.md) - [nix.conf](command-ref/conf-file.md) - [Profiles](command-ref/files/profiles.md) - [manifest.nix](command-ref/files/manifest.nix.md) - [manifest.json](command-ref/files/manifest.json.md) - - [Channels](command-ref/files/channels.md) - - [Default Nix expression](command-ref/files/default-nix-expression.md) - [Architecture and Design](architecture/architecture.md) - [Formats and Protocols](protocols/index.md) - [JSON Formats](protocols/json/index.md) @@ -136,6 +129,8 @@ - [Store Path Specification](protocols/store-path.md) - [Nix Archive (NAR) Format](protocols/nix-archive/index.md) - [Derivation "ATerm" file format](protocols/derivation-aterm.md) + - [`builtins.wasm` Host Interface](protocols/wasm.md) + - [Flake Schemas](protocols/flake-schemas.md) - [C API](c-api.md) - [Glossary](glossary.md) - [Development](development/index.md) @@ -149,7 +144,55 @@ - [C++ style guide](development/cxx.md) - [Experimental Features](development/experimental-features.md) - [Contributing](development/contributing.md) -- [Releases](release-notes/index.md) +- [Determinate Nix Release Notes](release-notes-determinate/index.md) + - [Changes between Nix and Determinate Nix](release-notes-determinate/changes.md) + - [Release 3.17.0 (2026-03-04)](release-notes-determinate/v3.17.0.md) + - [Release 3.16.3 (2026-02-24)](release-notes-determinate/v3.16.3.md) + - [Release 3.16.2 (2026-02-23)](release-notes-determinate/v3.16.2.md) + - [Release 3.16.1 
(2026-02-22)](release-notes-determinate/v3.16.1.md) + - [Release 3.16.0 (2026-02-12)](release-notes-determinate/v3.16.0.md) + - [Release 3.15.2 (2026-01-20)](release-notes-determinate/v3.15.2.md) + - [Release 3.15.1 (2025-12-24)](release-notes-determinate/v3.15.1.md) + - [Release 3.15.0 (2025-12-19)](release-notes-determinate/v3.15.0.md) + - [Release 3.14.0 (2025-12-08)](release-notes-determinate/v3.14.0.md) + - [Release 3.13.2 (2025-11-19)](release-notes-determinate/v3.13.2.md) + - [Release 3.13.1 (2025-11-12)](release-notes-determinate/v3.13.1.md) + - [Release 3.13.0 (2025-11-09)](release-notes-determinate/v3.13.0.md) + - [Release 3.12.2 (2025-11-05)](release-notes-determinate/v3.12.2.md) + - [Release 3.12.1 (2025-11-04)](release-notes-determinate/v3.12.1.md) + - [Release 3.12.0 (2025-10-23)](release-notes-determinate/v3.12.0.md) + - [Release 3.11.3 (2025-10-09)](release-notes-determinate/v3.11.3.md) + - [Release 3.11.2 (2025-09-12)](release-notes-determinate/v3.11.2.md) + - [Release 3.11.1 (2025-09-04)](release-notes-determinate/v3.11.1.md) + - [Release 3.11.0 (2025-09-03)](release-notes-determinate/v3.11.0.md) + - [Release 3.10.1 (2025-09-02)](release-notes-determinate/v3.10.1.md) + - [Release 3.10.0 (2025-09-02)](release-notes-determinate/v3.10.0.md) + - [Release 3.9.1 (2025-08-28)](release-notes-determinate/v3.9.1.md) + - [Release 3.9.0 (2025-08-26)](release-notes-determinate/v3.9.0.md) + - [Release 3.8.6 (2025-08-19)](release-notes-determinate/v3.8.6.md) + - [Release 3.8.5 (2025-08-04)](release-notes-determinate/rl-3.8.5.md) + - [Release 3.8.4 (2025-07-21)](release-notes-determinate/rl-3.8.4.md) + - [Release 3.8.3 (2025-07-18)](release-notes-determinate/rl-3.8.3.md) + - [Release 3.8.2 (2025-07-12)](release-notes-determinate/rl-3.8.2.md) + - [Release 3.8.1 (2025-07-11)](release-notes-determinate/rl-3.8.1.md) + - [Release 3.8.0 (2025-07-10)](release-notes-determinate/rl-3.8.0.md) + - [Release 3.7.0 (2025-07-03)](release-notes-determinate/rl-3.7.0.md) + - 
[Release 3.6.8 (2025-06-25)](release-notes-determinate/rl-3.6.8.md) + - [Release 3.6.7 (2025-06-24)](release-notes-determinate/rl-3.6.7.md) + - [Release 3.6.6 (2025-06-17)](release-notes-determinate/rl-3.6.6.md) + - [Release 3.6.5 (2025-06-16)](release-notes-determinate/rl-3.6.5.md) + - [Release 3.6.2 (2025-06-02)](release-notes-determinate/rl-3.6.2.md) + - [Release 3.6.1 (2025-05-24)](release-notes-determinate/rl-3.6.1.md) + - [Release 3.6.0 (2025-05-22)](release-notes-determinate/rl-3.6.0.md) + - [Release 3.5.2 (2025-05-12)](release-notes-determinate/rl-3.5.2.md) + - [Release 3.5.1 (2025-05-09)](release-notes-determinate/rl-3.5.1.md) + - [~~Release 3.5.0 (2025-05-09)~~](release-notes-determinate/rl-3.5.0.md) + - [Release 3.4.2 (2025-05-05)](release-notes-determinate/rl-3.4.2.md) + - [Release 3.4.0 (2025-04-25)](release-notes-determinate/rl-3.4.0.md) + - [Release 3.3.0 (2025-04-11)](release-notes-determinate/rl-3.3.0.md) + - [Release 3.1.0 (2025-03-27)](release-notes-determinate/rl-3.1.0.md) + - [Release 3.0.0 (2025-03-04)](release-notes-determinate/rl-3.0.0.md) +- [Nix Release Notes](release-notes/index.md) {{#include ./SUMMARY-rl-next.md}} - [Release 2.33 (2025-12-09)](release-notes/rl-2.33.md) - [Release 2.32 (2025-10-06)](release-notes/rl-2.32.md) @@ -159,60 +202,3 @@ - [Release 2.28 (2025-04-02)](release-notes/rl-2.28.md) - [Release 2.27 (2025-03-03)](release-notes/rl-2.27.md) - [Release 2.26 (2025-01-22)](release-notes/rl-2.26.md) - - [Release 2.25 (2024-11-07)](release-notes/rl-2.25.md) - - [Release 2.24 (2024-07-31)](release-notes/rl-2.24.md) - - [Release 2.23 (2024-06-03)](release-notes/rl-2.23.md) - - [Release 2.22 (2024-04-23)](release-notes/rl-2.22.md) - - [Release 2.21 (2024-03-11)](release-notes/rl-2.21.md) - - [Release 2.20 (2024-01-29)](release-notes/rl-2.20.md) - - [Release 2.19 (2023-11-17)](release-notes/rl-2.19.md) - - [Release 2.18 (2023-09-20)](release-notes/rl-2.18.md) - - [Release 2.17 (2023-07-24)](release-notes/rl-2.17.md) - - [Release 
2.16 (2023-05-31)](release-notes/rl-2.16.md) - - [Release 2.15 (2023-04-11)](release-notes/rl-2.15.md) - - [Release 2.14 (2023-02-28)](release-notes/rl-2.14.md) - - [Release 2.13 (2023-01-17)](release-notes/rl-2.13.md) - - [Release 2.12 (2022-12-06)](release-notes/rl-2.12.md) - - [Release 2.11 (2022-08-25)](release-notes/rl-2.11.md) - - [Release 2.10 (2022-07-11)](release-notes/rl-2.10.md) - - [Release 2.9 (2022-05-30)](release-notes/rl-2.9.md) - - [Release 2.8 (2022-04-19)](release-notes/rl-2.8.md) - - [Release 2.7 (2022-03-07)](release-notes/rl-2.7.md) - - [Release 2.6 (2022-01-24)](release-notes/rl-2.6.md) - - [Release 2.5 (2021-12-13)](release-notes/rl-2.5.md) - - [Release 2.4 (2021-11-01)](release-notes/rl-2.4.md) - - [Release 2.3 (2019-09-04)](release-notes/rl-2.3.md) - - [Release 2.2 (2019-01-11)](release-notes/rl-2.2.md) - - [Release 2.1 (2018-09-02)](release-notes/rl-2.1.md) - - [Release 2.0 (2018-02-22)](release-notes/rl-2.0.md) - - [Release 1.11.10 (2017-06-12)](release-notes/rl-1.11.10.md) - - [Release 1.11 (2016-01-19)](release-notes/rl-1.11.md) - - [Release 1.10 (2015-09-03)](release-notes/rl-1.10.md) - - [Release 1.9 (2015-06-12)](release-notes/rl-1.9.md) - - [Release 1.8 (2014-12-14)](release-notes/rl-1.8.md) - - [Release 1.7 (2014-04-11)](release-notes/rl-1.7.md) - - [Release 1.6.1 (2013-10-28)](release-notes/rl-1.6.1.md) - - [Release 1.6 (2013-09-10)](release-notes/rl-1.6.md) - - [Release 1.5.2 (2013-05-13)](release-notes/rl-1.5.2.md) - - [Release 1.5 (2013-02-27)](release-notes/rl-1.5.md) - - [Release 1.4 (2013-02-26)](release-notes/rl-1.4.md) - - [Release 1.3 (2013-01-04)](release-notes/rl-1.3.md) - - [Release 1.2 (2012-12-06)](release-notes/rl-1.2.md) - - [Release 1.1 (2012-07-18)](release-notes/rl-1.1.md) - - [Release 1.0 (2012-05-11)](release-notes/rl-1.0.md) - - [Release 0.16 (2010-08-17)](release-notes/rl-0.16.md) - - [Release 0.15 (2010-03-17)](release-notes/rl-0.15.md) - - [Release 0.14 (2010-02-04)](release-notes/rl-0.14.md) - - [Release 
0.13 (2009-11-05)](release-notes/rl-0.13.md) - - [Release 0.12 (2008-11-20)](release-notes/rl-0.12.md) - - [Release 0.11 (2007-12-31)](release-notes/rl-0.11.md) - - [Release 0.10.1 (2006-10-11)](release-notes/rl-0.10.1.md) - - [Release 0.10 (2006-10-06)](release-notes/rl-0.10.md) - - [Release 0.9.2 (2005-09-21)](release-notes/rl-0.9.2.md) - - [Release 0.9.1 (2005-09-20)](release-notes/rl-0.9.1.md) - - [Release 0.9 (2005-09-16)](release-notes/rl-0.9.md) - - [Release 0.8.1 (2005-04-13)](release-notes/rl-0.8.1.md) - - [Release 0.8 (2005-04-11)](release-notes/rl-0.8.md) - - [Release 0.7 (2005-01-12)](release-notes/rl-0.7.md) - - [Release 0.6 (2004-11-14)](release-notes/rl-0.6.md) - - [Release 0.5 and earlier](release-notes/rl-0.5.md) diff --git a/doc/manual/source/advanced-topics/distributed-builds.md b/doc/manual/source/advanced-topics/distributed-builds.md index 08a980643e88..c39cf4500795 100644 --- a/doc/manual/source/advanced-topics/distributed-builds.md +++ b/doc/manual/source/advanced-topics/distributed-builds.md @@ -5,8 +5,8 @@ this allows multiple builds to be performed in parallel. Remote builds also allow Nix to perform multi-platform builds in a semi-transparent way. For example, if you perform a build for a -`x86_64-darwin` on an `i686-linux` machine, Nix can automatically -forward the build to a `x86_64-darwin` machine, if one is available. +`aarch64-darwin` on an `x86_64-linux` machine, Nix can automatically +forward the build to a `aarch64-darwin` machine, if one is available. ## Requirements @@ -59,7 +59,7 @@ then you need to ensure that the `PATH` of non-interactive login shells contains Nix. The [list of remote build machines](@docroot@/command-ref/conf-file.md#conf-builders) can be specified on the command line or in the Nix configuration file. 
-For example, the following command allows you to build a derivation for `x86_64-darwin` on a Linux machine: +For example, the following command allows you to build a derivation for `aarch64-darwin` on a Linux machine: ```console uname @@ -71,8 +71,8 @@ Linux ```console nix build --impure \ - --expr '(with import { system = "x86_64-darwin"; }; runCommand "foo" {} "uname > $out")' \ - --builders 'ssh://mac x86_64-darwin' + --expr '(with import { system = "aarch64-darwin"; }; runCommand "foo" {} "uname > $out")' \ + --builders 'ssh://mac aarch64-darwin' ``` ```console @@ -90,12 +90,12 @@ Darwin It is possible to specify multiple build machines separated by a semicolon or a newline, e.g. ```console - --builders 'ssh://mac x86_64-darwin ; ssh://beastie x86_64-freebsd' + --builders 'ssh://mac aarch64-darwin ; ssh://beastie x86_64-freebsd' ``` Remote build machines can also be configured in [`nix.conf`](@docroot@/command-ref/conf-file.md), e.g. - builders = ssh://mac x86_64-darwin ; ssh://beastie x86_64-freebsd + builders = ssh://mac aarch64-darwin ; ssh://beastie x86_64-freebsd After making changes to `nix.conf`, restart the Nix daemon for changes to take effect. @@ -107,4 +107,4 @@ file included in `builders` via the syntax `@/path/to/file`. For example, causes the list of machines in `/etc/nix/machines` to be included. (This is the default.) -[Nix instance]: @docroot@/glossary.md#gloss-nix-instance \ No newline at end of file +[Nix instance]: @docroot@/glossary.md#gloss-nix-instance diff --git a/doc/manual/source/advanced-topics/eval-profiler.md b/doc/manual/source/advanced-topics/eval-profiler.md index ed3848bb2db4..2bc7ebb05e00 100644 --- a/doc/manual/source/advanced-topics/eval-profiler.md +++ b/doc/manual/source/advanced-topics/eval-profiler.md @@ -27,7 +27,7 @@ site](https://en.wikipedia.org/wiki/Call_site) position and the name of the function being called (when available). 
For example: ``` -/nix/store/x9wnkly3k1gkq580m90jjn32q9f05q2v-source/pkgs/top-level/default.nix:167:5:primop import +/nix/store/2q71fdvr4h33g9832hiriwnf20fn630l-source/pkgs/top-level/default.nix:167:5:primop import ``` -Here `import` primop is called at `/nix/store/x9wnkly3k1gkq580m90jjn32q9f05q2v-source/pkgs/top-level/default.nix:167:5`. +Here `import` primop is called at `/nix/store/2q71fdvr4h33g9832hiriwnf20fn630l-source/pkgs/top-level/default.nix:167:5`. diff --git a/doc/manual/source/command-ref/env-common.md b/doc/manual/source/command-ref/env-common.md index e0fd2b00eec3..fe6e822ff16a 100644 --- a/doc/manual/source/command-ref/env-common.md +++ b/doc/manual/source/command-ref/env-common.md @@ -102,7 +102,7 @@ Most Nix commands interpret the following environment variables: This variable should be set to `daemon` if you want to use the Nix daemon to execute Nix operations. This is necessary in [multi-user - Nix installations](@docroot@/installation/multi-user.md). If the Nix + Nix installations](@docroot@/installation/nix-security.md#multi-user-model). If the Nix daemon's Unix socket is at some non-standard path, this variable should be set to `unix://path/to/socket`. Otherwise, it should be left unset. diff --git a/doc/manual/source/command-ref/experimental-commands.md b/doc/manual/source/command-ref/experimental-commands.md deleted file mode 100644 index 1190729a2305..000000000000 --- a/doc/manual/source/command-ref/experimental-commands.md +++ /dev/null @@ -1,8 +0,0 @@ -# Experimental Commands - -This section lists [experimental commands](@docroot@/development/experimental-features.md#xp-feature-nix-command). - -> **Warning** -> -> These commands may be removed in the future, or their syntax may -> change in incompatible ways. 
diff --git a/doc/manual/source/command-ref/files/default-nix-expression.md b/doc/manual/source/command-ref/files/default-nix-expression.md index 2bd45ff5debd..e886e3ff4991 100644 --- a/doc/manual/source/command-ref/files/default-nix-expression.md +++ b/doc/manual/source/command-ref/files/default-nix-expression.md @@ -31,12 +31,12 @@ Then, the resulting expression is interpreted like this: The file [`manifest.nix`](@docroot@/command-ref/files/manifest.nix.md) is always ignored. -The command [`nix-channel`] places a symlink to the current user's [channels] in this directory, the [user channel link](#user-channel-link). +The command [`nix-channel`] places a symlink to the current user's channels in this directory, the [user channel link](#user-channel-link). This makes all subscribed channels available as attributes in the default expression. ## User channel link -A symlink that ensures that [`nix-env`] can find the current user's [channels]: +A symlink that ensures that [`nix-env`] can find the current user's channels: - `~/.nix-defexpr/channels` - `$XDG_STATE_HOME/defexpr/channels` if [`use-xdg-base-directories`] is set to `true`. 
@@ -51,4 +51,3 @@ In a multi-user installation, you may also have `~/.nix-defexpr/channels_root`, [`nix-channel`]: @docroot@/command-ref/nix-channel.md [`nix-env`]: @docroot@/command-ref/nix-env.md [`use-xdg-base-directories`]: @docroot@/command-ref/conf-file.md#conf-use-xdg-base-directories -[channels]: @docroot@/command-ref/files/channels.md diff --git a/doc/manual/source/command-ref/files/manifest.nix.md b/doc/manual/source/command-ref/files/manifest.nix.md index d7d1b605b54b..78bfdc346eaf 100644 --- a/doc/manual/source/command-ref/files/manifest.nix.md +++ b/doc/manual/source/command-ref/files/manifest.nix.md @@ -114,9 +114,9 @@ Here is an example of how this file might look like after installing `hello` fro }; name = "hello-2.12.1"; out = { - outPath = "/nix/store/260q5867crm1xjs4khgqpl6vr9kywql1-hello-2.12.1"; + outPath = "/nix/store/src1vzij2z0slnakrsbpqpk20389z0k6-hello-2.12.1"; }; - outPath = "/nix/store/260q5867crm1xjs4khgqpl6vr9kywql1-hello-2.12.1"; + outPath = "/nix/store/src1vzij2z0slnakrsbpqpk20389z0k6-hello-2.12.1"; outputs = [ "out" ]; system = "x86_64-linux"; type = "derivation"; diff --git a/doc/manual/source/command-ref/files/profiles.md b/doc/manual/source/command-ref/files/profiles.md index b5c7378800fd..f137336747f1 100644 --- a/doc/manual/source/command-ref/files/profiles.md +++ b/doc/manual/source/command-ref/files/profiles.md @@ -37,13 +37,13 @@ dr-xr-xr-x 4 root root 4096 Jan 1 1970 share /home/eelco/.local/state/nix/profiles/profile-7-link/bin: total 20 -lrwxrwxrwx 5 root root 79 Jan 1 1970 chromium -> /nix/store/ijm5k0zqisvkdwjkc77mb9qzb35xfi4m-chromium-86.0.4240.111/bin/chromium +lrwxrwxrwx 5 root root 79 Jan 1 1970 chromium -> /nix/store/cyxny9d1zjb9l9103fr6j6kavp3bqjxf-chromium-86.0.4240.111/bin/chromium lrwxrwxrwx 7 root root 87 Jan 1 1970 spotify -> /nix/store/w9182874m1bl56smps3m5zjj36jhp3rn-spotify-1.1.26.501.gbe11e53b-15/bin/spotify lrwxrwxrwx 3 root root 79 Jan 1 1970 zoom-us -> 
/nix/store/wbhg2ga8f3h87s9h5k0slxk0m81m4cxl-zoom-us-5.3.469451.0927/bin/zoom-us /home/eelco/.local/state/nix/profiles/profile-7-link/share/applications: total 12 -lrwxrwxrwx 4 root root 120 Jan 1 1970 chromium-browser.desktop -> /nix/store/4cf803y4vzfm3gyk3vzhzb2327v0kl8a-chromium-unwrapped-86.0.4240.111/share/applications/chromium-browser.desktop +lrwxrwxrwx 4 root root 120 Jan 1 1970 chromium-browser.desktop -> /nix/store/sqzyx2l85i6j2a77pnyvglh3bvzwmjjp-chromium-unwrapped-86.0.4240.111/share/applications/chromium-browser.desktop lrwxrwxrwx 7 root root 110 Jan 1 1970 spotify.desktop -> /nix/store/w9182874m1bl56smps3m5zjj36jhp3rn-spotify-1.1.26.501.gbe11e53b-15/share/applications/spotify.desktop lrwxrwxrwx 3 root root 107 Jan 1 1970 us.zoom.Zoom.desktop -> /nix/store/wbhg2ga8f3h87s9h5k0slxk0m81m4cxl-zoom-us-5.3.469451.0927/share/applications/us.zoom.Zoom.desktop @@ -67,7 +67,7 @@ By default, this symlink points to: - `$NIX_STATE_DIR/profiles/per-user/root/profile` for `root` The `PATH` environment variable should include `/bin` subdirectory of the profile link (e.g. `~/.nix-profile/bin`) for the user environment to be visible to the user. -The [installer](@docroot@/installation/installing-binary.md) sets this up by default, unless you enable [`use-xdg-base-directories`]. +The installer sets this up by default, unless you enable [`use-xdg-base-directories`]. [`nix-env`]: @docroot@/command-ref/nix-env.md [`nix profile`]: @docroot@/command-ref/new-cli/nix3-profile.md diff --git a/doc/manual/source/command-ref/nix-channel.md b/doc/manual/source/command-ref/nix-channel.md index 865f43ccce5b..59817be974b5 100644 --- a/doc/manual/source/command-ref/nix-channel.md +++ b/doc/manual/source/command-ref/nix-channel.md @@ -8,6 +8,12 @@ # Description +> **Warning** +> +> nix-channel is deprecated in favor of flakes in Determinate Nix. +> For a guide on Nix flakes, see: . +> For details and to offer feedback on the deprecation process, see: . 
+ Channels are a mechanism for referencing remote Nix expressions and conveniently retrieving their latest version. The moving parts of channels are: diff --git a/doc/manual/source/command-ref/nix-copy-closure.md b/doc/manual/source/command-ref/nix-copy-closure.md index b7e31d93bfc3..b34d57a50150 100644 --- a/doc/manual/source/command-ref/nix-copy-closure.md +++ b/doc/manual/source/command-ref/nix-copy-closure.md @@ -72,11 +72,11 @@ When using public key authentication, you can avoid typing the passphrase with ` > $ storePath="$(nix-build '' -I nixpkgs=channel:nixpkgs-unstable -A hello --no-out-link)" > $ nix-copy-closure --to alice@itchy.example.org "$storePath" > copying 5 paths... -> copying path '/nix/store/nrwkk6ak3rgkrxbqhsscb01jpzmslf2r-xgcc-13.2.0-libgcc' to 'ssh://alice@itchy.example.org'... -> copying path '/nix/store/gm61h1y42pqyl6178g90x8zm22n6pyy5-libunistring-1.1' to 'ssh://alice@itchy.example.org'... -> copying path '/nix/store/ddfzjdykw67s20c35i7a6624by3iz5jv-libidn2-2.3.7' to 'ssh://alice@itchy.example.org'... -> copying path '/nix/store/apab5i73dqa09wx0q27b6fbhd1r18ihl-glibc-2.39-31' to 'ssh://alice@itchy.example.org'... -> copying path '/nix/store/g1n2vryg06amvcc1avb2mcq36faly0mh-hello-2.12.1' to 'ssh://alice@itchy.example.org'... +> copying path '/nix/store/h6q8sqsqfbd3252f9gixqn3z282wds7m-xgcc-13.2.0-libgcc' to 'ssh://alice@itchy.example.org'... +> copying path '/nix/store/imnwvn96lw355giswsk36hx105j4wnpj-libunistring-1.1' to 'ssh://alice@itchy.example.org'... +> copying path '/nix/store/85301indj7scg34spnfczkz72jgv8wa9-libidn2-2.3.7' to 'ssh://alice@itchy.example.org'... +> copying path '/nix/store/ypwfsaljwhzw9iffiysxmxnhjj8v7np0-glibc-2.39-31' to 'ssh://alice@itchy.example.org'... +> copying path '/nix/store/0dklv59zppdsqdvgf0qdvjgzcs5wbwxa-hello-2.12.1' to 'ssh://alice@itchy.example.org'... 
> ``` > **Example** diff --git a/doc/manual/source/command-ref/nix-env.md b/doc/manual/source/command-ref/nix-env.md index bda02149ed06..d01caaf7f787 100644 --- a/doc/manual/source/command-ref/nix-env.md +++ b/doc/manual/source/command-ref/nix-env.md @@ -52,7 +52,7 @@ These pages can be viewed offline: `nix-env` can obtain packages from multiple sources: - An attribute set of derivations from: - - The [default Nix expression](@docroot@/command-ref/files/default-nix-expression.md) (by default) + - The default Nix expression (by default) - A Nix file, specified via `--file` - A [profile](@docroot@/command-ref/files/profiles.md), specified via `--from-profile` - A Nix expression that is a function which takes default expression as argument, specified via `--from-expression` diff --git a/doc/manual/source/command-ref/nix-env/install.md b/doc/manual/source/command-ref/nix-env/install.md index 527fd8f90d87..320fa530fdaa 100644 --- a/doc/manual/source/command-ref/nix-env/install.md +++ b/doc/manual/source/command-ref/nix-env/install.md @@ -22,12 +22,11 @@ It is based on the current generation of the active [profile](@docroot@/command- The arguments *args* map to store paths in a number of possible ways: -- By default, *args* is a set of names denoting derivations in the [default Nix expression]. +- By default, *args* is a set of names denoting derivations in the default Nix expression. These are [realised], and the resulting output paths are installed. Currently installed derivations with a name equal to the name of a derivation being added are removed unless the option `--preserve-installed` is specified. 
[derivation expression]: @docroot@/glossary.md#gloss-derivation-expression - [default Nix expression]: @docroot@/command-ref/files/default-nix-expression.md [realised]: @docroot@/glossary.md#gloss-realise If there are multiple derivations matching a name in *args* that @@ -45,7 +44,7 @@ The arguments *args* map to store paths in a number of possible ways: gcc-3.3.6 gcc-4.1.1` will install both version of GCC (and will probably cause a user environment conflict\!). -- If [`--attr`](#opt-attr) / `-A` is specified, the arguments are *attribute paths* that select attributes from the [default Nix expression]. +- If [`--attr`](#opt-attr) / `-A` is specified, the arguments are *attribute paths* that select attributes from the default Nix expression. This is faster than using derivation names and unambiguous. Show the attribute paths of available packages with [`nix-env --query`](./query.md): @@ -58,7 +57,7 @@ The arguments *args* map to store paths in a number of possible ways: easy way to copy user environment elements from one profile to another. -- If `--from-expression` is given, *args* are [Nix language functions](@docroot@/language/syntax.md#functions) that are called with the [default Nix expression] as their single argument. +- If `--from-expression` is given, *args* are [Nix language functions](@docroot@/language/syntax.md#functions) that are called with the default Nix expression as their single argument. The derivations returned by those function calls are installed. This allows derivations to be specified in an unambiguous way, which is necessary if there are multiple derivations with the same name. 
@@ -204,7 +203,7 @@ To install a specific [store derivation] (typically created by `nix-instantiate`): ```console -$ nix-env --install /nix/store/fibjb1bfbpm5mrsxc4mh2d8n37sxh91i-gcc-3.4.3.drv +$ nix-env --install /nix/store/8la6y31fmm6i4wfmby6avly1wf718xnj-gcc-3.4.3.drv ``` To install a specific output path: @@ -232,7 +231,7 @@ $ nix-env --file '' --install --attr hello --dry-run (dry run; not doing anything) installing ‘hello-2.10’ this path will be fetched (0.04 MiB download, 0.19 MiB unpacked): - /nix/store/wkhdf9jinag5750mqlax6z2zbwhqb76n-hello-2.10 + /nix/store/ikwkxz4wwlp2g1428n7dy729cg1d9hin-hello-2.10 ... ``` diff --git a/doc/manual/source/command-ref/nix-prefetch-url.md b/doc/manual/source/command-ref/nix-prefetch-url.md index 19322ec8e04c..8451778ad46d 100644 --- a/doc/manual/source/command-ref/nix-prefetch-url.md +++ b/doc/manual/source/command-ref/nix-prefetch-url.md @@ -76,7 +76,7 @@ $ nix-prefetch-url ftp://ftp.gnu.org/pub/gnu/hello/hello-2.10.tar.gz ```console $ nix-prefetch-url --print-path mirror://gnu/hello/hello-2.10.tar.gz 0ssi1wpaf7plaswqqjwigppsg5fyh99vdlb9kzl7c9lng89ndq1i -/nix/store/3x7dwzq014bblazs7kq20p9hyzz0qh8g-hello-2.10.tar.gz +/nix/store/8alrpdaasjd1x6g1fczchmzbpqm936a3-hello-2.10.tar.gz ``` ```console diff --git a/doc/manual/source/command-ref/nix-store/add-fixed.md b/doc/manual/source/command-ref/nix-store/add-fixed.md index 2ea90a135925..511fe2050ebe 100644 --- a/doc/manual/source/command-ref/nix-store/add-fixed.md +++ b/doc/manual/source/command-ref/nix-store/add-fixed.md @@ -34,6 +34,6 @@ This operation has the following options: ```console $ nix-store --add-fixed sha256 ./hello-2.10.tar.gz -/nix/store/3x7dwzq014bblazs7kq20p9hyzz0qh8g-hello-2.10.tar.gz +/nix/store/8alrpdaasjd1x6g1fczchmzbpqm936a3-hello-2.10.tar.gz ``` diff --git a/doc/manual/source/command-ref/nix-store/delete.md b/doc/manual/source/command-ref/nix-store/delete.md index 550c5ea2914c..fcb2212d86d8 100644 --- a/doc/manual/source/command-ref/nix-store/delete.md +++ 
b/doc/manual/source/command-ref/nix-store/delete.md @@ -27,7 +27,7 @@ paths in the store that refer to it (i.e., depend on it). # Example ```console -$ nix-store --delete /nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4 +$ nix-store --delete /nix/store/gjak3al7lj61x4gj6rln4f5pc5v0f67n-mesa-6.4 0 bytes freed (0.00 MiB) -error: cannot delete path `/nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4' since it is still alive +error: cannot delete path `/nix/store/gjak3al7lj61x4gj6rln4f5pc5v0f67n-mesa-6.4' since it is still alive ``` diff --git a/doc/manual/source/command-ref/nix-store/query.md b/doc/manual/source/command-ref/nix-store/query.md index b5ba63adae26..cc45eeb74cf1 100644 --- a/doc/manual/source/command-ref/nix-store/query.md +++ b/doc/manual/source/command-ref/nix-store/query.md @@ -103,6 +103,13 @@ symlink. example when *paths* were substituted from a binary cache. Use `--valid-derivers` instead to obtain valid paths only. + > **Note** + > + > `nix-store --query --deriver` is replaced with the following `nix` command: + > + > nix path-info --json ... | jq -r '.[].deriver' + + [deriver]: @docroot@/glossary.md#gloss-deriver - `--valid-derivers` @@ -184,9 +191,9 @@ Print the build-time dependencies of `svn`: ```console $ nix-store --query --requisites $(nix-store --query --deriver $(which svn)) -/nix/store/02iizgn86m42q905rddvg4ja975bk2i4-grep-2.5.1.tar.bz2.drv -/nix/store/07a2bzxmzwz5hp58nf03pahrv2ygwgs3-gcc-wrapper.sh -/nix/store/0ma7c9wsbaxahwwl04gbw3fcd806ski4-glibc-2.3.4.drv +/nix/store/y6qa66l9h0pw161crnlk6y16rdrcljx4-grep-2.5.1.tar.bz2.drv +/nix/store/z716h753s97jhnzvfank2srqbljswpgm-gcc-wrapper.sh +/nix/store/f39x0q73rjdyvzm93y9wrkfr6x39lb7f-glibc-2.3.4.drv ... lots of other paths ... 
``` @@ -199,10 +206,10 @@ Show the build-time dependencies as a tree: ```console $ nix-store --query --tree $(nix-store --query --deriver $(which svn)) /nix/store/7i5082kfb6yjbqdbiwdhhza0am2xvh6c-subversion-1.1.4.drv -+---/nix/store/d8afh10z72n8l1cr5w42366abiblgn54-builder.sh -+---/nix/store/fmzxmpjx2lh849ph0l36snfj9zdibw67-bash-3.0.drv -| +---/nix/store/570hmhmx3v57605cqg9yfvvyh0nnb8k8-bash -| +---/nix/store/p3srsbd8dx44v2pg6nbnszab5mcwx03v-builder.sh ++---/nix/store/vxnmkc8l8d2ijjha4xwhkfgx9vvc3q4c-builder.sh ++---/nix/store/rn9776dy82n5qrgz7xbcl1iw4vfkcrkk-bash-3.0.drv +| +---/nix/store/x9j20hz6bln1crzn55qifk0bbsm8v5ac-bash +| +---/nix/store/ajnn1mcm45wjvn0rlc22gvx2cwhjnazx-builder.sh ... ``` diff --git a/doc/manual/source/command-ref/nix-store/realise.md b/doc/manual/source/command-ref/nix-store/realise.md index 240685ce5c78..f5d203894e6b 100644 --- a/doc/manual/source/command-ref/nix-store/realise.md +++ b/doc/manual/source/command-ref/nix-store/realise.md @@ -76,7 +76,7 @@ This operation is typically used to build [store derivation]s produced by ```console $ nix-store --realise $(nix-instantiate ./test.nix) -/nix/store/31axcgrlbfsxzmfff1gyj1bf62hvkby2-aterm-2.3.1 +/nix/store/6gwmy5jcnwdlz6aqqhksz863f1l8xc2w-aterm-2.3.1 ``` This is essentially what [`nix-build`](@docroot@/command-ref/nix-build.md) does. diff --git a/doc/manual/source/command-ref/subcommands.md b/doc/manual/source/command-ref/subcommands.md new file mode 100644 index 000000000000..6a26732338d1 --- /dev/null +++ b/doc/manual/source/command-ref/subcommands.md @@ -0,0 +1,3 @@ +# Subcommands + +This section lists all the subcommands of the `nix` CLI. diff --git a/doc/manual/source/development/building.md b/doc/manual/source/development/building.md index eb65a7247572..9694183ba82e 100644 --- a/doc/manual/source/development/building.md +++ b/doc/manual/source/development/building.md @@ -1,73 +1,5 @@ # Building Nix -This section provides some notes on how to start hacking on Nix. 
-To get the latest version of Nix from GitHub: - -```console -$ git clone https://github.com/NixOS/nix.git -$ cd nix -``` - -> **Note** -> -> The following instructions assume you already have some version of Nix installed locally, so that you can use it to set up the development environment. -> If you don't have it installed, follow the [installation instructions](../installation/index.md). - - -To build all dependencies and start a shell in which all environment variables are set up so that those dependencies can be found: - -```console -$ nix-shell -``` - -To get a shell with one of the other [supported compilation environments](#compilation-environments): - -```console -$ nix-shell --attr devShells.x86_64-linux.native-clangStdenv -``` - -> **Note** -> -> You can use `native-ccacheStdenv` to drastically improve rebuild time. -> By default, [ccache](https://ccache.dev) keeps artifacts in `~/.cache/ccache/`. - -To build Nix itself in this shell: - -```console -[nix-shell]$ out="$(pwd)/outputs/out" dev=$out debug=$out mesonFlags+=" --prefix=${out}" -[nix-shell]$ dontAddPrefix=1 configurePhase -[nix-shell]$ buildPhase -``` - -To test it: - -```console -[nix-shell]$ checkPhase -``` - -To install it in `$(pwd)/outputs`: - -```console -[nix-shell]$ installPhase -[nix-shell]$ ./outputs/out/bin/nix --version -nix (Nix) 2.12 -``` - -To build a release version of Nix for the current operating system and CPU architecture: - -```console -$ nix-build -``` - -You can also build Nix for one of the [supported platforms](#platforms). - -## Building Nix with flakes - -This section assumes you are using Nix with the [`flakes`] and [`nix-command`] experimental features enabled. 
- -[`flakes`]: @docroot@/development/experimental-features.md#xp-feature-flakes -[`nix-command`]: @docroot@/development/experimental-features.md#xp-feature-nix-command - To build all dependencies and start a shell in which all environment variables are set up so that those dependencies can be found: ```console @@ -126,8 +58,6 @@ Nix can be built for various platforms, as specified in [`flake.nix`]: [`flake.nix`]: https://github.com/nixos/nix/blob/master/flake.nix - `x86_64-linux` -- `x86_64-darwin` -- `i686-linux` - `aarch64-linux` - `aarch64-darwin` - `armv6l-linux` @@ -145,12 +75,6 @@ platform. Common solutions include [remote build machines] and [binary format em Given such a setup, executing the build only requires selecting the respective attribute. For example, to compile for `aarch64-linux`: -```console -$ nix-build --attr packages.aarch64-linux.default -``` - -or for Nix with the [`flakes`] and [`nix-command`] experimental features enabled: - ```console $ nix build .#packages.aarch64-linux.default ``` @@ -219,6 +143,7 @@ For historic reasons and backward-compatibility, some CPU and OS identifiers are |-----------------------------|-------------------------|---------------------| | `x86` | | `i686` | | `arm` | | `host_machine.cpu()`| +| `arm64` | | `host_machine.cpu()`| | `ppc` | `little` | `powerpcle` | | `ppc64` | `little` | `powerpc64le` | | `ppc` | `big` | `powerpc` | @@ -243,20 +168,12 @@ To build with one of those environments, you can use $ nix build .#nix-cli-ccacheStdenv ``` -for flake-enabled Nix, or - -```console -$ nix-build --attr nix-cli-ccacheStdenv -``` - -for classic Nix. - You can use any of the other supported environments in place of `nix-cli-ccacheStdenv`. ## Editor integration The `clangd` LSP server is installed by default on the `clang`-based `devShell`s. 
-See [supported compilation environments](#compilation-environments) and instructions how to set up a shell [with flakes](#building-nix-with-flakes) or in [classic Nix](#building-nix). +See [supported compilation environments](#compilation-environments) and instructions how to [set up a shell](#building-nix). To use the LSP with your editor, you will want a `compile_commands.json` file telling `clangd` how we are compiling the code. Meson's configure always produces this inside the build directory. diff --git a/doc/manual/source/development/debugging.md b/doc/manual/source/development/debugging.md index d2450495e504..6578632d991a 100644 --- a/doc/manual/source/development/debugging.md +++ b/doc/manual/source/development/debugging.md @@ -6,14 +6,7 @@ Additionally, see [Testing Nix](./testing.md) for further instructions on how to ## Building Nix with Debug Symbols -In the development shell, set the `mesonBuildType` environment variable to `debug` before configuring the build: - -```console -[nix-shell]$ export mesonBuildType=debugoptimized -``` - -Then, proceed to build Nix as described in [Building Nix](./building.md). -This will build Nix with debug symbols, which are essential for effective debugging. +In the development shell, `mesonBuildType` is set automatically to `debugoptimized`. This builds Nix with debug symbols, which are essential for effective debugging. It is also possible to build without optimization for faster build: diff --git a/doc/manual/source/development/experimental-features.md b/doc/manual/source/development/experimental-features.md index ad5cffa91ee5..56a45b23890a 100644 --- a/doc/manual/source/development/experimental-features.md +++ b/doc/manual/source/development/experimental-features.md @@ -6,7 +6,7 @@ Experimental features are considered unstable, which means that they can be chan Users must explicitly enable them by toggling the associated [experimental feature flags](@docroot@/command-ref/conf-file.md#conf-experimental-features). 
This allows accessing unstable functionality without unwittingly relying on it. -Experimental feature flags were first introduced in [Nix 2.4](@docroot@/release-notes/rl-2.4.md). +Experimental feature flags were first introduced in [Nix 2.4](https://nix.dev/manual/nix/latest/release-notes/rl-2.4). Before that, Nix did have experimental features, but they were not guarded by flags and were merely documented as unstable. This was a source of confusion and controversy. diff --git a/doc/manual/source/development/testing.md b/doc/manual/source/development/testing.md index dd965862a34c..35654d163935 100644 --- a/doc/manual/source/development/testing.md +++ b/doc/manual/source/development/testing.md @@ -325,7 +325,6 @@ Creating a Cachix cache for your installer tests and adding its authorisation to - `x86_64-linux` - `armv6l-linux` - `armv7l-linux` - - `x86_64-darwin` - The `installer_test` job (which runs on `ubuntu-24.04` and `macos-14`) will try to install Nix with the cached installer and run a trivial Nix command. diff --git a/doc/manual/source/favicon.png b/doc/manual/source/favicon.png deleted file mode 100644 index 1ed2b5fe0fdf..000000000000 Binary files a/doc/manual/source/favicon.png and /dev/null differ diff --git a/doc/manual/source/favicon.svg b/doc/manual/source/favicon.svg index 1d2a6e835d5f..55fb9479b06e 100644 --- a/doc/manual/source/favicon.svg +++ b/doc/manual/source/favicon.svg @@ -1 +1,29 @@ - \ No newline at end of file + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/manual/source/glossary.md b/doc/manual/source/glossary.md index 502e6d4de6b0..64ca1cf5e167 100644 --- a/doc/manual/source/glossary.md +++ b/doc/manual/source/glossary.md @@ -136,7 +136,7 @@ > **Example** > - > `/nix/store/a040m110amc4h71lds2jmr8qrkj2jhxd-git-2.38.1` + > `/nix/store/jf6gn2dzna4nmsfbdxsd7kwhsk6gnnlr-git-2.38.1` See [Store Path](@docroot@/store/store-path.md) for details. 
@@ -353,14 +353,6 @@ See [Nix Archive](store/file-system-object/content-address.html#serial-nix-archive) for details. -- [`∅`]{#gloss-empty-set} - - The empty set symbol. In the context of profile history, this denotes a package is not present in a particular version of the profile. - -- [`ε`]{#gloss-epsilon} - - The epsilon symbol. In the context of a package, this means the version is empty. More precisely, the derivation does not have a version attribute. - - [package]{#package} A software package; files that belong together for a particular purpose, and metadata. diff --git a/doc/manual/source/installation/env-variables.md b/doc/manual/source/installation/env-variables.md deleted file mode 100644 index 0350904211ac..000000000000 --- a/doc/manual/source/installation/env-variables.md +++ /dev/null @@ -1,62 +0,0 @@ -# Environment Variables - -To use Nix, some environment variables should be set. In particular, -`PATH` should contain the directories `prefix/bin` and -`~/.nix-profile/bin`. The first directory contains the Nix tools -themselves, while `~/.nix-profile` is a symbolic link to the current -*user environment* (an automatically generated package consisting of -symlinks to installed packages). The simplest way to set the required -environment variables is to include the file -`prefix/etc/profile.d/nix.sh` in your `~/.profile` (or similar), like -this: - -```bash -source prefix/etc/profile.d/nix.sh -``` - -# `NIX_SSL_CERT_FILE` - -If you need to specify a custom certificate bundle to account for an -HTTPS-intercepting man in the middle proxy, you must specify the path to -the certificate bundle in the environment variable `NIX_SSL_CERT_FILE`. - -If you don't specify a `NIX_SSL_CERT_FILE` manually, Nix will install -and use its own certificate bundle. 
- -Set the environment variable and install Nix - -```console -$ export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt -$ curl -L https://nixos.org/nix/install | sh -``` - -In the shell profile and rc files (for example, `/etc/bashrc`, -`/etc/zshrc`), add the following line: - -```bash -export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt -``` - -> **Note** -> -> You must not add the export and then do the install, as the Nix -> installer will detect the presence of Nix configuration, and abort. - -If you use the Nix daemon, you should also add the following to -`/etc/nix/nix.conf`: - -``` -ssl-cert-file = /etc/ssl/my-certificate-bundle.crt -``` - -## Proxy Environment Variables - -The Nix installer has special handling for these proxy-related -environment variables: `http_proxy`, `https_proxy`, `ftp_proxy`, -`all_proxy`, `no_proxy`, `HTTP_PROXY`, `HTTPS_PROXY`, `FTP_PROXY`, -`ALL_PROXY`, `NO_PROXY`. - -If any of these variables are set when running the Nix installer, then -the installer will create an override file at -`/etc/systemd/system/nix-daemon.service.d/override.conf` so `nix-daemon` -will use them. diff --git a/doc/manual/source/installation/index.md b/doc/manual/source/installation/index.md index 3c09f103184a..aded684b0b59 100644 --- a/doc/manual/source/installation/index.md +++ b/doc/manual/source/installation/index.md @@ -1,44 +1,11 @@ # Installation -This section describes how to install and configure Nix for first-time use. - -The current recommended option on Linux and MacOS is [multi-user](#multi-user). - -## Multi-user - -This installation offers better sharing, improved isolation, and more security -over a single user installation. 
- -This option requires either: - -* Linux running systemd, with SELinux disabled -* MacOS - -> **Updating to macOS 15 Sequoia** -> -> If you recently updated to macOS 15 Sequoia and are getting -> ```console -> error: the user '_nixbld1' in the group 'nixbld' does not exist -> ``` -> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling. +We recommend that macOS users install Determinate Nix using our graphical installer, [Determinate.pkg][pkg]. +For Linux and Windows Subsystem for Linux (WSL) users: ```console -$ curl -L https://nixos.org/nix/install | sh -s -- --daemon -``` - -## Single-user - -> Single-user is not supported on Mac. - -> `warning: installing Nix as root is not supported by this script!` - -This installation has less requirements than the multi-user install, however it -cannot offer equivalent sharing, isolation, or security. - -This option is suitable for systems without systemd. - -```console -$ curl -L https://nixos.org/nix/install | sh -s -- --no-daemon +curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | \ + sh -s -- install ``` ## Distributions @@ -46,3 +13,5 @@ $ curl -L https://nixos.org/nix/install | sh -s -- --no-daemon The Nix community maintains installers for several distributions. They can be found in the [`nix-community/nix-installers`](https://github.com/nix-community/nix-installers) repository. 
+ +[pkg]: https://install.determinate.systems/determinate-pkg/stable/Universal diff --git a/doc/manual/source/installation/installing-binary.md b/doc/manual/source/installation/installing-binary.md deleted file mode 100644 index 21c15637437d..000000000000 --- a/doc/manual/source/installation/installing-binary.md +++ /dev/null @@ -1,158 +0,0 @@ -# Installing a Binary Distribution - -> **Updating to macOS 15 Sequoia** -> -> If you recently updated to macOS 15 Sequoia and are getting -> ```console -> error: the user '_nixbld1' in the group 'nixbld' does not exist -> ``` -> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling. - -To install the latest version Nix, run the following command: - -```console -$ curl -L https://nixos.org/nix/install | sh -``` - -This performs the default type of installation for your platform: - -- [Multi-user](#multi-user-installation): - - Linux with systemd and without SELinux - - macOS -- [Single-user](#single-user-installation): - - Linux without systemd - - Linux with SELinux - -We recommend the multi-user installation if it supports your platform and you can authenticate with `sudo`. - -The installer can be configured with various command line arguments and environment variables. -To show available command line flags: - -```console -$ curl -L https://nixos.org/nix/install | sh -s -- --help -``` - -To check what it does and how it can be customised further, [download and edit the second-stage installation script](#installing-from-a-binary-tarball). - -# Installing a pinned Nix version from a URL - -Version-specific installation URLs for all Nix versions since 1.11.16 can be found at [releases.nixos.org](https://releases.nixos.org/?prefix=nix/). -The directory for each version contains the corresponding SHA-256 hash. 
- -All installation scripts are invoked the same way: - -```console -$ export VERSION=2.19.2 -$ curl -L https://releases.nixos.org/nix/nix-$VERSION/install | sh -``` - -# Multi User Installation - -The multi-user Nix installation creates system users and a system service for the Nix daemon. - -Supported systems: - -- Linux running systemd, with SELinux disabled -- macOS - -To explicitly instruct the installer to perform a multi-user installation on your system: - -```console -$ bash <(curl -L https://nixos.org/nix/install) --daemon -``` - -You can run this under your usual user account or `root`. -The script will invoke `sudo` as needed. - -# Single User Installation - -To explicitly select a single-user installation on your system: - -```console -$ bash <(curl -L https://nixos.org/nix/install) --no-daemon -``` - -In a single-user installation, `/nix` is owned by the invoking user. -The script will invoke `sudo` to create `/nix` if it doesn’t already exist. -If you don’t have `sudo`, manually create `/nix` as `root`: - -```console -$ su root -# mkdir /nix -# chown alice /nix -``` - -# Installing from a binary tarball - -You can also download a binary tarball that contains Nix and all its dependencies: -- Choose a [version](https://releases.nixos.org/?prefix=nix/) and [system type](../development/building.md#platforms) -- Download and unpack the tarball -- Run the installer - -> **Example** -> -> ```console -> $ pushd $(mktemp -d) -> $ export VERSION=2.19.2 -> $ export SYSTEM=x86_64-linux -> $ curl -LO https://releases.nixos.org/nix/nix-$VERSION/nix-$VERSION-$SYSTEM.tar.xz -> $ tar xfj nix-$VERSION-$SYSTEM.tar.xz -> $ cd nix-$VERSION-$SYSTEM -> $ ./install -> $ popd -> ``` - -The installer can be customised with the environment variables declared in the file named `install-multi-user`. 
- -## Native packages for Linux distributions - -The Nix community maintains installers for some Linux distributions in their native packaging format(https://nix-community.github.io/nix-installers/). - -# macOS Installation - - -[]{#sect-macos-installation-change-store-prefix}[]{#sect-macos-installation-encrypted-volume}[]{#sect-macos-installation-symlink}[]{#sect-macos-installation-recommended-notes} - -We believe we have ironed out how to cleanly support the read-only root file system -on modern macOS. New installs will do this automatically. - -This section previously detailed the situation, options, and trade-offs, -but it now only outlines what the installer does. You don't need to know -this to run the installer, but it may help if you run into trouble: - -- create a new APFS volume for your Nix store -- update `/etc/synthetic.conf` to direct macOS to create a "synthetic" - empty root directory to mount your volume -- specify mount options for the volume in `/etc/fstab` - - `rw`: read-write - - `noauto`: prevent the system from auto-mounting the volume (so the - LaunchDaemon mentioned below can control mounting it, and to avoid - masking problems with that mounting service). - - `nobrowse`: prevent the Nix Store volume from showing up on your - desktop; also keeps Spotlight from spending resources to index - this volume - -- if you have FileVault enabled - - generate an encryption password - - put it in your system Keychain - - use it to encrypt the volume -- create a system LaunchDaemon to mount this volume early enough in the - boot process to avoid problems loading or restoring any programs that - need access to your Nix store - diff --git a/doc/manual/source/installation/nix-security.md b/doc/manual/source/installation/nix-security.md index 1e9036b68b21..61cad24c2b3b 100644 --- a/doc/manual/source/installation/nix-security.md +++ b/doc/manual/source/installation/nix-security.md @@ -1,15 +1,85 @@ # Security -Nix has two basic security models. 
First, it can be used in “single-user
-mode”, which is similar to what most other package management tools do:
-there is a single user (typically root) who performs all package
-management operations. All other users can then use the installed
-packages, but they cannot perform package management operations
-themselves.
-
-Alternatively, you can configure Nix in “multi-user mode”. In this
-model, all users can perform package management operations — for
-instance, every user can install software without requiring root
-privileges. Nix ensures that this is secure. For instance, it’s not
-possible for one user to overwrite a package used by another user with a
-Trojan horse.
+Nix follows a [**multi-user**](#multi-user-model) security model in which all
+users can perform package management operations. Every user can, for example,
+install software without requiring root privileges, and Nix ensures that this
+is secure. It's *not* possible for one user to, for example, overwrite a
+package used by another user with a Trojan horse.
+
+## Multi-User model
+
+To allow a Nix store to be shared safely among multiple users, it is
+important that users are not able to run builders that modify the Nix
+store or database in arbitrary ways, or that interfere with builds
+started by other users. If they could do so, they could install a Trojan
+horse in some package and compromise the accounts of other users.
+
+To prevent this, the Nix store and database are owned by some privileged
+user (usually `root`) and builders are executed under special user
+accounts (usually named `nixbld1`, `nixbld2`, etc.). When an unprivileged
+user runs a Nix command, actions that operate on the Nix store (such as
+builds) are forwarded to a *Nix daemon* running under the owner of the
+Nix store/database that performs the operation.
+ +> **Note** +> +> Multi-user mode has one important limitation: only root and a set of +> trusted users specified in `nix.conf` can specify arbitrary binary +> caches. So while unprivileged users may install packages from +> arbitrary Nix expressions, they may not get pre-built binaries. + +### Setting up the build users + +The *build users* are the special UIDs under which builds are performed. +They should all be members of the *build users group* `nixbld`. This +group should have no other members. The build users should not be +members of any other group. On Linux, you can create the group and users +as follows: + +```console +$ groupadd -r nixbld +$ for n in $(seq 1 10); do useradd -c "Nix build user $n" \ + -d /var/empty -g nixbld -G nixbld -M -N -r -s "$(which nologin)" \ + nixbld$n; done +``` + +This creates 10 build users. There can never be more concurrent builds +than the number of build users, so you may want to increase this if you +expect to do many builds at the same time. + +### Running the daemon + +The [Nix daemon](../command-ref/nix-daemon.md) should be started as +follows (as `root`): + +```console +$ nix-daemon +``` + +You’ll want to put that line somewhere in your system’s boot scripts. + +To let unprivileged users use the daemon, they should set the +[`NIX_REMOTE` environment variable](../command-ref/env-common.md) to +`daemon`. So you should put a line like + +```console +export NIX_REMOTE=daemon +``` + +into the users’ login scripts. + +### Restricting access + +To limit which users can perform Nix operations, you can use the +permissions on the directory `/nix/var/nix/daemon-socket`. 
For instance, +if you want to restrict the use of Nix to the members of a group called +`nix-users`, do + +```console +$ chgrp nix-users /nix/var/nix/daemon-socket +$ chmod ug=rwx,o= /nix/var/nix/daemon-socket +``` + +This way, users who are not in the `nix-users` group cannot connect to +the Unix domain socket `/nix/var/nix/daemon-socket/socket`, so they +cannot perform Nix operations. diff --git a/doc/manual/source/installation/single-user.md b/doc/manual/source/installation/single-user.md deleted file mode 100644 index f9a3b26edf41..000000000000 --- a/doc/manual/source/installation/single-user.md +++ /dev/null @@ -1,9 +0,0 @@ -# Single-User Mode - -In single-user mode, all Nix operations that access the database in -`prefix/var/nix/db` or modify the Nix store in `prefix/store` must be -performed under the user ID that owns those directories. This is -typically root. (If you install from RPM packages, that’s in fact the -default ownership.) However, on single-user machines, it is often -convenient to `chown` those directories to your normal user account so -that you don’t have to `su` to root all the time. diff --git a/doc/manual/source/installation/supported-platforms.md b/doc/manual/source/installation/supported-platforms.md deleted file mode 100644 index 8ca3ce8d445e..000000000000 --- a/doc/manual/source/installation/supported-platforms.md +++ /dev/null @@ -1,7 +0,0 @@ -# Supported Platforms - -Nix is currently supported on the following platforms: - - - Linux (i686, x86\_64, aarch64). - - - macOS (x86\_64, aarch64). diff --git a/doc/manual/source/installation/uninstall.md b/doc/manual/source/installation/uninstall.md index 69d59847b6fd..e95634c213a1 100644 --- a/doc/manual/source/installation/uninstall.md +++ b/doc/manual/source/installation/uninstall.md @@ -1,197 +1,15 @@ # Uninstalling Nix -## Multi User - -Removing a [multi-user installation](./installing-binary.md#multi-user-installation) depends on the operating system. 
- -### Linux - -If you are on Linux with systemd: - -1. Remove the Nix daemon service: - - ```console - sudo systemctl stop nix-daemon.service - sudo systemctl disable nix-daemon.socket nix-daemon.service - sudo systemctl daemon-reload - ``` - -Remove files created by Nix: +To uninstall Determinate Nix, use the uninstallation utility built into the [Determinate Nix Installer][installer]: ```console -sudo rm -rf /etc/nix /etc/profile.d/nix.sh /etc/tmpfiles.d/nix-daemon.conf /nix ~root/.nix-channels ~root/.nix-defexpr ~root/.nix-profile ~root/.cache/nix +$ /nix/nix-installer uninstall ``` -Remove build users and their group: +If you're certain that you want to uninstall, you can skip the confirmation step: ```console -for i in $(seq 1 32); do - sudo userdel nixbld$i -done -sudo groupdel nixbld +$ /nix/nix-installer uninstall --no-confirm ``` -There may also be references to Nix in - -- `/etc/bash.bashrc` -- `/etc/bashrc` -- `/etc/profile` -- `/etc/zsh/zshrc` -- `/etc/zshrc` - -which you may remove. - -### FreeBSD - -1. Stop and remove the Nix daemon service: - - ```console - sudo service nix-daemon stop - sudo rm -f /usr/local/etc/rc.d/nix-daemon - sudo sysrc -x nix_daemon_enable - ``` - -2. Remove files created by Nix: - - ```console - sudo rm -rf /etc/nix /usr/local/etc/profile.d/nix.sh /nix ~root/.nix-channels ~root/.nix-defexpr ~root/.nix-profile ~root/.cache/nix - ``` - -3. Remove build users and their group: - - ```console - for i in $(seq 1 32); do - sudo pw userdel nixbld$i - done - sudo pw groupdel nixbld - ``` - -4. There may also be references to Nix in: - - `/usr/local/etc/bashrc` - - `/usr/local/etc/zshrc` - - Shell configuration files in users' home directories - - which you may remove. 
- -### macOS - -> **Updating to macOS 15 Sequoia** -> -> If you recently updated to macOS 15 Sequoia and are getting -> ```console -> error: the user '_nixbld1' in the group 'nixbld' does not exist -> ``` -> when running Nix commands, refer to GitHub issue [NixOS/nix#10892](https://github.com/NixOS/nix/issues/10892) for instructions to fix your installation without reinstalling. - -1. If system-wide shell initialisation files haven't been altered since installing Nix, use the backups made by the installer: - - ```console - sudo mv /etc/zshrc.backup-before-nix /etc/zshrc - sudo mv /etc/bashrc.backup-before-nix /etc/bashrc - sudo mv /etc/bash.bashrc.backup-before-nix /etc/bash.bashrc - ``` - - Otherwise, edit `/etc/zshrc`, `/etc/bashrc`, and `/etc/bash.bashrc` to remove the lines sourcing `nix-daemon.sh`, which should look like this: - - ```bash - # Nix - if [ -e '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh' ]; then - . '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh' - fi - # End Nix - ``` - -2. Stop and remove the Nix daemon services: - - ```console - sudo launchctl unload /Library/LaunchDaemons/org.nixos.nix-daemon.plist - sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist - sudo launchctl unload /Library/LaunchDaemons/org.nixos.darwin-store.plist - sudo rm /Library/LaunchDaemons/org.nixos.darwin-store.plist - ``` - - This stops the Nix daemon and prevents it from being started next time you boot the system. - -3. Remove the `nixbld` group and the `_nixbuildN` users: - - ```console - sudo dscl . -delete /Groups/nixbld - for u in $(sudo dscl . -list /Users | grep _nixbld); do sudo dscl . -delete /Users/$u; done - ``` - - This will remove all the build users that no longer serve a purpose. - -4. 
Edit fstab using `sudo vifs` to remove the line mounting the Nix Store volume on `/nix`, which looks like - - ``` - UUID= /nix apfs rw,noauto,nobrowse,suid,owners - ``` - or - - ``` - LABEL=Nix\040Store /nix apfs rw,nobrowse - ``` - - by setting the cursor on the respective line using the arrow keys, and pressing `dd`, and then `:wq` to save the file. - - This will prevent automatic mounting of the Nix Store volume. - -5. Edit `/etc/synthetic.conf` to remove the `nix` line. - If this is the only line in the file you can remove it entirely: - - ```bash - if [ -f /etc/synthetic.conf ]; then - if [ "$(cat /etc/synthetic.conf)" = "nix" ]; then - sudo rm /etc/synthetic.conf - else - sudo vi /etc/synthetic.conf - fi - fi - ``` - - This will prevent the creation of the empty `/nix` directory. - -6. Remove the files Nix added to your system, except for the store: - - ```console - sudo rm -rf /etc/nix /var/root/.nix-profile /var/root/.nix-defexpr /var/root/.nix-channels ~/.nix-profile ~/.nix-defexpr ~/.nix-channels - ``` - - -7. Remove the Nix Store volume: - - ```console - sudo diskutil apfs deleteVolume /nix - ``` - - This will remove the Nix Store volume and everything that was added to the store. - - If the output indicates that the command couldn't remove the volume, you should make sure you don't have an _unmounted_ Nix Store volume. - Look for a "Nix Store" volume in the output of the following command: - - ```console - diskutil list - ``` - - If you _do_ find a "Nix Store" volume, delete it by running `diskutil apfs deleteVolume` with the store volume's `diskXsY` identifier. - - If you get an error that the volume is in use by the kernel, reboot and immediately delete the volume before starting any other process. - -> **Note** -> -> After you complete the steps here, you will still have an empty `/nix` directory. -> This is an expected sign of a successful uninstall. -> The empty `/nix` directory will disappear the next time you reboot. 
-> -> You do not have to reboot to finish uninstalling Nix. -> The uninstall is complete. -> macOS (Catalina+) directly controls root directories, and its read-only root will prevent you from manually deleting the empty `/nix` mountpoint. - -## Single User - -To remove a [single-user installation](./installing-binary.md#single-user-installation) of Nix, run: - -```console -rm -rf /nix ~/.nix-channels ~/.nix-defexpr ~/.nix-profile -``` -You might also want to manually remove references to Nix from your `~/.profile`. +[installer]: https://github.com/DeterminateSystems/nix-installer diff --git a/doc/manual/source/installation/upgrading.md b/doc/manual/source/installation/upgrading.md index a433f1d30e6c..8fe342b09b7c 100644 --- a/doc/manual/source/installation/upgrading.md +++ b/doc/manual/source/installation/upgrading.md @@ -1,40 +1,10 @@ # Upgrading Nix -> **Note** -> -> These upgrade instructions apply where Nix was installed following the [installation instructions in this manual](./index.md). - -Check which Nix version will be installed, for example from one of the [release channels](http://channels.nixos.org/) such as `nixpkgs-unstable`: - -```console -$ nix-shell -p nix -I nixpkgs=channel:nixpkgs-unstable --run "nix --version" -nix (Nix) 2.18.1 -``` - -> **Warning** -> -> Writing to the [local store](@docroot@/store/types/local-store.md) with a newer version of Nix, for example by building derivations with [`nix-build`](@docroot@/command-ref/nix-build.md) or [`nix-store --realise`](@docroot@/command-ref/nix-store/realise.md), may change the database schema! -> Reverting to an older version of Nix may therefore require purging the store database before it can be used. 
- -## Linux multi-user +You can upgrade Determinate Nix using Determinate Nixd: ```console -$ sudo su -# nix-env --install --file '' --attr nix cacert -I nixpkgs=channel:nixpkgs-unstable -# systemctl daemon-reload -# systemctl restart nix-daemon +sudo determinate-nixd upgrade ``` -## macOS multi-user +Note that the `sudo` is necessary here and upgrading fails without it. -```console -$ sudo nix-env --install --file '' --attr nix cacert -I nixpkgs=channel:nixpkgs-unstable -$ sudo launchctl remove org.nixos.nix-daemon -$ sudo launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist -``` - -## Single-user all platforms - -```console -$ nix-env --install --file '' --attr nix cacert -I nixpkgs=channel:nixpkgs-unstable -``` diff --git a/doc/manual/source/introduction.md b/doc/manual/source/introduction.md index e70411c11f56..039ad6f30b1c 100644 --- a/doc/manual/source/introduction.md +++ b/doc/manual/source/introduction.md @@ -1,4 +1,19 @@ -# Introduction +# Determinate Nix + +**Determinate Nix** is a downstream distribution of [Nix], a purely functional language, CLI tool, and package management system. +It's available on Linux, macOS, and Windows Subsystem for Linux (WSL). + +## Installing + +We recommend that macOS users install Determinate Nix using our graphical installer, [Determinate.pkg][pkg]. +For Linux and Windows Subsystem for Linux (WSL) users: + +```console +curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | \ + sh -s -- install +``` + +## How Nix works Nix is a _purely functional package manager_. 
This means that it treats packages like values in a purely functional programming language @@ -8,7 +23,7 @@ stores packages in the _Nix store_, usually the directory `/nix/store`, where each package has its own unique subdirectory such as - /nix/store/b6gvzjyb2pg0kjfwrjmg1vfhh54ad73z-firefox-33.1/ + /nix/store/q06x3jll2yfzckz2bzqak089p43ixkkq-firefox-33.1/ where `b6gvzjyb2pg0…` is a unique identifier for the package that captures all its dependencies (it’s a cryptographic hash of the @@ -184,10 +199,14 @@ to build configuration files in `/etc`). This means, among other things, that it is easy to roll back the entire configuration of the system to an earlier state. Also, users can install software without root privileges. For more information and downloads, see the [NixOS -homepage](https://nixos.org/). +homepage][nix]. ## License Nix is released under the terms of the [GNU LGPLv2.1 or (at your option) any later -version](http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html). +version][license]. 
+ +[license]: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html +[pkg]: https://install.determinate.systems/determinate-pkg/stable/Universal +[site]: https://nixos.org diff --git a/doc/manual/source/language/string-context.md b/doc/manual/source/language/string-context.md index 65c59d865f09..0968cc88b849 100644 --- a/doc/manual/source/language/string-context.md +++ b/doc/manual/source/language/string-context.md @@ -34,12 +34,12 @@ String context elements come in different forms: > [`builtins.storePath`] creates a string with a single constant string context element: > > ```nix - > builtins.getContext (builtins.storePath "/nix/store/wkhdf9jinag5750mqlax6z2zbwhqb76n-hello-2.10") + > builtins.getContext (builtins.storePath "/nix/store/ikwkxz4wwlp2g1428n7dy729cg1d9hin-hello-2.10") > ``` > evaluates to > ```nix > { - > "/nix/store/wkhdf9jinag5750mqlax6z2zbwhqb76n-hello-2.10" = { + > "/nix/store/ikwkxz4wwlp2g1428n7dy729cg1d9hin-hello-2.10" = { > path = true; > }; > } diff --git a/doc/manual/source/language/string-interpolation.md b/doc/manual/source/language/string-interpolation.md index 8e25d2b63114..3f6bf9b9f855 100644 --- a/doc/manual/source/language/string-interpolation.md +++ b/doc/manual/source/language/string-interpolation.md @@ -181,7 +181,7 @@ A derivation interpolates to the [store path] of its first [output](./derivation > "${pkgs.hello}" > ``` > -> "/nix/store/4xpfqf29z4m8vbhrqcz064wfmb46w5r7-hello-2.12.1" +> "/nix/store/qnlr7906z0mrl2syrkdbpicffq02nw07-hello-2.12.1" An attribute set interpolates to the return value of the function in the `__toString` applied to the attribute set itself. 
diff --git a/doc/manual/source/protocols/flake-schemas.md b/doc/manual/source/protocols/flake-schemas.md new file mode 100644 index 000000000000..18bffeef1dc1 --- /dev/null +++ b/doc/manual/source/protocols/flake-schemas.md @@ -0,0 +1,64 @@ +# Flake Schemas + +Flake schemas are a mechanism to allow tools like `nix flake show` and `nix flake check` to enumerate and check the contents of a flake +in a generic way, without requiring built-in knowledge of specific flake output types like `packages` or `nixosConfigurations`. + +A flake can define schemas for its outputs by defining a `schemas` output. `schemas` should be an attribute set with an attribute for +every output type that you want to be supported. If a flake does not have a `schemas` attribute, Nix uses a built-in set of schemas (namely https://github.com/DeterminateSystems/flake-schemas). + +A schema is an attribute set with the following attributes: + +| Attribute | Description | Default | +| :---------- | :---------------------------------------------------------------------------------------------- | :------ | +| `version` | Should be set to 1 | | +| `doc` | A string containing documentation about the flake output type in Markdown format. | | +| `allowIFD` | Whether the evaluation of the output attributes of this flake can read from derivation outputs. | `true` | +| `inventory` | A function that returns the contents of the flake output (described [below](#inventory)). | | + +# Inventory + +The `inventory` function returns a _node_ describing the contents of the flake output. A node is either a _leaf node_ or a _non-leaf node_. This allows nested flake output attributes to be described (e.g. `x86_64-linux.hello` inside a `packages` output). + +Non-leaf nodes must have the following attribute: + +| Attribute | Description | +| :--------- | :------------------------------------------------------------------------------------- | +| `children` | An attribute set of nodes. 
If this attribute is missing, the attribute is a leaf node. | + +Leaf nodes can have the following attributes: + +| Attribute | Description | +| :------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `derivationAttrPath` | If not null, a list of strings denoting the attribute path of the "main" derivation of this node. | +| `evalChecks` | An attribute set of Boolean values, used by `nix flake check`. Each attribute must evaluate to `true`. | +| `isFlakeCheck` | Whether `nix flake check` should build the attribute denoted by `derivationAttrPath`. | +| `shortDescription` | A one-sentence description of the node (such as the `meta.description` attribute in Nixpkgs). | +| `what` | A brief human-readable string describing the type of the node, e.g. `"package"` or `"development environment"`. This is used by tools like `nix flake show` to describe the contents of a flake. | + +Both leaf and non-leaf nodes can have the following attributes: + +| Attribute | Description | +| :----------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `forSystems` | A list of Nix system types (e.g. `["x86_64-linux"]`) supported by this node. This is used by tools to skip nodes that cannot be built on the user's system. Setting this on a non-leaf node allows all the children to be skipped, regardless of the `forSystems` attributes of the children. If this attribute is not set, the node is never skipped. 
| + +# Example + +Here is a schema that checks that every element of the `nixosConfigurations` flake output evaluates and builds correctly (meaning that it has a `config.system.build.toplevel` attribute that yields a buildable derivation). + +```nix +outputs = { + schemas.nixosConfigurations = { + version = 1; + doc = '' + The `nixosConfigurations` flake output defines NixOS system configurations. + ''; + inventory = output: { + children = builtins.mapAttrs (configName: machine: + { + what = "NixOS configuration"; + derivationAttrPath = [ "config" "system" "build" "toplevel" ]; + }) output; + }; + }; +}; +``` diff --git a/doc/manual/source/protocols/json/schema/derivation-options-v1.yaml b/doc/manual/source/protocols/json/schema/derivation-options-v1.yaml index 58ff070882ff..d247802cd6c2 100644 --- a/doc/manual/source/protocols/json/schema/derivation-options-v1.yaml +++ b/doc/manual/source/protocols/json/schema/derivation-options-v1.yaml @@ -9,7 +9,7 @@ description: | > **Warning** > > This JSON format is currently - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) + > [**experimental**](@docroot@/development/experimental-features.md) > and subject to change. type: object diff --git a/doc/manual/source/protocols/json/schema/derivation-v4.yaml b/doc/manual/source/protocols/json/schema/derivation-v4.yaml index 2528f7502e63..c1884769671e 100644 --- a/doc/manual/source/protocols/json/schema/derivation-v4.yaml +++ b/doc/manual/source/protocols/json/schema/derivation-v4.yaml @@ -9,7 +9,7 @@ description: | > **Warning** > > This JSON format is currently - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) + > [**experimental**](@docroot@/development/experimental-features.md) > and subject to change. 
type: object @@ -94,8 +94,8 @@ properties: > > ```json > "srcs": [ - > "47y241wqdhac3jm5l7nv0x4975mb1975-separate-debug-info.sh", - > "56d0w71pjj9bdr363ym3wj1zkwyqq97j-fix-pop-var-context-error.patch" + > "b8nwz167km1yciqpwzjj24f8jcy8pq1h-separate-debug-info.sh", + > "ihzmilr413r8fb3ah30yjnhlb18c1laz-fix-pop-var-context-error.patch" > ] > ``` items: @@ -140,7 +140,7 @@ properties: description: | Absolute path of the program used to perform the build. Typically this is the `bash` shell - (e.g. `/nix/store/r3j288vpmczbl500w6zz89gyfa4nr0b1-bash-4.4-p23/bin/bash`). + (e.g. `/nix/store/p4xlj4imjbnm4v0x5jf4qysvyjjlgq1d-bash-4.4-p23/bin/bash`). args: type: array diff --git a/doc/manual/source/protocols/json/schema/store-object-info-v2.yaml b/doc/manual/source/protocols/json/schema/store-object-info-v2.yaml index 3ed7e99e28d8..582b5e9eb476 100644 --- a/doc/manual/source/protocols/json/schema/store-object-info-v2.yaml +++ b/doc/manual/source/protocols/json/schema/store-object-info-v2.yaml @@ -6,12 +6,6 @@ description: | This schema describes the JSON representation of store object metadata as returned by commands like [`nix path-info --json`](@docroot@/command-ref/new-cli/nix3-path-info.md). - > **Warning** - > - > This JSON format is currently - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) - > and subject to change. - ### Field Categories Store object information can come in a few different variations. @@ -185,6 +179,15 @@ $defs: The total size of this store object and every other object in its [closure](@docroot@/glossary.md#gloss-closure). > This field is not stored at all, but computed by traversing the other fields across all the store objects in a closure. + + provenance: + oneOf: + - type: "null" + - type: object # FIXME + title: Provenance + description: | + An arbitrary JSON object containing provenance information about the store object, or `null` if not available. 
+ additionalProperties: false narInfo: @@ -268,4 +271,13 @@ $defs: > This is an impure "`.narinfo`" field that may not be included in certain contexts. > This field is not stored at all, but computed by traversing the other fields across all the store objects in a closure. + + provenance: + oneOf: + - type: "null" + - type: object # FIXME + title: Provenance + description: | + An arbitrary JSON object containing provenance information about the store object, or `null` if not available. + additionalProperties: false diff --git a/doc/manual/source/protocols/json/schema/store-path-v1.yaml b/doc/manual/source/protocols/json/schema/store-path-v1.yaml index 2012aab99150..61653d60e214 100644 --- a/doc/manual/source/protocols/json/schema/store-path-v1.yaml +++ b/doc/manual/source/protocols/json/schema/store-path-v1.yaml @@ -6,12 +6,6 @@ description: | This schema describes the JSON representation of store paths as used in various Nix JSON APIs. - > **Warning** - > - > This JSON format is currently - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) - > and subject to change. - ## Format Store paths in JSON are represented as strings containing just the hash and name portion, without the store directory prefix. diff --git a/doc/manual/source/protocols/json/schema/store-v1.yaml b/doc/manual/source/protocols/json/schema/store-v1.yaml index e0c6f8fed6ce..31aa10c41476 100644 --- a/doc/manual/source/protocols/json/schema/store-v1.yaml +++ b/doc/manual/source/protocols/json/schema/store-v1.yaml @@ -10,7 +10,7 @@ description: | > **Warning** > > This JSON format is currently - > [**experimental**](@docroot@/development/experimental-features.md#xp-feature-nix-command) + > [**experimental**](@docroot@/development/experimental-features.md) > and subject to change. 
type: object diff --git a/doc/manual/source/protocols/wasm.md b/doc/manual/source/protocols/wasm.md new file mode 100644 index 000000000000..ca67491cadd5 --- /dev/null +++ b/doc/manual/source/protocols/wasm.md @@ -0,0 +1,379 @@ +# Wasm Host Interface + +Nix provides a builtin for calling WebAssembly modules: `builtins.wasm`. This allows extending Nix with custom functionality written in languages that compile to WebAssembly (such as Rust). + +## Overview + +WebAssembly modules can interact with Nix values through a host interface that provides functions for creating and inspecting Nix values. The WASM module receives Nix values as opaque `ValueId` handles and uses host functions to work with them. + +The `builtins.wasm` builtin takes two arguments: +1. A configuration attribute set with the following attributes: + - `path` - Path to the WebAssembly module (required) + - `function` - Name of the Wasm function to call (required for non-WASI modules, not allowed for WASI modules) +2. The argument value to pass to the function + +WASI mode is automatically detected by checking if the module imports from `wasi_snapshot_preview1`. There are two calling conventions: + +- **Non-WASI mode** (no WASI imports) calls the Wasm export specified by `function` directly. The function receives its input as a `ValueId` parameter and returns a `ValueId`. +- **WASI mode** (when the module imports from `wasi_snapshot_preview1`) runs the WASI module's `_start` entry point. The input `ValueId` is passed as a command-line argument (`argv[1]`), and the result is returned by calling the `return_to_nix` host function. + +## Value IDs + +Nix values are represented in Wasm code as a `u32` referred to below as a `ValueId`. These are opaque handles that reference values managed by the Nix evaluator. Value ID 0 is reserved to represent a missing attribute lookup result. 
+
+## Entry Points
+
+### Non-WASI Mode
+
+Non-WASI mode is used when the module does **not** import from `wasi_snapshot_preview1`.
+
+Usage:
+```nix
+builtins.wasm {
+  path = <path to the WebAssembly module>;
+  function = <name of the function to call>;
+}
+```
+
+Every Wasm module used in non-WASI mode must export:
+- A `memory` object that the host can use to read/write data.
+- `nix_wasm_init_v1()`, a function that is called once when the module is instantiated.
+- The entry point function, whose name is specified by the `function` attribute. It takes a single `ValueId` and returns a single `ValueId` (i.e. it has type `fn(arg: u32) -> u32`).
+
+### WASI Mode
+
+WASI mode is automatically used when the module imports a `wasi_snapshot_preview1` function.
+
+Usage:
+```nix
+builtins.wasm {
+  path = <path to the WebAssembly module>;
+}
+```
+
+Every WASI module must export:
+- A `memory` object that the host can use to read/write data.
+- `_start()`, the standard WASI entry point. This function takes no parameters.
+
+The input value is passed as a command-line argument: `argv[1]` is set to the decimal representation of the `ValueId` of the input value.
+
+To return a result to Nix, the module must call the `return_to_nix` host function (see below) with the `ValueId` of the result. If `_start` finishes without calling `return_to_nix`, an error is raised.
+
+Standard output and standard error from the WASI module are captured and emitted as Nix warnings (one warning per line).
+
+## Host Functions
+
+All host functions are imported from the `env` module.
+
+### Error Handling
+
+#### `panic(ptr: u32, len: u32)`
+
+Aborts execution with an error message.
+
+**Parameters:**
+- `ptr` - Pointer to UTF-8 encoded error message in Wasm memory
+- `len` - Length of the error message in bytes
+
+#### `warn(ptr: u32, len: u32)`
+
+Emits a warning message.
+ +**Parameters:** +- `ptr` - Pointer to UTF-8 encoded warning message in Wasm memory +- `len` - Length of the warning message in bytes + +### Type Inspection + +#### `get_type(value: ValueId) -> u32` + +Returns the type of a Nix value. + +**Parameters:** +- `value` - ID of a Nix value + +**Return values:** +- `1` - Integer +- `2` - Float +- `3` - Boolean +- `4` - String +- `5` - Path +- `6` - Null +- `7` - Attribute set +- `8` - List +- `9` - Function + +**Note:** Forces evaluation of the value. + +### Integer Operations + +#### `make_int(n: i64) -> ValueId` + +Creates a Nix integer value. + +**Parameters:** +- `n` - The integer value + +**Returns:** Value ID of the created integer + +#### `get_int(value: ValueId) -> i64` + +Extracts an integer from a Nix value. Throws an error if the value is not an integer. + +**Parameters:** +- `value` - ID of a Nix integer value + +**Returns:** The integer value + +### Float Operations + +#### `make_float(x: f64) -> ValueId` + +Creates a Nix float value. + +**Parameters:** +- `x` - The float value + +**Returns:** Value ID of the created float + +#### `get_float(value: ValueId) -> f64` + +Extracts a float from a Nix value. Throws an error if the value is not a float. + +**Parameters:** +- `value` - ID of a Nix float value + +**Returns:** The float value + +### Boolean Operations + +#### `make_bool(b: i32) -> ValueId` + +Creates a Nix Boolean value. + +**Parameters:** +- `b` - Boolean value (0 = false, non-zero = true) + +**Returns:** Value ID of the created Boolean + +#### `get_bool(value: ValueId) -> i32` + +Extracts a Boolean from a Nix value. Throws an error if the value is not a Boolean. + +**Parameters:** +- `value` - ID of a Nix Boolean value + +**Returns:** 0 for false, 1 for true + +### Null Operations + +#### `make_null() -> ValueId` + +Creates a Nix null value. 
+ +**Returns:** Value ID of the null value + +### String Operations + +#### `make_string(ptr: u32, len: u32) -> ValueId` + +Creates a Nix string value from Wasm memory. + +**Parameters:** +- `ptr` - Pointer to a string in Wasm memory +- `len` - Length of the string in bytes + +**Note:** Strings do not require a null terminator. + +**Returns:** Value ID of the created string + +#### `copy_string(value: ValueId, ptr: u32, max_len: u32) -> u32` + +Copies a Nix string value into Wasm memory. + +**Parameters:** +- `value` - ID of a string value +- `ptr` - Pointer to buffer in Wasm memory +- `max_len` - Maximum number of bytes to copy + +**Returns:** The actual length of the string in bytes + +**Note:** If the returned length is greater than `max_len`, no data is copied. Call again with a larger buffer to get the full string. + +### Path Operations + +#### `make_path(base: ValueId, ptr: u32, len: u32) -> ValueId` + +Creates a Nix path value relative to a base path. + +**Parameters:** +- `base` - ID of a path value +- `ptr` - Pointer to a string in Wasm memory +- `len` - Length of the path string in bytes + +**Returns:** ID of a new path value + +**Note:** The path string is interpreted relative to the base path. The resulting path is in the same source tree ("source accessor") as the original path. + +#### `copy_path(value: ValueId, ptr: u32, max_len: u32) -> u32` + +Copies a Nix path value into Wasm memory as an absolute path string. + +**Parameters:** +- `value` - ID of a path value +- `ptr` - Pointer to buffer in Wasm memory +- `max_len` - Maximum number of bytes to copy + +**Returns:** The actual length of the path string in bytes + +**Note:** If the returned length is greater than `max_len`, no data is copied. + +### List Operations + +#### `make_list(ptr: u32, len: u32) -> ValueId` + +Creates a Nix list from an array of value IDs in Wasm memory. 
+ +**Parameters:** +- `ptr` - Pointer to array of `ValueId` (u32) in Wasm memory +- `len` - Number of elements in the array + +**Returns:** Value ID of the created list + +**Note:** The array must contain `len * 4` bytes (each ValueId is 4 bytes). + +#### `copy_list(value: ValueId, ptr: u32, max_len: u32) -> u32` + +Copies a Nix list into Wasm memory as an array of value IDs. + +**Parameters:** +- `value` - ID of a list value +- `ptr` - Pointer to buffer in Wasm memory +- `max_len` - Maximum number of elements to copy + +**Returns:** The actual number of elements in the list + +**Note:** If the returned length is greater than `max_len`, no data is copied. Each element is written as a `ValueId` (4 bytes). The buffer must be `max_len * 4` bytes large. + +### Attribute Set Operations + +#### `make_attrset(ptr: u32, len: u32) -> ValueId` + +Creates a Nix attribute set from an array of attributes in Wasm memory. + +**Parameters:** +- `ptr` - Pointer to array of attribute structures in Wasm memory +- `len` - Number of attributes + +**Returns:** Value ID of the created attribute set + +**Attribute structure format:** +```c +struct Attr { + name_ptr: u32, // Pointer to attribute name + name_len: u32, // Length of attribute name in bytes + value_id: u32, // ID of the attribute value +} +``` + +Each `Attr` element is 12 bytes (3 × 4 bytes). + +#### `copy_attrset(value: ValueId, ptr: u32, max_len: u32) -> u32` + +Copies a Nix attribute set into Wasm memory as an array of attribute structures. + +**Parameters:** +- `value` - ID of a Nix attribute set value +- `ptr` - Pointer to buffer in Wasm memory +- `max_len` - Maximum number of attributes to copy + +**Returns:** The actual number of attributes in the set + +**Note:** If the returned length is greater than `max_len`, no data is copied. 
+ +**Output structure format:** +```c +struct Attr { + value_id: u32, // ID of the attribute value + name_len: u32, // Length of attribute name in bytes +} +``` + +Each attribute is 8 bytes (2 × 4 bytes). Use `copy_attrname` to retrieve attribute names. + +#### `copy_attrname(value: ValueId, attr_idx: u32, ptr: u32, len: u32)` + +Copies an attribute name into Wasm memory. + +**Parameters:** +- `value` - ID of a Nix attribute set value +- `attr_idx` - Index of the attribute (from `copy_attrset`) +- `ptr` - Pointer to buffer in Wasm memory +- `len` - Length of the buffer (must exactly match the attribute name length) + +**Note:** Throws an error if `len` doesn't match the attribute name length or if `attr_idx` is out of bounds. + +#### `get_attr(value: ValueId, ptr: u32, len: u32) -> ValueId` + +Gets an attribute value from an attribute set by name. + +**Parameters:** +- `value` - ID of a Nix attribute set value +- `ptr` - Pointer to the attribute name in Wasm memory +- `len` - Length of the attribute name in bytes + +**Returns:** Value ID of the attribute value, or 0 if the attribute doesn't exist + +### Function Operations + +#### `call_function(fun: ValueId, ptr: u32, len: u32) -> ValueId` + +Calls a Nix function with arguments. + +**Parameters:** +- `fun` - ID of a Nix function value +- `ptr` - Pointer to array of `ValueId` arguments in Wasm memory +- `len` - Number of arguments + +**Returns:** Value ID of the function result + +#### `make_app(fun: ValueId, ptr: u32, len: u32) -> ValueId` + +Creates a lazy or partially applied function application. + +**Parameters:** +- `fun` - ID of a Nix function value +- `ptr` - Pointer to array of `ValueId` arguments in Wasm memory +- `len` - Number of arguments + +**Returns:** Value ID of the unevaluated application + +### Returning Results (WASI mode only) + +#### `return_to_nix(value: ValueId)` + +Returns a result value to the Nix evaluator from a WASI module. This function is only available in WASI mode. 
+ +**Parameters:** +- `value` - ID of the Nix value to return as the result of the `builtins.wasm` call + +**Note:** Calling this function immediately terminates the WASI module's execution. The module must call `return_to_nix` before finishing; otherwise, an error is raised. + +### File I/O + +#### `read_file(path: ValueId, ptr: u32, len: u32) -> u32` + +Reads a file into Wasm memory. + +**Parameters:** +- `path` - Value ID of a Nix path value +- `ptr` - Pointer to buffer in Wasm memory +- `len` - Maximum number of bytes to read + +**Returns:** The actual file size in bytes + +**Note:** Similar to `builtins.readFile`, but can handle files that cannot be represented as Nix strings (in particular, files containing NUL bytes). If the returned size is greater than `len`, no data is copied. + +## Example Usage + +For Rust bindings to this interface and several examples, see https://github.com/DeterminateSystems/nix-wasm-rust/. diff --git a/doc/manual/source/quick-start.md b/doc/manual/source/quick-start.md index 9eb7a3265903..42e4e9c0c247 100644 --- a/doc/manual/source/quick-start.md +++ b/doc/manual/source/quick-start.md @@ -3,10 +3,13 @@ This chapter is for impatient people who don't like reading documentation. For more in-depth information you are kindly referred to subsequent chapters. -1. Install Nix: +1. Install Nix. + We recommend that macOS users install Determinate Nix using our graphical installer, [Determinate.pkg][pkg]. + For Linux and Windows Subsystem for Linux (WSL) users: ```console - $ curl -L https://nixos.org/nix/install | sh + $ curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | \ + sh -s -- install ``` The install script will use `sudo`, so make sure you have sufficient rights. @@ -41,3 +44,5 @@ For more in-depth information you are kindly referred to subsequent chapters. 
```console $ nix-collect-garbage ``` + +[pkg]: https://install.determinate.systems/determinate-pkg/stable/Universal diff --git a/doc/manual/source/release-notes-determinate/changes.md b/doc/manual/source/release-notes-determinate/changes.md new file mode 100644 index 000000000000..80446d6d6ea5 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/changes.md @@ -0,0 +1,200 @@ +# Changes between Nix and Determinate Nix + +This section lists the differences between upstream Nix 2.33 and Determinate Nix 3.17.0. + +* In Determinate Nix, flakes are stable. You no longer need to enable the `flakes` experimental feature. + +* In Determinate Nix, the new Nix CLI (i.e. the `nix` command) is stable. You no longer need to enable the `nix-command` experimental feature. + +* Determinate Nix has a setting [`json-log-path`](@docroot@/command-ref/conf-file.md#conf-json-log-path) to send a copy of all Nix log messages (in JSON format) to a file or Unix domain socket. + +* Determinate Nix has made `nix profile install` an alias to `nix profile add`, a more symmetrical antonym of `nix profile remove`. + +* `nix-channel` and `channel:` url syntax (like `channel:nixos-24.11`) is deprecated, see: https://github.com/DeterminateSystems/nix-src/issues/34 + +* Using indirect flake references and implicit inputs is deprecated, see: https://github.com/DeterminateSystems/nix-src/issues/37 + +* Warnings around "dirty trees" are updated to reduce "dirty" jargon, and now refers to "uncommitted changes". + + + + + + + +* `nix upgrade-nix` is now inert, and suggests using `determinate-nixd upgrade`. [DeterminateSystems/nix-src#55](https://github.com/DeterminateSystems/nix-src/pull/55) + +* Determinate Nix has Lazy Trees, avoiding expensive copying of flake inputs to the Nix store. 
([DeterminateSystems/nix-src#27](https://github.com/DeterminateSystems/nix-src/pull/27), [DeterminateSystems/nix-src#56](https://github.com/DeterminateSystems/nix-src/pull/56)) + + + + + + + + + +* Documentation on how to replicate `nix-store --query --deriver` with the new `nix` cli. [DeterminateSystems/nix-src#82](https://github.com/DeterminateSystems/nix-src/pull/82) + +* In `nix profile`, the symbols `ε` and `∅` have been replaced with descriptive English words. [DeterminateSystems/nix-src#81](https://github.com/DeterminateSystems/nix-src/pull/81) + + + + + + + +* When remote building with `--keep-failed`, Determinate Nix shows "you can rerun" message if the derivation's platform is supported on this machine. [DeterminateSystems/nix-src#87](https://github.com/DeterminateSystems/nix-src/pull/87) + +* Improved error message when `sandbox-paths` specifies a missing file. [DeterminateSystems/nix-src#88](https://github.com/DeterminateSystems/nix-src/pull/88) + + + + + + + + + +* `nix store delete` now explains why deletion fails. [DeterminateSystems/nix-src#130](https://github.com/DeterminateSystems/nix-src/pull/130) + + + + + + + + + + + + + +* Tab completing arguments to Nix avoids network access. [DeterminateSystems/nix-src#161](https://github.com/DeterminateSystems/nix-src/pull/161) + +* Importing Nixpkgs and other tarballs to the cache is 2-4x faster. [DeterminateSystems/nix-src#149](https://github.com/DeterminateSystems/nix-src/pull/149) + +* Adding paths to the store is significantly faster. [DeterminateSystems/nix-src#162](https://github.com/DeterminateSystems/nix-src/pull/162) + + + + + +* Determinate Nix allows flake inputs to be fetched at build time. [DeterminateSystems/nix-src#49](https://github.com/DeterminateSystems/nix-src/pull/49) + + + +* The default `nix flake init` template is much more useful. [DeterminateSystems/nix-src#180](https://github.com/DeterminateSystems/nix-src/pull/180) + + + + + + + + +* Multithreaded evaluation support. 
[DeterminateSystems/nix-src#125](https://github.com/DeterminateSystems/nix-src/pull/125)
+
+
+
+
+
+
+* Determinate Nix only tries to substitute inputs if fetching from its original location fails. [DeterminateSystems/nix-src#202](https://github.com/DeterminateSystems/nix-src/pull/202)
+
+
+
+
+
+
+* A new command `nix nario` that replaces `nix-store --export|--import`. It also has a new file format (`--format 2`) that supports store path attributes such as signatures, and that can be imported more efficiently. [DeterminateSystems/nix-src#215](https://github.com/DeterminateSystems/nix-src/pull/215)
+
+* Determinate Nix prints the Nix version when using `-vv` or higher verbosity. [DeterminateSystems/nix-src#237](https://github.com/DeterminateSystems/nix-src/pull/237)
+
+
+
+
+* During evaluation, you can read or import from the result of `builtins.fetchClosure`. [DeterminateSystems/nix-src#241](https://github.com/DeterminateSystems/nix-src/pull/241)
+
+
+
+* Flakerefs in error messages and lockfile diffs are abbreviated for readability. [DeterminateSystems/nix-src#243](https://github.com/DeterminateSystems/nix-src/pull/243), [DeterminateSystems/nix-src#264](https://github.com/DeterminateSystems/nix-src/pull/264)
+
+
+
+
+
+
+
+
+* The Git fetcher doesn't compute `revCount` or `lastModified` if they're already specified. [DeterminateSystems/nix-src#269](https://github.com/DeterminateSystems/nix-src/pull/269)
+
+* The Git fetcher avoids doing a shallow Git fetch if it previously did a non-shallow fetch of the same repository. [DeterminateSystems/nix-src#270](https://github.com/DeterminateSystems/nix-src/pull/270)
+
+* Determinate Nix has a builtin copy of the flake registry, making it more resilient to network outages. [DeterminateSystems/nix-src#271](https://github.com/DeterminateSystems/nix-src/pull/271)
+
+
+
+* `nix build` and `nix profile` report failing or succeeding installables.
[DeterminateSystems/nix-src#281](https://github.com/DeterminateSystems/nix-src/pull/281) + +* `nix flake check` shows which outputs failed or succeeded. [DeterminateSystems/nix-src#285](https://github.com/DeterminateSystems/nix-src/pull/285) + +* Determinate Nix has a `nix ps` command to show active builds. [DeterminateSystems/nix-src#282](https://github.com/DeterminateSystems/nix-src/pull/282) + +* Determinate Nix has improved backward compatibility with lock files created by Nix < 2.20. [DeterminateSystems/nix-src#278](https://github.com/DeterminateSystems/nix-src/pull/278) + + + +* Determinate Nix has a builtin function `builtins.filterAttrs`. [DeterminateSystems/nix-src#291](https://github.com/DeterminateSystems/nix-src/pull/291) + +* `builtins.fetchTree` implicitly sets `__final = true` when a `narHash` is supplied. This allows the tree to be substituted. [DeterminateSystems/nix-src#297](https://github.com/DeterminateSystems/nix-src/pull/297) + + + + + +* Path inputs are now lazy [DeterminateSystems/nix-src#312](https://github.com/DeterminateSystems/nix-src/pull/312) + +* Improved performance when fetching a lot of dependencies with curl [DeterminateSystems/nix-src#315](https://github.com/DeterminateSystems/nix-src/pull/315) + + + +* Wasm support [DeterminateSystems/nix-src#309](https://github.com/DeterminateSystems/nix-src/pull/309) + +* Fix hung downloads when `http-connections = 0` [DeterminateSystems/nix-src#327](https://github.com/DeterminateSystems/nix-src/pull/327) + +* Support .gitattributes in subdirectories [DeterminateSystems/nix-src#335](https://github.com/DeterminateSystems/nix-src/pull/335) + +* builtins.getFlake fixes [DeterminateSystems/nix-src#337](https://github.com/DeterminateSystems/nix-src/pull/337) + +* builtins.getFlake: Support path values [DeterminateSystems/nix-src#338](https://github.com/DeterminateSystems/nix-src/pull/338) + +* Provenance [DeterminateSystems/nix-src#321](https://github.com/DeterminateSystems/nix-src/pull/321) + +* 
Add subcommand 'nix provenance show' [DeterminateSystems/nix-src#340](https://github.com/DeterminateSystems/nix-src/pull/340) + +* Increase the open file soft limit to the hard limit [DeterminateSystems/nix-src#347](https://github.com/DeterminateSystems/nix-src/pull/347) + + + + +* Record provenance for unlocked inputs and impure evaluations in [DeterminateSystems/nix-src#354](https://github.com/DeterminateSystems/nix-src/pull/354) + +* Add setting narinfo-cache-meta-ttl in [DeterminateSystems/nix-src#355](https://github.com/DeterminateSystems/nix-src/pull/355) + +* Add derivationWithMeta builtin in [DeterminateSystems/nix-src#357](https://github.com/DeterminateSystems/nix-src/pull/357) + +* Add builtins.wasi in [DeterminateSystems/nix-src#359](https://github.com/DeterminateSystems/nix-src/pull/359) + +* Add `nix provenance verify` command in [DeterminateSystems/nix-src#356](https://github.com/DeterminateSystems/nix-src/pull/356) + +* builtins.hashString: Devirtualize lazy paths, and re-enable lazy trees tests in [DeterminateSystems/nix-src#360](https://github.com/DeterminateSystems/nix-src/pull/360) + + + + + + + + + diff --git a/doc/manual/source/release-notes-determinate/index.md b/doc/manual/source/release-notes-determinate/index.md new file mode 100644 index 000000000000..bba33084424c --- /dev/null +++ b/doc/manual/source/release-notes-determinate/index.md @@ -0,0 +1,3 @@ +# Determinate Nix Release Notes + +This chapter lists the differences between Nix and Determinate Nix, as well as the release history of Determinate Nix. diff --git a/doc/manual/source/release-notes-determinate/rl-3.0.0.md b/doc/manual/source/release-notes-determinate/rl-3.0.0.md new file mode 100644 index 000000000000..d60786e9a72f --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.0.0.md @@ -0,0 +1,5 @@ +# Release 3.0.0 (2025-03-04) + +* Initial release of Determinate Nix. + +* Based on [upstream Nix 2.26.2](../release-notes/rl-2.26.md). 
diff --git a/doc/manual/source/release-notes-determinate/rl-3.1.0.md b/doc/manual/source/release-notes-determinate/rl-3.1.0.md new file mode 100644 index 000000000000..96b7819d08db --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.1.0.md @@ -0,0 +1,5 @@ +# Release 3.1.0 (2025-03-27) + +* Based on [upstream Nix 2.27.1](../release-notes/rl-2.27.md). + +* New setting `json-log-path` that sends a copy of all Nix log messages (in JSON format) to a file or Unix domain socket. diff --git a/doc/manual/source/release-notes-determinate/rl-3.3.0.md b/doc/manual/source/release-notes-determinate/rl-3.3.0.md new file mode 100644 index 000000000000..badf96415df0 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.3.0.md @@ -0,0 +1,5 @@ +# Release 3.3.0 (2025-04-11) + +* Based on [upstream Nix 2.28.1](../release-notes/rl-2.28.md). + +* The `nix profile install` command is now an alias to `nix profile add`, a more symmetrical antonym of `nix profile remove`. diff --git a/doc/manual/source/release-notes-determinate/rl-3.4.0.md b/doc/manual/source/release-notes-determinate/rl-3.4.0.md new file mode 100644 index 000000000000..24ae03ca554f --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.4.0.md @@ -0,0 +1,50 @@ +# Release 3.4.0 (2025-04-25) + +* Based on [upstream Nix 2.28.2](../release-notes/rl-2.28.md). + +* **Warn users that `nix-channel` is deprecated.** + +This is the first change accomplishing our roadmap item of deprecating Nix channels: https://github.com/DeterminateSystems/nix-src/issues/34 + +This is due to user confusion and surprising behavior of channels, especially in the context of user vs. root channels. + +The goal of this change is to make the user experience of Nix more predictable. +In particular, these changes are to support users with lower levels of experience who are following guides that focus on channels as the mechanism of distribution. 
+
+Users will now see this message:
+
+> nix-channel is deprecated in favor of flakes in Determinate Nix. For a guide on Nix flakes, see: https://zero-to-nix.com/. For details and to offer feedback on the deprecation process, see: https://github.com/DeterminateSystems/nix-src/issues/34.
+
+
+* **Warn users that `channel:` URLs are deprecated.**
+
+This is the second change regarding our deprecation of Nix channels.
+Using a `channel:` URL (like `channel:nixos-24.11`) will yield a warning like this:
+
+> Channels are deprecated in favor of flakes in Determinate Nix. Instead of 'channel:nixos-24.11', use 'https://nixos.org/channels/nixos-24.11/nixexprs.tar.xz'. For a guide on Nix flakes, see: https://zero-to-nix.com/. For details and to offer feedback on the deprecation process, see: https://github.com/DeterminateSystems/nix-src/issues/34.
+
+* **Warn users against indirect flake references in `flake.nix` inputs**
+
+This is the first change accomplishing our roadmap item of deprecating implicit and indirect flake inputs: https://github.com/DeterminateSystems/nix-src/issues/37
+
+The flake registry provides an important UX affordance for using Nix flakes and remote sources in command line uses.
+For that reason, the registry is not being deprecated entirely and will still be used for command-line incantations, like `nix run`.
+
+This move will eliminate user confusion and surprising behavior around global and local registries during flake input resolution.
+
+The goal of this change is to make the user experience of Nix more predictable.
+We have seen a pattern of confusion when using automatic flake inputs and local registries.
+Specifically, users' flake inputs resolving and locking inconsistently depending on the configuration of the host system.
+
+Users will now see the following warning if their flake.nix uses an implicit or indirect Flake reference input:
+
+> Flake input 'nixpkgs' uses the flake registry.
Using the registry in flake inputs is deprecated in Determinate Nix. To make your flake future-proof, add the following to 'xxx/flake.nix': +> +> inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11"; +> +> For more information, see: https://github.com/DeterminateSystems/nix-src/issues/37 + + +### Other updates: +* Improve the "dirty tree" message. Determinate Nix will now say `Git tree '...' has uncommitted changes` instead of `Git tree '...' is dirty` +* Stop warning about uncommitted changes in a Git repository when using `nix develop` diff --git a/doc/manual/source/release-notes-determinate/rl-3.4.2.md b/doc/manual/source/release-notes-determinate/rl-3.4.2.md new file mode 100644 index 000000000000..8acabd4425fd --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.4.2.md @@ -0,0 +1,4 @@ +# Release 3.4.2 (2025-05-05) + +* Based on [upstream Nix 2.28.3](../release-notes/rl-2.28.md). + diff --git a/doc/manual/source/release-notes-determinate/rl-3.5.0.md b/doc/manual/source/release-notes-determinate/rl-3.5.0.md new file mode 100644 index 000000000000..d5b26b9419e7 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.5.0.md @@ -0,0 +1,4 @@ +# Release 3.5.0 (2025-05-09) + +* Based on [upstream Nix 2.28.3](../release-notes/rl-2.28.md). + diff --git a/doc/manual/source/release-notes-determinate/rl-3.5.1.md b/doc/manual/source/release-notes-determinate/rl-3.5.1.md new file mode 100644 index 000000000000..b0813ca59c90 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.5.1.md @@ -0,0 +1,57 @@ +# Release 3.5.1 (2025-05-09) + +* Based on [upstream Nix 2.28.3](../release-notes/rl-2.28.md). + +## What's Changed + +Most notably, Lazy Trees has merged in to Determinate Nix and is in Feature Preview status, but remains disabled by default. +Lazy trees massively improves performance in virtually all scenarios because it enables Nix to avoid making unnecessary copies of files into the Nix store. 
+In testing, we saw iteration times on Nixpkgs **drop from over 12 seconds to 3.5 seconds**. + +After upgrading to Determinate Nix 3.5.1 with `sudo determinate-nixd upgrade`, enable lazy trees by adding this to `/etc/nix/nix.custom.conf`: + +``` +lazy-trees = true +``` + +Please note that our full flake regression test suite passes with no changes with lazy trees, and please report compatibility issues. + +Read [this GitHub comment](https://github.com/DeterminateSystems/nix-src/pull/27#pullrequestreview-2822153088) for further details and next steps. +We'll be publishing an update on the [Determinate Systems blog](https://determinate.systems/posts/) in the next few days with more information as well. + +Relevant PRs: +* Lazy trees v2 by @edolstra in [DeterminateSystems/nix-src#27](https://github.com/DeterminateSystems/nix-src/pull/27) +* Improve lazy trees backward compatibility by @edolstra in [DeterminateSystems/nix-src#56](https://github.com/DeterminateSystems/nix-src/pull/56) + + +### Additional changes in this release: +* Bug fix: Flake input URLs are canonicalized before checking flake.lock file staleness, avoiding needlessly regenerating flake.lock files with `dir` in URL-style flakerefs by @edolstra in [DeterminateSystems/nix-src#57](https://github.com/DeterminateSystems/nix-src/pull/57) +* `nix upgrade-nix` is deprecated in favor of `determinate-nixd upgrade`, by @gustavderdrache in [DeterminateSystems/nix-src#55](https://github.com/DeterminateSystems/nix-src/pull/55) +* UX: Improved build failure and dependency failure error messages to include needed output paths by @edolstra in [DeterminateSystems/nix-src#58](https://github.com/DeterminateSystems/nix-src/pull/58). 
+ +Previously: + +``` +error: builder for '/nix/store/[...]-nested-failure-bottom.drv' failed with exit code 1 +error: 1 dependencies of derivation '/nix/store/[...]-nested-failure-middle.drv' failed to build +error: 1 dependencies of derivation '/nix/store/[...]-nested-failure-top.drv' failed to build +``` + +Now: + +``` +error: Cannot build '/nix/store/w37gflm9wz9dcnsgy3sfrmnlvm8qigaj-nested-failure-bottom.drv'. + Reason: builder failed with exit code 1. + Output paths: + /nix/store/yzybs8kp35dfipbzdlqcc6lxz62hax04-nested-failure-bottom +error: Cannot build '/nix/store/00gr5hlxfc03x2675w6nn3pwfrz2fr62-nested-failure-middle.drv'. + Reason: 1 dependency failed. + Output paths: + /nix/store/h781j5h4bdchmb4c2lvy8qzh8733azhz-nested-failure-middle +error: Cannot build '/nix/store/8am0ng1gyx8sbzyr0yx6jd5ix3yy5szc-nested-failure-top.drv'. + Reason: 1 dependency failed. + Output paths: + /nix/store/fh12637kgvp906s9yhi9w2dc7ghfwxs1-nested-failure-top +``` + +**Full Changelog**: [v3.4.2...v3.5.1](https://github.com/DeterminateSystems/nix-src/compare/v3.4.2...v3.5.1) diff --git a/doc/manual/source/release-notes-determinate/rl-3.5.2.md b/doc/manual/source/release-notes-determinate/rl-3.5.2.md new file mode 100644 index 000000000000..bc5396c255b6 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.5.2.md @@ -0,0 +1,11 @@ +# Release 3.5.2 (2025-05-12) + +* Based on [upstream Nix 2.28.3](../release-notes/rl-2.28.md). 
+ +## What's Changed +* Fix a regression where narHash was not added to lock files when lazy trees were disabled by @edolstra in [DeterminateSystems/nix-src#63](https://github.com/DeterminateSystems/nix-src/pull/63) + +* Tell users a source is corrupted ("cannot read file from tarball: Truncated tar archive detected while reading data"), improving over the previous 'cannot read file from tarball' error by @edolstra in [DeterminateSystems/nix-src#64](https://github.com/DeterminateSystems/nix-src/pull/64) + + +**Full Changelog**: [v3.5.1...v3.5.2](https://github.com/DeterminateSystems/nix-src/compare/v3.5.1...v3.5.2) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.0.md b/doc/manual/source/release-notes-determinate/rl-3.6.0.md new file mode 100644 index 000000000000..453ab6c301dc --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.0.md @@ -0,0 +1,11 @@ +# Release 3.6.0 (2025-05-22) + +* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md). + +## What's Changed +* Install 'nix profile add' manpage by @edolstra in [DeterminateSystems/nix-src#69](https://github.com/DeterminateSystems/nix-src/pull/69) +* Sync with upstream 2.29.0 by @edolstra in [DeterminateSystems/nix-src#67](https://github.com/DeterminateSystems/nix-src/pull/67) +* Emit warnings when using import-from-derivation by setting the `trace-import-from-derivation` option to `true` by @gustavderdrache in [DeterminateSystems/nix-src#70](https://github.com/DeterminateSystems/nix-src/pull/70) + + +**Full Changelog**: [v3.5.2...v3.6.0](https://github.com/DeterminateSystems/nix-src/compare/v3.5.2...v3.6.0) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.1.md b/doc/manual/source/release-notes-determinate/rl-3.6.1.md new file mode 100644 index 000000000000..12505afee278 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.1.md @@ -0,0 +1,9 @@ +# Release 3.6.1 (2025-05-24) + +* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md). 
+ +## What's Changed +* Fix nlohmann error in fromStructuredAttrs() by @edolstra in [DeterminateSystems/nix-src#73](https://github.com/DeterminateSystems/nix-src/pull/73) + + +**Full Changelog**: [v3.6.0...v3.6.1](https://github.com/DeterminateSystems/nix-src/compare/v3.6.0...v3.6.1) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.2.md b/doc/manual/source/release-notes-determinate/rl-3.6.2.md new file mode 100644 index 000000000000..882c142f00c3 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.2.md @@ -0,0 +1,15 @@ +# Release 3.6.2 (2025-06-02) + +* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md). + +## What's Changed +* Dramatically improve the performance of nix store copy-sigs: Use http-connections setting to control parallelism by @edolstra in [DeterminateSystems/nix-src#80](https://github.com/DeterminateSystems/nix-src/pull/80) +* Document how to replicate nix-store --query --deriver with the nix cli by @grahamc in [DeterminateSystems/nix-src#82](https://github.com/DeterminateSystems/nix-src/pull/82) +* The garbage collector no longer gives up if it encounters an undeletable file, by @edolstra in [DeterminateSystems/nix-src#83](https://github.com/DeterminateSystems/nix-src/pull/83) +* nix profile: Replace ε and ∅ with descriptive English words by @grahamc in [DeterminateSystems/nix-src#81](https://github.com/DeterminateSystems/nix-src/pull/81) +* Rework README to clarify that this distribution is our distribution, by @lucperkins in [DeterminateSystems/nix-src#84](https://github.com/DeterminateSystems/nix-src/pull/84) +* Include the source location when warning about inefficient double copies by @edolstra in [DeterminateSystems/nix-src#79](https://github.com/DeterminateSystems/nix-src/pull/79) +* Call out that `--keep-failed` with remote builders will keep the failed build directory on that builder by @cole-h in [DeterminateSystems/nix-src#85](https://github.com/DeterminateSystems/nix-src/pull/85) + + +**Full 
Changelog**: [v3.6.1...v3.6.2](https://github.com/DeterminateSystems/nix-src/compare/v3.6.1...v3.6.2) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.5.md b/doc/manual/source/release-notes-determinate/rl-3.6.5.md new file mode 100644 index 000000000000..8ef5be0fd0d3 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.5.md @@ -0,0 +1,19 @@ +# Release 3.6.5 (2025-06-12) + +* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md). + +## What's Changed +* When remote building with --keep-failed, only show "you can rerun" message if the derivation's platform is supported on this machine by @cole-h in [DeterminateSystems/nix-src#87](https://github.com/DeterminateSystems/nix-src/pull/87) +* Indicate that sandbox-paths specifies a missing file in the corresponding error message. by @cole-h in [DeterminateSystems/nix-src#88](https://github.com/DeterminateSystems/nix-src/pull/88) +* Render lazy tree paths in messages without the /nix/store/hash... prefix in substituted source trees by @edolstra in [DeterminateSystems/nix-src#91](https://github.com/DeterminateSystems/nix-src/pull/91) +* Use FlakeHub inputs by @lucperkins in [DeterminateSystems/nix-src#89](https://github.com/DeterminateSystems/nix-src/pull/89) +* Proactively cache more flake inputs and fetches by @edolstra in [DeterminateSystems/nix-src#93](https://github.com/DeterminateSystems/nix-src/pull/93) +* Fix: register extra builtins just once by @edolstra in [DeterminateSystems/nix-src#97](https://github.com/DeterminateSystems/nix-src/pull/97) +* Fix the link to `builders-use-substitutes` documentation for `builders` by @lucperkins in [DeterminateSystems/nix-src#102](https://github.com/DeterminateSystems/nix-src/pull/102) +* Improve error messages that use the hypothetical future tense of "will" by @lucperkins in [DeterminateSystems/nix-src#92](https://github.com/DeterminateSystems/nix-src/pull/92) +* Make the `nix repl` test more stable by @edolstra in 
[DeterminateSystems/nix-src#103](https://github.com/DeterminateSystems/nix-src/pull/103) +* Run nixpkgsLibTests against lazy trees by @edolstra in [DeterminateSystems/nix-src#100](https://github.com/DeterminateSystems/nix-src/pull/100) +* Run the Nix test suite against lazy trees by @edolstra in [DeterminateSystems/nix-src#105](https://github.com/DeterminateSystems/nix-src/pull/105) +* Improve caching of inputs by @edolstra in [DeterminateSystems/nix-src#98](https://github.com/DeterminateSystems/nix-src/pull/98), [DeterminateSystems/nix-src#110](https://github.com/DeterminateSystems/nix-src/pull/110), and [DeterminateSystems/nix-src#115](https://github.com/DeterminateSystems/nix-src/pull/115) + +**Full Changelog**: [v3.6.2...v3.6.5](https://github.com/DeterminateSystems/nix-src/compare/v3.6.2...v3.6.5) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.6.md b/doc/manual/source/release-notes-determinate/rl-3.6.6.md new file mode 100644 index 000000000000..bf4e3690afa1 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.6.md @@ -0,0 +1,7 @@ +# Release 3.6.6 (2025-06-17) + +* Based on [upstream Nix 2.29.0](../release-notes/rl-2.29.md). + +## What's Changed + +* No-op release on the nix-src side, due to a regression on nix-darwin in determinate-nixd. diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.7.md b/doc/manual/source/release-notes-determinate/rl-3.6.7.md new file mode 100644 index 000000000000..197587f1b3a9 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.7.md @@ -0,0 +1,17 @@ +# Release 3.6.7 (2025-06-24) + +* Based on [upstream Nix 2.29.1](../release-notes/rl-2.29.md). 
+ +## What's Changed + +### Security contents + +* Patched against GHSA-g948-229j-48j3 + +### Lazy trees: + +* Lazy trees now produces `flake.lock` files with NAR hashes unless `lazy-locks` is set to `true` by @edolstra in [DeterminateSystems/nix-src#113](https://github.com/DeterminateSystems/nix-src/pull/113) +* Improved caching with lazy-trees when using --impure, with enhanced testing by @edolstra in [DeterminateSystems/nix-src#117](https://github.com/DeterminateSystems/nix-src/pull/117) + + +**Full Changelog**: [v3.6.6...v3.6.7](https://github.com/DeterminateSystems/nix-src/compare/v3.6.6...v3.6.7) diff --git a/doc/manual/source/release-notes-determinate/rl-3.6.8.md b/doc/manual/source/release-notes-determinate/rl-3.6.8.md new file mode 100644 index 000000000000..c4b4b96c9e73 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.6.8.md @@ -0,0 +1,12 @@ +# Release 3.6.8 (2025-06-25) + +* Based on [upstream Nix 2.29.1](../release-notes/rl-2.29.md). + +## What's Changed +* Fix fetchToStore() caching with --impure, improve testing by @edolstra in [DeterminateSystems/nix-src#117](https://github.com/DeterminateSystems/nix-src/pull/117) +* Add lazy-locks setting by @edolstra in [DeterminateSystems/nix-src#113](https://github.com/DeterminateSystems/nix-src/pull/113) +* Sync 2.29.1 by @edolstra in [DeterminateSystems/nix-src#124](https://github.com/DeterminateSystems/nix-src/pull/124) +* Release v3.6.7 by @github-actions in [DeterminateSystems/nix-src#126](https://github.com/DeterminateSystems/nix-src/pull/126) + + +**Full Changelog**: [v3.6.6...v3.6.8](https://github.com/DeterminateSystems/nix-src/compare/v3.6.6...v3.6.8) diff --git a/doc/manual/source/release-notes-determinate/rl-3.7.0.md b/doc/manual/source/release-notes-determinate/rl-3.7.0.md new file mode 100644 index 000000000000..615e858592e2 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.7.0.md @@ -0,0 +1,63 @@ +# Release 3.7.0 (2025-07-03) + +- Based on [upstream Nix 
2.29.1](../release-notes/rl-2.29.md). + +## What's Changed + +### Prefetch flake inputs in parallel + +By @edolstra in [DeterminateSystems/nix-src#127](https://github.com/DeterminateSystems/nix-src/pull/127) + +This release brings the command `nix flake prefetch-inputs`. + +Flake inputs are typically fetched "just in time." +That means Nix fetches a flake input when the evaluator needs it, and not before. +When the evaluator needs an input, evaluation is paused until the source is available. + +This causes a significant slow-down on projects with lots of flake inputs. + +The new command `nix flake prefetch-inputs` fetches all flake inputs in parallel. +We expect running this new command before building will dramatically improve evaluation performance for most projects, especially in CI. +Note that projects with many unused flake inputs may not benefit from this change, since the new command fetches every input whether they're used or not. + +### Deep flake input overrides now work as expected + +By @edolstra in [DeterminateSystems/nix-src#108](https://github.com/DeterminateSystems/nix-src/pull/108) + +An override like: + +``` +inputs.foo.inputs.bar.inputs.nixpkgs.follows = "nixpkgs"; +``` + +implicitly set `inputs.foo.inputs.bar` to `flake:bar`, which led to an unexpected error like: + +``` +error: cannot find flake 'flake:bar' in the flake registries +``` + +We now no longer create a parent override (like for `foo.bar` in the example above) if it doesn't set an explicit ref or follows attribute. +We only recursively apply its child overrides. + +### `nix store delete` now shows you why deletion was not possible + +By @edolstra in [DeterminateSystems/nix-src#130](https://github.com/DeterminateSystems/nix-src/pull/130) + +For example: + +``` +error: Cannot delete path '/nix/store/6fcrjgfjip2ww3sx51rrmmghfsf60jvi-patchelf-0.14.3' + because it's referenced by the GC root '/home/eelco/Dev/nix-master/build/result'. 
+ +error: Cannot delete path '/nix/store/lf3lrf8bjfn8xvr0az9q96y989sxs5r9-cowsay-3.8.4' + because it's referenced by the GC root '/proc/3600568/environ'. + +error: Cannot delete path '/nix/store/klyng5rpdkwi5kbxkncy4gjwb490dlhb-foo.drv' + because it's in use by '{nix-process:3605324}'. +``` + +### Lazy-tree improvements + +- Improved lazy-tree evaluation caching for flakes accessed with a `path` flakeref by @edolstra in [DeterminateSystems/nix-src#131](https://github.com/DeterminateSystems/nix-src/pull/131) + +**Full Changelog**: [v3.6.8...v3.7.0](https://github.com/DeterminateSystems/nix-src/compare/v3.6.8...v3.7.0) diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.0.md b/doc/manual/source/release-notes-determinate/rl-3.8.0.md new file mode 100644 index 000000000000..4103d6df94e0 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.8.0.md @@ -0,0 +1,29 @@ +# Release 3.8.0 (2025-07-10) + +* Based on [upstream Nix 2.30.0](../release-notes/rl-2.30.md). + +## What's Changed + +### Faster CI with `nix flake check` + +`nix flake check` no longer downloads flake outputs if no building is necessary. + +This command is intended to validate that a flake can fully evaluate and all outputs can build. +If the outputs are available in a binary cache then both properties are confirmed to be true. +Notably, downloading the output from the binary cache is not strictly necessary for the validation. + +Previously, `nix flake check` would download a flake output if the full build is available in a binary cache. + +Some users will find this change significantly reduces costly bandwidth and CI workflow time. + +PR: [DeterminateSystems/nix-src#134](https://github.com/DeterminateSystems/nix-src/pull/134) + +### Improved flake locking of transitive dependencies + +Determinate Nix now re-locks all transitive dependencies when changing a flake input's source URL. 
+ +This fixes an issue where in some scenarios Nix would not re-lock those inputs and incorrectly use the old inputs' dependencies. + +PR: [DeterminateSystems/nix-src#137](https://github.com/DeterminateSystems/nix-src/pull/137) + +**Full Changelog**: [v3.7.0...v3.8.0](https://github.com/DeterminateSystems/nix-src/compare/v3.7.0...v3.8.0) diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.1.md b/doc/manual/source/release-notes-determinate/rl-3.8.1.md new file mode 100644 index 000000000000..90dc328f6ec2 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.8.1.md @@ -0,0 +1,9 @@ +# Release 3.8.1 (2025-07-11) + +* Based on [upstream Nix 2.30.0](../release-notes/rl-2.30.md). + +## What's Changed +* Address ifdef problem with macOS/BSD sandboxing by @gustavderdrache in [DeterminateSystems/nix-src#142](https://github.com/DeterminateSystems/nix-src/pull/142) + + +**Full Changelog**: [v3.8.0...v3.8.1](https://github.com/DeterminateSystems/nix-src/compare/v3.8.0...v3.8.1) diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.2.md b/doc/manual/source/release-notes-determinate/rl-3.8.2.md new file mode 100644 index 000000000000..638d90f6841b --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.8.2.md @@ -0,0 +1,10 @@ +# Release 3.8.2 (2025-07-12) + +* Based on [upstream Nix 2.30.0](../release-notes/rl-2.30.md). 
+ +## What's Changed +* ci: don't run the full test suite for x86_64-darwin by @grahamc in [DeterminateSystems/nix-src#144](https://github.com/DeterminateSystems/nix-src/pull/144) +* Try publishing the manual again by @grahamc in [DeterminateSystems/nix-src#145](https://github.com/DeterminateSystems/nix-src/pull/145) + + +**Full Changelog**: [v3.8.1...v3.8.2](https://github.com/DeterminateSystems/nix-src/compare/v3.8.1...v3.8.2) diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.3.md b/doc/manual/source/release-notes-determinate/rl-3.8.3.md new file mode 100644 index 000000000000..d3eb02bc7ea5 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.8.3.md @@ -0,0 +1,26 @@ +# Release 3.8.3 (2025-07-18) + +* Based on [upstream Nix 2.30.1](../release-notes/rl-2.30.md). + +## What's Changed + +### Non-blocking evaluation caching + +Users reported evaluation would occasionally block other evaluation processes. + +The evaluation cache database is now opened in write-ahead mode to prevent delaying evaluations. + +PR: [DeterminateSystems/nix-src#150](https://github.com/DeterminateSystems/nix-src/pull/150) + +### New experimental feature: `external-builders` + +This experimental feature allows Nix to call an external program for the build environment. + +The interface and behavior of this feature may change at any moment without a correspondingly major semver version change. 
+ +PRs: +- [DeterminateSystems/nix-src#141](https://github.com/DeterminateSystems/nix-src/pull/141) +- [DeterminateSystems/nix-src#152](https://github.com/DeterminateSystems/nix-src/pull/152) +- [DeterminateSystems/nix-src#78](https://github.com/DeterminateSystems/nix-src/pull/78) + +**Full Changelog**: [v3.8.2...v3.8.3](https://github.com/DeterminateSystems/nix-src/compare/v3.8.2...v3.8.3) diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.4.md b/doc/manual/source/release-notes-determinate/rl-3.8.4.md new file mode 100644 index 000000000000..7c73e75ca023 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.8.4.md @@ -0,0 +1,9 @@ +# Release 3.8.4 (2025-07-21) + +* Based on [upstream Nix 2.30.1](../release-notes/rl-2.30.md). + +## What's Changed +* Revert "Use WAL mode for SQLite cache databases" by @grahamc in [DeterminateSystems/nix-src#155](https://github.com/DeterminateSystems/nix-src/pull/155) + + +**Full Changelog**: [v3.8.3...v3.8.4](https://github.com/DeterminateSystems/nix-src/compare/v3.8.3...v3.8.4) diff --git a/doc/manual/source/release-notes-determinate/rl-3.8.5.md b/doc/manual/source/release-notes-determinate/rl-3.8.5.md new file mode 100644 index 000000000000..0f1bbe6f99d7 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/rl-3.8.5.md @@ -0,0 +1,58 @@ +## What's Changed + +### Less time "unpacking into the Git cache" + +Unpacking sources into the user's cache now takes 1/2 to 1/4 of the time it used to. +Previously, Nix serially unpacked sources into the cache. +This change takes better advantage of our users' hardware by parallelizing the import. +Real life testing shows an initial Nixpkgs import takes 3.6s on Linux, when it used to take 11.7s. + +PR: [DeterminateSystems/nix-src#149](https://github.com/DeterminateSystems/nix-src/pull/149) + +### Copy paths to the daemon in parallel + +Determinate Nix's evaluator no longer blocks evaluation when copying paths to the store. 
+Previously, Nix would pause evaluation when it needed to add files to the store. +Now, the copying is performed in the background allowing evaluation to proceed. + +PR: [DeterminateSystems/nix-src#162](https://github.com/DeterminateSystems/nix-src/pull/162) + +### Faster Nix evaluation by reducing duplicate Nix daemon queries + +Determinate Nix more effectively caches store path validity data within a single evaluation. +Previously, the Nix client would perform many thousands of extra Nix daemon requests. +Each extra request takes real time, and this change reduced a sample evaluation by over 12,000 requests. + +PR: [DeterminateSystems/nix-src#157](https://github.com/DeterminateSystems/nix-src/pull/157) + +### More responsive tab completion + +Tab completion now implies the "--offline" flag, which disables most network requests. +Previously, tab completing Nix arguments would attempt to fetch sources and access binary caches. +Operating in offline mode improves the interactive experience of Nix when tab completing. + +PR: [DeterminateSystems/nix-src#161](https://github.com/DeterminateSystems/nix-src/pull/161) + +### ZFS users: we fixed the mysterious stall. + +Opening the Nix database is usually instantaneous but sometimes has a several second latency. +Determinate Nix works around this issue, eliminating the frustrating random stall when running Nix commands. + +PR: [DeterminateSystems/nix-src#158](https://github.com/DeterminateSystems/nix-src/pull/158) + +### Other changes + +* Determinate Nix is now fully formatted by clang-format, making it easier than ever to contribute to the project. + +PR: [DeterminateSystems/nix-src#159](https://github.com/DeterminateSystems/nix-src/pull/159) + +* Determinate Nix is now based on upstream Nix 2.30.2. + +PR: [DeterminateSystems/nix-src#160](https://github.com/DeterminateSystems/nix-src/pull/160) + +* Determinate Nix now uses `main` as our development branch, moving away from `detsys-main`. 
+ +PRs: +* [DeterminateSystems/nix-src#164](https://github.com/DeterminateSystems/nix-src/pull/164) +* [DeterminateSystems/nix-src#166](https://github.com/DeterminateSystems/nix-src/pull/166) + diff --git a/doc/manual/source/release-notes-determinate/v3.10.0.md b/doc/manual/source/release-notes-determinate/v3.10.0.md new file mode 100644 index 000000000000..c644dd787446 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.10.0.md @@ -0,0 +1,10 @@ +# Release 3.10.0 (2025-09-02) + +* Based on [upstream Nix 2.31.0](../release-notes/rl-2.31.md). + +## What's Changed + +This release rebases Determinate Nix on upstream Nix 2.31.0. + + +**Full Changelog**: [v3.9.1...v3.10.0](https://github.com/DeterminateSystems/nix-src/compare/v3.9.1...v3.10.0) diff --git a/doc/manual/source/release-notes-determinate/v3.10.1.md b/doc/manual/source/release-notes-determinate/v3.10.1.md new file mode 100644 index 000000000000..08cbe4fd0583 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.10.1.md @@ -0,0 +1,9 @@ +# Release 3.10.1 (2025-09-02) + +* Based on [upstream Nix 2.31.1](../release-notes/rl-2.31.md). + +## What's Changed +This release rebases Determinate Nix on upstream Nix 2.31.1. + + +**Full Changelog**: [v3.10.0...v3.10.1](https://github.com/DeterminateSystems/nix-src/compare/v3.10.0...v3.10.1) diff --git a/doc/manual/source/release-notes-determinate/v3.11.0.md b/doc/manual/source/release-notes-determinate/v3.11.0.md new file mode 100644 index 000000000000..7abb665a5a9f --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.11.0.md @@ -0,0 +1,36 @@ +# Release 3.11.0 (2025-09-03) + +- Based on [upstream Nix 2.31.1](../release-notes/rl-2.31.md). 
+ +## What's Changed + +### Parallel evaluation + +The following commands are now able to evaluate Nix expressions in parallel: + +- `nix search` +- `nix flake check` +- `nix flake show` +- `nix eval --json` + +This is currently in developer preview, and we'll be turning it on for more users in the coming weeks. +If you would like to try it right away, specify `eval-cores` in your `/etc/nix/nix.custom.conf`: + +```ini +eval-cores = 0 # Evaluate across all cores +``` + +Further, we introduced a new builtin: `builtins.parallel`. +This new builtin allows users to explicitly parallelize evaluation within a Nix expression. + +Using this new builtin requires turning on an additional experimental feature: + +```ini +extra-experimental-features = parallel-eval +``` + +Please note that this new builtin is subject to change semantics or even go away during the developer preview. + +PR: [DeterminateSystems/nix-src#125](https://github.com/DeterminateSystems/nix-src/pull/125) + +**Full Changelog**: [v3.10.1...v3.11.0](https://github.com/DeterminateSystems/nix-src/compare/v3.10.1...v3.11.0) diff --git a/doc/manual/source/release-notes-determinate/v3.11.1.md b/doc/manual/source/release-notes-determinate/v3.11.1.md new file mode 100644 index 000000000000..305971643330 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.11.1.md @@ -0,0 +1,9 @@ +# Release 3.11.1 (2025-09-04) + +* Based on [upstream Nix 2.31.1](../release-notes/rl-2.31.md). 
+ +## What's Changed +* Fix race condition in Value::isTrivial() by @edolstra in [DeterminateSystems/nix-src#192](https://github.com/DeterminateSystems/nix-src/pull/192) + + +**Full Changelog**: [v3.11.0...v3.11.1](https://github.com/DeterminateSystems/nix-src/compare/v3.11.0...v3.11.1) diff --git a/doc/manual/source/release-notes-determinate/v3.11.2.md b/doc/manual/source/release-notes-determinate/v3.11.2.md new file mode 100644 index 000000000000..ac4fe569dffe --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.11.2.md @@ -0,0 +1,24 @@ +# Release 3.11.2 (2025-09-12) + +* Based on [upstream Nix 2.31.1](../release-notes/rl-2.31.md). + +## What's Changed + +### Fix some interactions with the registry and flakes that include a `?dir=` parameter + +Some users were experiencing issues when their flake registry contained a flake that included a `?dir=` parameter, causing commands like `nix eval registry-with-flake-in-subdir#output` and those that used `--inputs-from` to fail or behave incorrectly. + +This is now fixed, so use your flakes inside subdirs without fear! + +PRs: [DeterminateSystems/nix-src#196](https://github.com/DeterminateSystems/nix-src/pull/196), [DeterminateSystems/nix-src#199](https://github.com/DeterminateSystems/nix-src/pull/199) + +### Only substitute inputs if they haven't already been fetched + +When using `lazy-trees`, you might have noticed Nix fetching some source inputs from a cache, even though you could have sworn it already fetched those inputs! + +This fixes that behavior such that Nix will try to fetch inputs from their original location, and only if that fails fall back to fetching from a substituter. 
+ +PR: [DeterminateSystems/nix-src#202](https://github.com/DeterminateSystems/nix-src/pull/202) + + +**Full Changelog**: [v3.11.1...v3.11.2](https://github.com/DeterminateSystems/nix-src/compare/v3.11.1...v3.11.2) diff --git a/doc/manual/source/release-notes-determinate/v3.11.3.md b/doc/manual/source/release-notes-determinate/v3.11.3.md new file mode 100644 index 000000000000..fab5ed51a4b5 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.11.3.md @@ -0,0 +1,34 @@ +# Release 3.11.3 (2025-10-09) + +* Based on [upstream Nix 2.31.2](../release-notes/rl-2.31.md). + +## What's Changed + +### Fix some bugs and interactions with parallel eval + +We received some reports of parallel eval having issues, such as not being able to be interrupted, infinite recursion hanging forever, and segfaults when using the experimental `builtins.parallel`. + +Those have now been fixed. + +Additionally, the debugger now disables parallel eval, because the two features are incompatible. + +PRs: [DeterminateSystems/nix-src#206](https://github.com/DeterminateSystems/nix-src/pull/206), [DeterminateSystems/nix-src#213](https://github.com/DeterminateSystems/nix-src/pull/213), [DeterminateSystems/nix-src#218](https://github.com/DeterminateSystems/nix-src/pull/218), [DeterminateSystems/nix-src#205](https://github.com/DeterminateSystems/nix-src/pull/205) + +### `NIX_SSHOPTS` + `ssh-ng://root@localhost` fix + +We noticed that specifying `NIX_SSHOPTS=-p2222` when using a command that uses SSH (such as `nix copy --to ssh-ng://root@localhost`) stopped respecting the `NIX_SSHOPTS` setting because of an incorrect comparison. + +This has been fixed, so `NIX_SSHOPTS` and SSH stores that are accessed like `user@localhost` work again. 
+ +PR: [DeterminateSystems/nix-src#219](https://github.com/DeterminateSystems/nix-src/pull/219) + +### Fix `error: [json.exception.type_error.302] type must be string, but is array` when using `exportReferencesGraph` + +We received a report of a `nix build` failing on a specific flake due to its expression using `exportReferencesGraph` with a heterogeneous array of dependencies, causing this inscrutable error. + +This specific case has been broken since Nix 2.29.0, and is now fixed. + +PRs: [DeterminateSystems/nix-src#221](https://github.com/DeterminateSystems/nix-src/pull/221), [DeterminateSystems/nix-src#225](https://github.com/DeterminateSystems/nix-src/pull/225) + + +**Full Changelog**: [v3.11.2...v3.11.3](https://github.com/DeterminateSystems/nix-src/compare/v3.11.2...v3.11.3) diff --git a/doc/manual/source/release-notes-determinate/v3.12.0.md b/doc/manual/source/release-notes-determinate/v3.12.0.md new file mode 100644 index 000000000000..55c1f10bf15f --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.12.0.md @@ -0,0 +1,17 @@ +# Release 3.12.0 (2025-10-23) + +* Based on [upstream Nix 2.32.1](../release-notes/rl-2.32.md). + +## What's Changed + +### `nix nario` + +Determinate Nix has a new command, `nix nario`, that replaces the commands `nix-store --export` and `nix-store --import` from the old CLI. `nix nario` allows you to serialize store paths to a file that can be imported into another Nix store. It is backwards compatible with the file format generated by `nix-store --export`. It also provides a new format (selected by passing `--format 2`) that supports store path attributes such as signatures, and allows store paths to be imported more efficiently. + +### Other changes + +`nix flake clone` now supports arbitrary input types. In particular, this allows you to clone tarball flakes, such as flakes on FlakeHub. + +When using `-vv`, Determinate Nix now prints the Nix version. 
This is useful when diagnosing Nix problems from the debug output of a Nix run. + +**Full Changelog**: [v3.11.3...v3.12.0](https://github.com/DeterminateSystems/nix-src/compare/v3.11.3...v3.12.0) diff --git a/doc/manual/source/release-notes-determinate/v3.12.1.md b/doc/manual/source/release-notes-determinate/v3.12.1.md new file mode 100644 index 000000000000..1be2b48e26d8 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.12.1.md @@ -0,0 +1,10 @@ +# Release 3.12.1 (2025-11-04) + +* Based on [upstream Nix 2.32.1](../release-notes/rl-2.32.md). + +## What's Changed +* Allow access to the result of fetchClosure by @edolstra in [DeterminateSystems/nix-src#241](https://github.com/DeterminateSystems/nix-src/pull/241) +* libstore/build: fixup JSON logger missing the resBuildResult result event by @cole-h in [DeterminateSystems/nix-src#246](https://github.com/DeterminateSystems/nix-src/pull/246) + + +**Full Changelog**: [v3.12.0...v3.12.1](https://github.com/DeterminateSystems/nix-src/compare/v3.12.0...v3.12.1) diff --git a/doc/manual/source/release-notes-determinate/v3.12.2.md b/doc/manual/source/release-notes-determinate/v3.12.2.md new file mode 100644 index 000000000000..4c8c3169aa72 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.12.2.md @@ -0,0 +1,42 @@ +# Release 3.12.2 (2025-11-05) + +* Based on [upstream Nix 2.32.2](../release-notes/rl-2.32.md). + +## What's Changed + +### Faster `revCount` computation + +When using Git repositories with a long history, calculating the `revCount` attribute can take a long time. Determinate Nix now computes `revCount` using multiple threads, making it much faster. + +Note that if you don't need `revCount`, you can disable it altogether by setting the flake input attribute `shallow = true`. 
+ +PR: [DeterminateSystems/nix-src#245](https://github.com/DeterminateSystems/nix-src/pull/245) + +### More readable error messages + +Previously, Nix showed full flakerefs in error messages such as stack traces, e.g. +``` + … from call site + at «github:NixOS/nixpkgs/3bea86e918d8b54aa49780505d2d4cd9261413be?narHash=sha256-Ica%2B%2BSXFuLyxX9Q7YxhfZulUif6/gwM8AEQYlUxqSgE%3D»/lib/customisation.nix:69:16: + 68| let + 69| result = f origArgs; + | ^ + 70| +``` +It now abbreviates these by leaving out `narHash` and shortening Git revisions: +``` + … from call site + at «github:NixOS/nixpkgs/3bea86e»/lib/customisation.nix:69:16: + 68| let + 69| result = f origArgs; + | ^ + 70| +``` + +PR: [DeterminateSystems/nix-src#243](https://github.com/DeterminateSystems/nix-src/pull/243) + +### Other changes + +This release fixes an assertion failure in `nix flake check`. PR: [DeterminateSystems/nix-src#252](https://github.com/DeterminateSystems/nix-src/pull/252) + +**Full Changelog**: [v3.12.1...v3.12.2](https://github.com/DeterminateSystems/nix-src/compare/v3.12.1...v3.12.2) diff --git a/doc/manual/source/release-notes-determinate/v3.13.0.md b/doc/manual/source/release-notes-determinate/v3.13.0.md new file mode 100644 index 000000000000..09041c2acda0 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.13.0.md @@ -0,0 +1,45 @@ +# Release 3.13.0 (2025-11-09) + +* Based on [upstream Nix 2.32.3](../release-notes/rl-2.32.md). + +## What's Changed + + +### Git sources have a progress indicator again + +Nix used to feel "stuck" while it was cloning large repositories. +Determinate Nix now shows git's native progress indicator while fetching. 
+ +PR: [DeterminateSystems/nix-src#250](https://github.com/DeterminateSystems/nix-src/pull/250) + +### C API improvements + +We've invested in the C API to support our work on closure analysis for SBOM generation, and made a couple of changes: + +* C API: add nix_locked_flake_read_path for flake file reading +* C API: make nix_store_get_fs_closure compatible with upstream + +PRs: +* [DeterminateSystems/nix-src#244](https://github.com/DeterminateSystems/nix-src/pull/244) +* [DeterminateSystems/nix-src#254](https://github.com/DeterminateSystems/nix-src/pull/254) + +### Dropping support for Intel Macs + +Determinate Nix no longer supports being installed on Intel Macs. +Determinate Nix will continue to support building for Intel macOS targets, but only from an Apple Silicon host. + +From our intent-to-ship: +> Over the past year, we’ve watched usage of Determinate on Intel macOS hosts dwindle to a minuscule fraction of total usage. +> It currently stands at approximately 0.02% of all installations. +> The vast majority are run in managed CI environments that, we anticipate, will be able to easily convert to using Apple Silicon runners. + +For more information: https://github.com/DeterminateSystems/nix-src/issues/224 + +PR: [DeterminateSystems/nix-src#257](https://github.com/DeterminateSystems/nix-src/pull/257) + +### Bugs fixed + +* IPv6 Store URLs now handles zone ID references like it did in previous releases [NixOS/nix#14434](https://github.com/NixOS/nix/pull/14434) + + +**Full Changelog**: [v3.12.2...v3.13.0](https://github.com/DeterminateSystems/nix-src/compare/v3.12.2...v3.13.0) diff --git a/doc/manual/source/release-notes-determinate/v3.13.1.md b/doc/manual/source/release-notes-determinate/v3.13.1.md new file mode 100644 index 000000000000..025a192c44ee --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.13.1.md @@ -0,0 +1,10 @@ +# Release 3.13.1 (2025-11-12) + +* Based on [upstream Nix 2.32.4](../release-notes/rl-2.32.md). 
+ +## What's Changed +* nix bundle: Wait for async path writer by @edolstra in [DeterminateSystems/nix-src#260](https://github.com/DeterminateSystems/nix-src/pull/260) +* Sync with upstream 2.32.4 by @edolstra in [DeterminateSystems/nix-src#261](https://github.com/DeterminateSystems/nix-src/pull/261) + + +**Full Changelog**: [v3.13.0...v3.13.1](https://github.com/DeterminateSystems/nix-src/compare/v3.13.0...v3.13.1) diff --git a/doc/manual/source/release-notes-determinate/v3.13.2.md b/doc/manual/source/release-notes-determinate/v3.13.2.md new file mode 100644 index 000000000000..2490b865e6bc --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.13.2.md @@ -0,0 +1,68 @@ +# Release 3.13.2 (2025-11-19) + +* Based on [upstream Nix 2.32.4](../release-notes/rl-2.32.md). + +## What's Changed + +### Abbreviate flakerefs in lockfile diffs and `nix flake metadata` + +Flake refs are now abbreviated when possible, to reduce visual clutter. + +For example, this changes + +``` +• Updated input 'blender-bin': + 'https://api.flakehub.com/f/pinned/edolstra/blender-bin/1.0.19/01993ca7-2aa8-746f-96f5-ca8d2c2b962d/source.tar.gz?narHash=sha256-ZqVhVl9UYVErF8HW8lcvqss005VWYjuX//rZ%2BOmXyHg%3D' (2025-09-12) + → 'https://api.flakehub.com/f/pinned/edolstra/blender-bin/1.0.20/019a8772-b044-7738-8c03-109bdc9f0a01/source.tar.gz?narHash=sha256-sVj9Gmx0kwTDQPJ5kgQYszE3Hdjevu0zx0b/bL2fyUc%3D' (2025-11-15) +• Updated input 'nix': + 'github:DeterminateSystems/nix-src/236ebef6514f3a2a9765c8a1d80dd503b8e672be?narHash=sha256-s6/Err0yqOp5fM3OdCF1vhmEYpeElbPOWX88YrW2qj4%3D' (2025-10-23) + → 'github:DeterminateSystems/nix-src/ef054dc06e9701597bce0b0572af18cb4c7e7277?narHash=sha256-uqYmH0KA8caQqX5u4BMarZsuDlC%2B71HRsH3h4f3DPCA%3D' (2025-11-12) +``` + +to + +``` +• Updated input 'blender-bin': + 'https://api.flakehub.com/f/pinned/edolstra/blender-bin/1.0.19/01993ca7-2aa8-746f-96f5-ca8d2c2b962d/source.tar.gz' (2025-09-12) + → 
'https://api.flakehub.com/f/pinned/edolstra/blender-bin/1.0.20/019a8772-b044-7738-8c03-109bdc9f0a01/source.tar.gz' (2025-11-15) +• Updated input 'nix': + 'github:DeterminateSystems/nix-src/236ebef' (2025-10-23) + → 'github:DeterminateSystems/nix-src/ef054dc' (2025-11-12) +``` + +PR: [DeterminateSystems/nix-src#264](https://github.com/DeterminateSystems/nix-src/pull/264) + +### `nix flake prefetch-inputs` now skips build-time inputs + +Build-time inputs can already be fetched in parallel, so prefetching them is usually not what you want. + +This can be especially noticeable in projects that make extensive use of build-time flake inputs. + +PR: [DeterminateSystems/nix-src#263](https://github.com/DeterminateSystems/nix-src/pull/263) + +### Don't compute `revCount`/`lastModified` if they're already specified + +We don't care if the user (or more likely the lock file) specifies an incorrect value for these attributes, since it doesn't matter for security (unlike content hashes like `narHash`). + +This can save time when operating on large repos -- having to recalculate these attributes could slow things down greatly. + +PR: [DeterminateSystems/nix-src#269](https://github.com/DeterminateSystems/nix-src/pull/269) + +### Avoid unnecessary Git refetches + +This fixes the issue where updating a Git input does a non-shallow fetch, and then a subsequent eval does a shallow refetch because the `revCount` is already known. + +Now the subsequent eval will reuse the repo used in the first fetch. + +PR: [DeterminateSystems/nix-src#270](https://github.com/DeterminateSystems/nix-src/pull/270) + +### Use our mirrored flake registry + +The flake registry is security-critical and thus should have high availability. + +By mirroring the upstream Nix flake registry, we can make it less likely that a GitHub outage affects being able to resolve from the registry. 
+ +PR: [DeterminateSystems/nix-src#271](https://github.com/DeterminateSystems/nix-src/pull/271) + + +**Full Changelog**: [v3.13.1...v3.13.2](https://github.com/DeterminateSystems/nix-src/compare/v3.13.1...v3.13.2) diff --git a/doc/manual/source/release-notes-determinate/v3.14.0.md b/doc/manual/source/release-notes-determinate/v3.14.0.md new file mode 100644 index 000000000000..d72d5d21468c --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.14.0.md @@ -0,0 +1,159 @@ +# Release 3.14.0 (2025-12-08) + +* Based on [upstream Nix 2.32.4](../release-notes/rl-2.32.md). + +## What is going on?! `nix ps` to the rescue + +Determinate Nix now features a `nix ps` command to summarize all of the active builds and child processes: + +``` +$ nix ps +USER PID CPU DERIVATION/COMMAND +_nixbld1 30167 0.4s /nix/store/h431bcfml83czhpyzljhp9mw4yrq95vs-determinate-nix-manual-3.14.0.drv (wall=9s) +_nixbld1 30167 0.2s └───bash -e /nix/store/jwqf79v5p51x9mv8vx20fv9mzm2x7kig-source-stdenv.sh /nix/store/285whzixr5k1kfj6nidyj29mqqgv7n0b-default-builder.s +_nixbld1 30278 0.0s └───ninja -j14 +_nixbld1 30279 0.0s ├───/nix/store/p7rag2cw99d7alp6749rjqp71qc0mnzl-python3-3.12.11/bin/python3.12 /nix/store/8k5fancbc5fjmxq6izn0z4inwnmpj09y-mes +_nixbld1 30286 0.0s │ └───/nix/store/z59zm01pjwzil2qkvv0s4ibk54risy9a-determinate-nix-3.14.0/bin/nix config show --json +_nixbld1 30280 0.0s ├───/nix/store/p7rag2cw99d7alp6749rjqp71qc0mnzl-python3-3.12.11/bin/python3.12 /nix/store/8k5fancbc5fjmxq6izn0z4inwnmpj09y-mes +_nixbld1 30287 0.0s │ └───/nix/store/z59zm01pjwzil2qkvv0s4ibk54risy9a-determinate-nix-3.14.0/bin/nix __dump-language +_nixbld1 30281 0.0s ├───/nix/store/p7rag2cw99d7alp6749rjqp71qc0mnzl-python3-3.12.11/bin/python3.12 /nix/store/8k5fancbc5fjmxq6izn0z4inwnmpj09y-mes +_nixbld1 30288 0.0s │ └───/nix/store/z59zm01pjwzil2qkvv0s4ibk54risy9a-determinate-nix-3.14.0/bin/nix __dump-cli +_nixbld1 30282 0.0s ├───/nix/store/p7rag2cw99d7alp6749rjqp71qc0mnzl-python3-3.12.11/bin/python3.12 
/nix/store/8k5fancbc5fjmxq6izn0z4inwnmpj09y-mes +_nixbld1 30284 0.0s │ └───/nix/store/z59zm01pjwzil2qkvv0s4ibk54risy9a-determinate-nix-3.14.0/bin/nix __dump-xp-features +_nixbld1 30283 0.0s └───/nix/store/p7rag2cw99d7alp6749rjqp71qc0mnzl-python3-3.12.11/bin/python3.12 /nix/store/8k5fancbc5fjmxq6izn0z4inwnmpj09y-mes +_nixbld1 30285 0.0s └───/nix/store/bs1pvy8margy5sj0jwahchxbjnqzi14i-bash-5.2p37/bin/bash -euo pipefail -c if type -p build-release-notes > /de +_nixbld1 30289 0.0s └───changelog-d ../source/release-notes/../../rl-next +``` + +For the integrators out there, it also has a `--json` flag with all the raw data. + +PRs: +* [DeterminateSystems/nix-src#282](https://github.com/DeterminateSystems/nix-src/pull/282) +* [DeterminateSystems/nix-src#287](https://github.com/DeterminateSystems/nix-src/pull/287) + + +## Nix `build`, `profile`, and `flake check` commands tell you what output failed + +These commands now tell you exactly what flake outputs failed to build. +Previously, the error would indicate only what derivation failed to build -- but not which output. + +Now, `nix build` and `nix profile` commands provide the specific output: + +``` +$ nix build .#oneFakeHash .#badSystem --keep-going +❌ git+file:///Users/grahamc/src/github.com/DeterminateSystems/samples#oneFakeHash +error: hash mismatch in fixed-output derivation '/nix/store/58pp1y74j4f5zxfq50xncv2wvnxf7w3y-one-fake-hash.drv': + specified: sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= + got: sha256-i7j83d71sibS/ssSjLJ5PMKmbhjAM+BHW0aElvkgEwY= +❌ git+file:///Users/grahamc/src/github.com/DeterminateSystems/samples#badSystem +error: Cannot build '/nix/store/5vsaxi730yl2icngkyvn8wiflik5wfmq-bad-system.drv'. 
+ Reason: required system or feature not available + Required system: 'bogus' with features {} + Current system: 'aarch64-darwin' with features {apple-virt, benchmark, big-parallel, nixos-test} +``` + +And in a great change for CI, `nix flake check` users get improved summaries too: + +``` +$ nix flake check +❓ checks.aarch64-darwin.twoFakeHashes (cancelled) +❓ checks.aarch64-darwin.badSystemNested (cancelled) +❓ checks.aarch64-darwin.oneFakeHash (cancelled) +❓ checks.aarch64-darwin.failure (cancelled) +❓ checks.aarch64-darwin.badSystem (cancelled) +❓ checks.aarch64-darwin.weirdHash (cancelled) +❓ checks.aarch64-darwin.all (cancelled) +❓ checks.aarch64-darwin.fakeHashes (cancelled) +❓ checks.aarch64-darwin.incorrectHashes (cancelled) +❓ checks.aarch64-darwin.badFeaturesNested (cancelled) +❓ checks.aarch64-darwin.failureNested (cancelled) +❌ checks.aarch64-darwin.badFeatures +error: Cannot build '/nix/store/sc1cyhrpsm9yjx55cl2zzyr5lypwigi6-bad-feature.drv'. + Reason: required system or feature not available + Required system: 'aarch64-darwin' with features {bogus} + Current system: 'aarch64-darwin' with features {apple-virt, benchmark, big-parallel, nixos-test} +``` + +PRs: +* [DeterminateSystems/nix-src#281](https://github.com/DeterminateSystems/nix-src/pull/281) +* [DeterminateSystems/nix-src#285](https://github.com/DeterminateSystems/nix-src/pull/285) + + +## More seamless upgrades from Nix 2.18 and Nix 2.19 + +We've heard from some users who are trying to upgrade from Nix 2.18. + +These users are primarily experiencing problems caused by Nix 2.20 switching from `git-archive` to `libgit2` for fetching repositories. +This change caused some `git-archive` filters to stop executing, like autocrlf. +Not running those filters is an improvement, and running those filters *can cause* instability in source hashes. +However, this switch *did* cause previously valid hashes to become invalid. 
+ +Determinate Nix now retries fetching an old archive with `git-archive` as a fallback when libgit2 fails to provide the correct source. + +Further, to support a progressive migration Determinate Nix has a new option: `nix-219-compat`. +Set `nix-219-compat=true` to cause Nix to author new flake.nix files with a `git-archive` based source hash. + +Finally, a user identified `builtins.path` changed since 2.18 and stopped propagating references. +We have corrected this regression. + +PRs: +* [DeterminateSystems/nix-src#283](https://github.com/DeterminateSystems/nix-src/pull/283) +* [DeterminateSystems/nix-src#278](https://github.com/DeterminateSystems/nix-src/pull/278) + +## Flake registry mirroring + +Determinate Nix now includes a fallback copy of the Nix Registry. +This change builds on top of v3.13.2, where we changed from the upstream Nix registry to a mirrored copy hosted by `install.determinate.systems`. + +Combined, these changes increase the reliability of Nix in the face of network outages. + +> [!NOTE] +> Flake registry URLs for `flake.nix` inputs are deprecated. +> The flake registry should only be used for interactive use. +> See: https://github.com/DeterminateSystems/nix-src/issues/37 + +PR: [DeterminateSystems/nix-src#273](https://github.com/DeterminateSystems/nix-src/pull/273) + +## Flake registry resolution CLI + +We added the new command `nix registry resolve` to help debug issues with Flake registries. +This command looks up a flake registry input name and returns the flakeref it resolves to. + +For example, looking up Nixpkgs: + +``` +$ nix registry resolve nixpkgs +github:NixOS/nixpkgs/nixpkgs-unstable +``` + +Or looking up the 25.11 branch of Nixpkgs: +``` +$ nix registry resolve nixpkgs/release-25.11 +github:NixOS/nixpkgs/release-25.11 +``` + +> [!NOTE] +> Flake registry URLs for `flake.nix` inputs are deprecated. +> The flake registry should only be used for interactive use. 
+> See: https://github.com/DeterminateSystems/nix-src/issues/37 + +PR: [DeterminateSystems/nix-src#273](https://github.com/DeterminateSystems/nix-src/pull/273) + +## Improved Docker image packaging + +Thanks to `employee-64c7dcd530593118dcccc3fb`, the OCI / Docker images built by the Determinate Nix flake.nix can be further customized. + +Users can specify their own base image by specifying `fromImage`. + +Additionally, users can specify additional directories to include at the beginning or end of the PATH variable with `extraPrePaths` and `extraPostPaths`. + +PRs: +* [DeterminateSystems/nix-src#277](https://github.com/DeterminateSystems/nix-src/pull/277) +* [DeterminateSystems/nix-src#280](https://github.com/DeterminateSystems/nix-src/pull/280) + +## Bug fixes + +* Corrected an error with parallel evaluation ([DeterminateSystems/nix-src#286](https://github.com/DeterminateSystems/nix-src/pull/286)) +* Fixed compatibility with updated Nixpkgs versions. Thank you SandaruKasa! ([DeterminateSystems/nix-src#284](https://github.com/DeterminateSystems/nix-src/pull/284)) + +**Full Changelog**: [v3.13.2...v3.14.0](https://github.com/DeterminateSystems/nix-src/compare/v3.13.2...v3.14.0) diff --git a/doc/manual/source/release-notes-determinate/v3.15.0.md b/doc/manual/source/release-notes-determinate/v3.15.0.md new file mode 100644 index 000000000000..fb568374c3f2 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.15.0.md @@ -0,0 +1,28 @@ +# Release 3.15.0 (2025-12-19) + +* Based on [upstream Nix 2.33.0](../release-notes/rl-2.33.md). + +## `fetchTree` improvement + +`builtins.fetchTree` now implicitly treats the fetched tree as "final" when a `narHash` is supplied, meaning that it will not return attributes like `lastModified` or `revCount` unless they were specified by the caller. This makes it possible to substitute the tree from a binary cache, which is often more efficient. 
Furthermore, for Git inputs, it allows Nix to perform a shallow fetch, which is much faster. + +This is primarily useful for users of `flake-compat`, since it uses `builtins.fetchTree` internally. + +PR: [DeterminateSystems/nix-src#297](https://github.com/DeterminateSystems/nix-src/pull/297) + +## New builtin function `builtins.filterAttrs` + +Nixpkgs heavily relies on this function to select attributes from an attribute set: + +```nix +filterAttrs = pred: set: removeAttrs set (filter (name: !pred name set.${name}) (attrNames set)); +``` + +Determinate Nix now has this function built-in, which makes it much faster. + +PR: [DeterminateSystems/nix-src#291](https://github.com/DeterminateSystems/nix-src/pull/291) + +## New Contributors +* @not-ronjinger made their first contribution in [DeterminateSystems/nix-src#291](https://github.com/DeterminateSystems/nix-src/pull/291) + +**Full Changelog**: [v3.14.0...v3.15.0](https://github.com/DeterminateSystems/nix-src/compare/v3.14.0...v3.15.0) diff --git a/doc/manual/source/release-notes-determinate/v3.15.1.md b/doc/manual/source/release-notes-determinate/v3.15.1.md new file mode 100644 index 000000000000..9243962cf4b5 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.15.1.md @@ -0,0 +1,15 @@ +# Release 3.15.1 (2025-12-24) + +* Based on [upstream Nix 2.33.0](../release-notes/rl-2.33.md). + +## What's Changed +Users reported the v3.15.0 tarball could not be fetched in a fixed-output derivation due to current stdenv paths present in the documentation. This release eliminated those paths. + +PR: [DeterminateSystems/nix-src#306](https://github.com/DeterminateSystems/nix-src/pull/306) + +Additionally, this change re-enables CodeRabbit's code review on our changes. CodeRabbit was disabled by the upstream project, and we inadvertently included that change. 
+ +PR: [DeterminateSystems/nix-src#305](https://github.com/DeterminateSystems/nix-src/pull/305) + + +**Full Changelog**: [v3.15.0...v3.15.1](https://github.com/DeterminateSystems/nix-src/compare/v3.15.0...v3.15.1) diff --git a/doc/manual/source/release-notes-determinate/v3.15.2.md b/doc/manual/source/release-notes-determinate/v3.15.2.md new file mode 100644 index 000000000000..c5e5339990b5 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.15.2.md @@ -0,0 +1,44 @@ +# Release 3.15.2 (2026-01-20) + +* Based on [upstream Nix 2.33.1](../release-notes/rl-2.33.md). + +## What's Changed + +### Improved performance for users with a lot of dependencies + +If you ever had the occasion to query your binary cache for over 110,000 store paths simultaneously you might have found it rather slow. +Previously, Nix would enqueue all the downloads at once. +This appears to trigger quadratic behavior in curl. + +Determinate Nix now enqueues a reasonable number of substitutions at once. +At the same time, we fixed a performance issue in the progress bar with so many dependencies. + +PR: [DeterminateSystems/nix-src#315](https://github.com/DeterminateSystems/nix-src/pull/315) + +### Lazy trees update: path inputs are now lazy + +Previously inputs like `path:///path/to/a/dependency` were eagerly fetched when lazy-trees is enabled. + +In Determinate Nix 3.15.2, path input types are also fetched lazily. +This change saves time and improves performance for users with path inputs. + +PRs: +* [DeterminateSystems/nix-src#312](https://github.com/DeterminateSystems/nix-src/pull/312) +* [DeterminateSystems/nix-src#317](https://github.com/DeterminateSystems/nix-src/pull/317) + +### `nix repl` now reports the Determinate version + +A small change, but now `nix repl` correctly reports the Determinate Nix version: + +``` +$ nix repl +Nix (Determinate Nix 3.15.1) 2.33.0 +Type :? for help. 
+nix-repl> +``` + +PR: [DeterminateSystems/nix-src#316](https://github.com/DeterminateSystems/nix-src/pull/316) + +## New Contributors +* @dliberalesso made their first contribution in [DeterminateSystems/nix-src#313](https://github.com/DeterminateSystems/nix-src/pull/313) + +**Full Changelog**: [v3.15.1...v3.15.2](https://github.com/DeterminateSystems/nix-src/compare/v3.15.1...v3.15.2) diff --git a/doc/manual/source/release-notes-determinate/v3.16.0.md b/doc/manual/source/release-notes-determinate/v3.16.0.md new file mode 100644 index 000000000000..8e80ac68402a --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.16.0.md @@ -0,0 +1,53 @@ +# Release 3.16.0 (2026-02-12) + +* Based on [upstream Nix 2.33.3](../release-notes/rl-2.33.md). + +## Support `.gitattributes` in subdirectories + +For performance, the Git backwards compatibility hack was only applied to repositories that had a `.gitattributes` in the root directory. +However, it is possible to have a `.gitattributes` file in a subdirectory, and there are real-world repos that do this, so we have dropped that restriction. + +PR: [DeterminateSystems/nix-src#335](https://github.com/DeterminateSystems/nix-src/pull/335) + +## Fix hung downloads when `http-connections = 0` + +When we started limiting the number of active cURL handles in [DeterminateSystems/nix-src#315](https://github.com/DeterminateSystems/nix-src/pull/315), we did not take into account that `http-connections = 0` is a special value that means, roughly "as many connections as possible" (the exact behavior is up to cURL). + +This should now be fixed. + +PR: [DeterminateSystems/nix-src#327](https://github.com/DeterminateSystems/nix-src/pull/327) + +## `builtins.getFlake` now supports relative paths + +`builtins.getFlake` now supports using relative paths, like: + +```nix +builtins.getFlake ./.. 
+``` + +instead of the hacky + +```nix +builtins.getFlake (builtins.flakeRefToString { type = "path"; path = self.sourceInfo.outPath; narHash = self.narHash; }); +``` + +Note that allowing `builtins.getFlake` to fetch from store paths is probably a bad idea, since it's ambiguous when using chroot stores, so a warning will be printed when this is encountered. + +PRs: +* [DeterminateSystems/nix-src#337](https://github.com/DeterminateSystems/nix-src/pull/337) +* [DeterminateSystems/nix-src#338](https://github.com/DeterminateSystems/nix-src/pull/338) + +## Fixed a bug with too many open files + +Recently, some users have reported seeing errors like: + +``` +error: creating git packfile indexer: failed to create temporary file '/Users/anon/.cache/nix/tarball-cache-v2/objects/pack/pack_git2_56d617039ac17c2b': Too many open files +``` + +This should now be fixed. + +PR: [DeterminateSystems/nix-src#347](https://github.com/DeterminateSystems/nix-src/pull/347) + + +**Full Changelog**: [v3.15.2...v3.16.0](https://github.com/DeterminateSystems/nix-src/compare/v3.15.2...v3.16.0) diff --git a/doc/manual/source/release-notes-determinate/v3.16.1.md b/doc/manual/source/release-notes-determinate/v3.16.1.md new file mode 100644 index 000000000000..6ecd5262b7c9 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.16.1.md @@ -0,0 +1,24 @@ +# Release 3.16.1 (2026-02-22) + +* Based on [upstream Nix 2.33.3](../release-notes/rl-2.33.md). + +## What's Changed + +### `nix store info` now correctly supports `--refresh` and `--offline` + +Previously, Nix had a hard-coded TTL of seven days. +Determinate Nix moved that TTL to a new setting `narinfo-cache-meta-ttl` and now `nix store info` respects the `--refresh` and `--offline` flags. + +This change makes it possible to freshly validate authenticating to a remote store. 
+ +PR: [DeterminateSystems/nix-src#355](https://github.com/DeterminateSystems/nix-src/pull/355) + +### Corrected `builtins.hashString` behavior under lazy trees + +`builtins.hashString` now devirtualizes lazy paths, making the hash result stable. + +PR: [DeterminateSystems/nix-src#360](https://github.com/DeterminateSystems/nix-src/pull/360) + + + +**Full Changelog**: [v3.16.0...v3.16.1](https://github.com/DeterminateSystems/nix-src/compare/v3.16.0...v3.16.1) diff --git a/doc/manual/source/release-notes-determinate/v3.16.2.md b/doc/manual/source/release-notes-determinate/v3.16.2.md new file mode 100644 index 000000000000..73a1b25f21c8 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.16.2.md @@ -0,0 +1,8 @@ +# Release 3.16.2 (2026-02-23) + +* Based on [upstream Nix 2.33.3](../release-notes/rl-2.33.md). + +## What's Changed +This release is exclusively improvements to `determinate-nixd`. + + diff --git a/doc/manual/source/release-notes-determinate/v3.16.3.md b/doc/manual/source/release-notes-determinate/v3.16.3.md new file mode 100644 index 000000000000..fcc6fefa33c7 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.16.3.md @@ -0,0 +1,6 @@ +# Release 3.16.3 (2026-02-24) + +* Based on [upstream Nix 2.33.3](../release-notes/rl-2.33.md). + +## What's Changed +This release only includes changes in determinate-nixd. diff --git a/doc/manual/source/release-notes-determinate/v3.17.0.md b/doc/manual/source/release-notes-determinate/v3.17.0.md new file mode 100644 index 000000000000..e09938786e55 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.17.0.md @@ -0,0 +1,10 @@ +# Release 3.17.0 (2026-03-04) + +* Based on [upstream Nix 2.33.3](../release-notes/rl-2.33.md). + +## What's Changed +Determinate Nix 3.17.0 brings exciting improvements like Flake Schemas, provenance, and Wasm / WASI. +We'll be posting more details over the next week or so on our blog: https://determinate.systems/blog/. 
+ + +**Full Changelog**: [v3.16.3...v3.17.0](https://github.com/DeterminateSystems/nix-src/compare/v3.16.3...v3.17.0) diff --git a/doc/manual/source/release-notes-determinate/v3.8.6.md b/doc/manual/source/release-notes-determinate/v3.8.6.md new file mode 100644 index 000000000000..8f917f2362ff --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.8.6.md @@ -0,0 +1,14 @@ +# Release 3.8.6 (2025-08-19) + +* Based on [upstream Nix 2.30.2](../release-notes/rl-2.30.md). + +## What's Changed +* Auto update release notes by @grahamc in [DeterminateSystems/nix-src#170](https://github.com/DeterminateSystems/nix-src/pull/170) +* Use WAL mode for SQLite cache databases (2nd attempt) by @edolstra in [DeterminateSystems/nix-src#167](https://github.com/DeterminateSystems/nix-src/pull/167) +* Enable parallel marking in boehm-gc by @edolstra in [DeterminateSystems/nix-src#168](https://github.com/DeterminateSystems/nix-src/pull/168) +* BasicClientConnection::queryPathInfo(): Don't throw exception for invalid paths by @edolstra in [DeterminateSystems/nix-src#172](https://github.com/DeterminateSystems/nix-src/pull/172) +* Fix queryPathInfo() negative caching by @edolstra in [DeterminateSystems/nix-src#173](https://github.com/DeterminateSystems/nix-src/pull/173) +* forceDerivation(): Wait for async path write after forcing value by @edolstra in [DeterminateSystems/nix-src#176](https://github.com/DeterminateSystems/nix-src/pull/176) + + +**Full Changelog**: [v3.8.5...v3.8.6](https://github.com/DeterminateSystems/nix-src/compare/v3.8.5...v3.8.6) diff --git a/doc/manual/source/release-notes-determinate/v3.9.0.md b/doc/manual/source/release-notes-determinate/v3.9.0.md new file mode 100644 index 000000000000..66deb69b6192 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.9.0.md @@ -0,0 +1,45 @@ +# Release 3.9.0 (2025-08-26) + +* Based on [upstream Nix 2.30.2](../release-notes/rl-2.30.md). 
+ +## What's Changed + +### Build-time flake inputs + +Some of our users have hundreds or thousands of flake inputs. +In those cases, it is painfully slow for Nix to fetch all the inputs during evaluation of the flake. + +Determinate Nix has an experimental feature for deferring the fetching to build time of the dependent derivations. + +This is currently in developer preview. +If you would like to try it, add the experimental feature to your `/etc/nix/nix.custom.conf`: + +```ini +extra-experimental-features = build-time-fetch-tree +``` + +Then, mark an input to be fetched at build time: + +```nix +inputs.example = { + type = "github"; + owner = "DeterminateSystems"; + repo = "example"; + flake = false; # <-- currently required + buildTime = true; +}; +``` + +Let us know what you think! + +PR: [DeterminateSystems/nix-src#49](https://github.com/DeterminateSystems/nix-src/pull/49) + +### Corrected inconsistent behavior of `nix flake check` + +Users reported that `nix flake check` would not consistently validate the entire flake. + +We've fixed this issue and improved our testing around `nix flake check`. + +PR: [DeterminateSystems/nix-src#182](https://github.com/DeterminateSystems/nix-src/pull/182) + +**Full Changelog**: [v3.8.6...v3.9.0](https://github.com/DeterminateSystems/nix-src/compare/v3.8.6...v3.9.0) diff --git a/doc/manual/source/release-notes-determinate/v3.9.1.md b/doc/manual/source/release-notes-determinate/v3.9.1.md new file mode 100644 index 000000000000..38d17199c2c0 --- /dev/null +++ b/doc/manual/source/release-notes-determinate/v3.9.1.md @@ -0,0 +1,20 @@ +# Release 3.9.1 (2025-08-28) + +- Based on [upstream Nix 2.30.2](../release-notes/rl-2.30.md). + +### A useful `nix flake init` template default + +Nix's default flake template is [extremely bare bones](https://github.com/NixOS/templates/blob/ad0e221dda33c4b564fad976281130ce34a20cb9/trivial/flake.nix), and not a useful starting point. 
+ +Determinate Nix now uses [a more fleshed out default template](https://github.com/DeterminateSystems/flake-templates/blob/8af99b99627da41f16897f60eb226db30c775e76/default/flake.nix), including targeting multiple systems. + +PR: [DeterminateSystems/nix-src#180](https://github.com/DeterminateSystems/nix-src/pull/180) + +### Build cancellation is repaired on macOS + +A recent macOS update changed how signals are handled by Nix and broke using Ctrl-C to stop a build. +Determinate Nix on macOS correctly handles these signals and stops the build. + +PR: [DeterminateSystems/nix-src#184](https://github.com/DeterminateSystems/nix-src/pull/184) + +**Full Changelog**: [v3.9.0...v3.9.1](https://github.com/DeterminateSystems/nix-src/compare/v3.9.0...v3.9.1) diff --git a/doc/manual/source/release-notes/rl-0.12.md b/doc/manual/source/release-notes/rl-0.12.md index 3a4aba07d693..3541b6487e73 100644 --- a/doc/manual/source/release-notes/rl-0.12.md +++ b/doc/manual/source/release-notes/rl-0.12.md @@ -80,7 +80,7 @@ ... the following paths will be downloaded/copied (30.02 MiB): /nix/store/4m8pvgy2dcjgppf5b4cj5l6wyshjhalj-samba-3.2.4 - /nix/store/7h1kwcj29ip8vk26rhmx6bfjraxp0g4l-libunwind-0.98.6 + /nix/store/spc1m987vlibchdx369qwa391s738s7l-libunwind-0.98.6 ... - Language features: diff --git a/doc/manual/source/release-notes/rl-0.8.md b/doc/manual/source/release-notes/rl-0.8.md index 5ba6e0e7217c..2bc6352c3540 100644 --- a/doc/manual/source/release-notes/rl-0.8.md +++ b/doc/manual/source/release-notes/rl-0.8.md @@ -63,7 +63,7 @@ Nix 0.8 has the following improvements: can query all paths that directly or indirectly use a certain Glibc: $ nix-store -q --referrers-closure \ - /nix/store/8lz9yc6zgmc0vlqmn2ipcpkjlmbi51vv-glibc-2.3.4 + /nix/store/1a6mdrjz4wn7b9sfmcw5ggbk1mi281mh-glibc-2.3.4 - The concept of fixed-output derivations has been formalised. 
Previously, functions such as `fetchurl` in Nixpkgs used a hack diff --git a/doc/manual/source/release-notes/rl-2.0.md b/doc/manual/source/release-notes/rl-2.0.md index 25cc5e0a5f38..181940f616f9 100644 --- a/doc/manual/source/release-notes/rl-2.0.md +++ b/doc/manual/source/release-notes/rl-2.0.md @@ -66,7 +66,7 @@ This release has the following new features: nix copy --to ssh://machine nixpkgs.hello - nix copy --to ssh://machine /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10 + nix copy --to ssh://machine /nix/store/qbhyj3blxpw2i6pb7c6grc9185nbnpvy-hello-2.10 nix copy --to ssh://machine '(with import {}; hello)' @@ -187,7 +187,7 @@ This release has the following new features: former is primarily useful in conjunction with remote stores, e.g. - nix ls-store --store https://cache.nixos.org/ -lR /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10 + nix ls-store --store https://cache.nixos.org/ -lR /nix/store/qbhyj3blxpw2i6pb7c6grc9185nbnpvy-hello-2.10 lists the contents of path in a binary cache. diff --git a/doc/manual/source/release-notes/rl-2.13.md b/doc/manual/source/release-notes/rl-2.13.md index 168708113ea9..6976f91501be 100644 --- a/doc/manual/source/release-notes/rl-2.13.md +++ b/doc/manual/source/release-notes/rl-2.13.md @@ -25,7 +25,7 @@ * Allow explicitly selecting outputs in a store derivation installable, just like we can do with other sorts of installables. 
For example, ```shell-session - # nix build /nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^dev + # nix build /nix/store/fpq78s2h8ffh66v2iy0q1838mhff06y8-glibc-2.33-78.drv^dev ``` now works just as ```shell-session diff --git a/doc/manual/source/release-notes/rl-2.15.md b/doc/manual/source/release-notes/rl-2.15.md index e7e52631ba40..1d30c70a4c0c 100644 --- a/doc/manual/source/release-notes/rl-2.15.md +++ b/doc/manual/source/release-notes/rl-2.15.md @@ -18,13 +18,13 @@ For example, ```shell-session - $ nix path-info /nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv + $ nix path-info /nix/store/fpq78s2h8ffh66v2iy0q1838mhff06y8-glibc-2.33-78.drv ``` now gives info about the derivation itself, while ```shell-session - $ nix path-info /nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^* + $ nix path-info /nix/store/fpq78s2h8ffh66v2iy0q1838mhff06y8-glibc-2.33-78.drv^* ``` provides information about each of its outputs. diff --git a/doc/manual/source/release-notes/rl-2.19.md b/doc/manual/source/release-notes/rl-2.19.md index 04f8c9c28d29..0596ef909619 100644 --- a/doc/manual/source/release-notes/rl-2.19.md +++ b/doc/manual/source/release-notes/rl-2.19.md @@ -45,7 +45,7 @@ ```json5 [ { - "path": "/nix/store/8fv91097mbh5049i9rglc73dx6kjg3qk-bash-5.2-p15", + "path": "/nix/store/fvqsvk65d38p8qqir371ii0hyqxvjcw6-bash-5.2-p15", "valid": true, // ... }, @@ -60,7 +60,7 @@ ```json5 { - "/nix/store/8fv91097mbh5049i9rglc73dx6kjg3qk-bash-5.2-p15": { + "/nix/store/fvqsvk65d38p8qqir371ii0hyqxvjcw6-bash-5.2-p15": { // ... }, "/nix/store/wffw7l0alvs3iw94cbgi1gmmbmw99sqb-home-manager-path": null, @@ -69,7 +69,7 @@ This makes it match `nix derivation show`, which also maps store paths to information. 
-- When Nix is installed using the [binary installer](@docroot@/installation/installing-binary.md), in supported shells (Bash, Zsh, Fish) +- When Nix is installed using the binary installer, in supported shells (Bash, Zsh, Fish) [`XDG_DATA_DIRS`](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html#variables) is now populated with the path to the `/share` subdirectory of the current profile. This means that command completion scripts, `.desktop` files, and similar artifacts installed via [`nix-env`](@docroot@/command-ref/nix-env.md) or [`nix profile`](@docroot@/command-ref/new-cli/nix3-profile.md) (experimental) can be found by any program that follows the [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html). diff --git a/doc/manual/source/release-notes/rl-2.20.md b/doc/manual/source/release-notes/rl-2.20.md index eb724f600aa7..d54646379c88 100644 --- a/doc/manual/source/release-notes/rl-2.20.md +++ b/doc/manual/source/release-notes/rl-2.20.md @@ -182,7 +182,7 @@ «partially applied primop map» nix-repl> builtins.trace lib.id "my-value" - trace: «lambda id @ /nix/store/8rrzq23h2zq7sv5l2vhw44kls5w0f654-source/lib/trivial.nix:26:5» + trace: «lambda id @ /nix/store/kgr5lnaiiv08wb7k324yv1i1npjmrvjc-source/lib/trivial.nix:26:5» "my-value" ``` diff --git a/doc/manual/source/release-notes/rl-2.24.md b/doc/manual/source/release-notes/rl-2.24.md index e9b46bb22b70..f608fb54f7d9 100644 --- a/doc/manual/source/release-notes/rl-2.24.md +++ b/doc/manual/source/release-notes/rl-2.24.md @@ -268,6 +268,21 @@ be configured using the `warn-large-path-threshold` setting, e.g. `--warn-large-path-threshold 100M`. +- Wrap filesystem exceptions more correctly [#11378](https://github.com/NixOS/nix/pull/11378) + + With the switch to `std::filesystem` in different places, Nix started to throw `std::filesystem::filesystem_error` in many places instead of its own exceptions. 
+
+  This led to no longer generating error traces, for example when listing a non-existing directory.
+
+  This version catches these types of exception correctly and wraps them into Nix's own exception type.
+
+  Author: [**@Mic92**](https://github.com/Mic92)
+
+- `<nix/fetchurl.nix>` uses TLS verification [#11585](https://github.com/NixOS/nix/pull/11585)
+
+  Previously `<nix/fetchurl.nix>` did not do TLS verification. This was because the Nix sandbox in the past did not have access to TLS certificates, and Nix checks the hash of the fetched file anyway. However, this can expose authentication data from `netrc` and URLs to man-in-the-middle attackers. In addition, Nix now in some cases (such as when using impure derivations) does *not* check the hash. Therefore we have now enabled TLS verification. This means that downloads by `<nix/fetchurl.nix>` will now fail if you're fetching from a HTTPS server that does not have a valid certificate.
+
+  `<nix/fetchurl.nix>` is also known as the builtin derivation builder `builtin:fetchurl`. It's not to be confused with the evaluation-time function `builtins.fetchurl`, which was not affected by this issue.
 
 ## Contributors
diff --git a/doc/manual/source/release-notes/rl-2.33.md b/doc/manual/source/release-notes/rl-2.33.md
index bed697029389..810dcad00b15 100644
--- a/doc/manual/source/release-notes/rl-2.33.md
+++ b/doc/manual/source/release-notes/rl-2.33.md
@@ -279,3 +279,35 @@ This release was made possible by the following 33 contributors:
 - Henry [**(@cootshk)**](https://github.com/cootshk)
 - Martin Joerg [**(@mjoerg)**](https://github.com/mjoerg)
 - Farid Zakaria [**(@fzakaria)**](https://github.com/fzakaria)
+# Release 2.33.3 (2026-02-13)
+
+- S3 binary caches now use virtual-hosted-style addressing by default [#15208](https://github.com/NixOS/nix/issues/15208)
+
+  S3 binary caches now use virtual-hosted-style URLs
+  (`https://bucket.s3.region.amazonaws.com/key`) instead of path-style URLs
+  (`https://s3.region.amazonaws.com/bucket/key`) when connecting to standard AWS
+  S3 endpoints.
This enables HTTP/2 multiplexing and fixes TCP connection + exhaustion (TIME_WAIT socket accumulation) under high-concurrency workloads. + + A new `addressing-style` store option controls this behavior: + + - `auto` (default): virtual-hosted-style for standard AWS endpoints, path-style + for custom endpoints. + - `path`: forces path-style addressing (deprecated by AWS). + - `virtual`: forces virtual-hosted-style addressing (bucket names must not + contain dots). + + Bucket names containing dots (e.g., `my.bucket.name`) automatically fall back + to path-style addressing in `auto` mode, because dotted names create + multi-level subdomains that break TLS wildcard certificate validation. + + Example using path-style for backwards compatibility: + + ``` + s3://my-bucket/key?region=us-east-1&addressing-style=path + ``` + + Additionally, TCP keep-alive is now enabled on all HTTP connections, preventing + idle connections from being silently dropped by intermediate network devices + (NATs, firewalls, load balancers). + diff --git a/doc/manual/source/store/store-path.md b/doc/manual/source/store/store-path.md index 4061f3653f68..08b024e4a846 100644 --- a/doc/manual/source/store/store-path.md +++ b/doc/manual/source/store/store-path.md @@ -2,7 +2,7 @@ > **Example** > -> `/nix/store/a040m110amc4h71lds2jmr8qrkj2jhxd-git-2.38.1` +> `/nix/store/jf6gn2dzna4nmsfbdxsd7kwhsk6gnnlr-git-2.38.1` > > A rendered store path @@ -22,7 +22,7 @@ Store paths are pairs of > **Example** > -> - Digest: `b6gvzjyb2pg0kjfwrjmg1vfhh54ad73z` +> - Digest: `q06x3jll2yfzckz2bzqak089p43ixkkq` > - Name: `firefox-33.1` To make store objects accessible to operating system processes, stores have to expose store objects through the file system. 
@@ -38,7 +38,7 @@ A store path is rendered to a file system path as the concatenation of > **Example** > > ``` -> /nix/store/b6gvzjyb2pg0kjfwrjmg1vfhh54ad73z-firefox-33.1 +> /nix/store/q06x3jll2yfzckz2bzqak089p43ixkkq-firefox-33.1 > |--------| |------------------------------| |----------| > store directory digest name > ``` diff --git a/doc/manual/source/store/types/index.md.in b/doc/manual/source/store/types/index.md.in index a35161ce8fa4..b211ac98fe3a 100644 --- a/doc/manual/source/store/types/index.md.in +++ b/doc/manual/source/store/types/index.md.in @@ -8,7 +8,7 @@ Stores are specified using a URL-like syntax. For example, the command ```console # nix path-info --store https://cache.nixos.org/ --json \ - /nix/store/a7gvj343m05j2s32xcnwr35v31ynlypr-coreutils-9.1 + /nix/store/1542dip9i7k4f24y6hqgd04hmvid9hr5-coreutils-9.1 ``` fetches information about a store path in the HTTP binary cache diff --git a/docker.nix b/docker.nix index 32205224b734..72c13663488d 100644 --- a/docker.nix +++ b/docker.nix @@ -8,6 +8,7 @@ # Image configuration name ? "nix", tag ? "latest", + fromImage ? null, bundleNixpkgs ? true, channelName ? "nixpkgs", channelURL ? "https://channels.nixos.org/nixpkgs-unstable", @@ -27,6 +28,8 @@ "org.opencontainers.image.description" = "Nix container image"; }, Cmd ? [ (lib.getExe bashInteractive) ], + extraPrePaths ? [ ], + extraPostPaths ? [ ], # Default Packages nix ? pkgs.nix, bashInteractive ? 
pkgs.bashInteractive, @@ -336,7 +339,7 @@ let globalFlakeRegistryPath="$nixCacheDir/flake-registry.json" ln -s ${flake-registry-path} $out$globalFlakeRegistryPath mkdir -p $out/nix/var/nix/gcroots/auto - rootName=$(${lib.getExe' nix "nix"} --extra-experimental-features nix-command hash file --type sha1 --base32 <(echo -n $globalFlakeRegistryPath)) + rootName=$(${lib.getExe' nix "nix"} hash file --type sha1 --base32 <(echo -n $globalFlakeRegistryPath)) ln -s $globalFlakeRegistryPath $out/nix/var/nix/gcroots/auto/$rootName '') ); @@ -352,6 +355,7 @@ dockerTools.buildLayeredImageWithNixDb { gid uname gname + fromImage ; contents = [ baseSystem ]; @@ -373,11 +377,15 @@ dockerTools.buildLayeredImageWithNixDb { Env = [ "USER=${uname}" "PATH=${ - lib.concatStringsSep ":" [ - "${userHome}/.nix-profile/bin" - "/nix/var/nix/profiles/default/bin" - "/nix/var/nix/profiles/default/sbin" - ] + lib.concatStringsSep ":" ( + extraPrePaths + ++ [ + "${userHome}/.nix-profile/bin" + "/nix/var/nix/profiles/default/bin" + "/nix/var/nix/profiles/default/sbin" + ] + ++ extraPostPaths + ) }" "MANPATH=${ lib.concatStringsSep ":" [ diff --git a/flake.lock b/flake.lock index 19f7b0c1c21f..f56706ec761e 100644 --- a/flake.lock +++ b/flake.lock @@ -3,11 +3,11 @@ "flake-compat": { "flake": false, "locked": { - "lastModified": 1733328505, - "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=", + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", "owner": "edolstra", "repo": "flake-compat", - "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", "type": "github" }, "original": { @@ -23,55 +23,51 @@ ] }, "locked": { - "lastModified": 1733312601, - "narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9", - "type": "github" + "lastModified": 1748821116, + "narHash": 
"sha256-F82+gS044J1APL0n4hH50GYdPRv/5JWm34oCJYmVKdE=", + "rev": "49f0870db23e8c1ca0b5259734a02cd9e1e371a1", + "revCount": 377, + "type": "tarball", + "url": "https://api.flakehub.com/f/pinned/hercules-ci/flake-parts/0.1.377%2Brev-49f0870db23e8c1ca0b5259734a02cd9e1e371a1/01972f28-554a-73f8-91f4-d488cc502f08/source.tar.gz" }, "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" + "type": "tarball", + "url": "https://flakehub.com/f/hercules-ci/flake-parts/0.1" } }, "git-hooks-nix": { "inputs": { - "flake-compat": [], + "flake-compat": "flake-compat", "gitignore": [], "nixpkgs": [ "nixpkgs" - ], - "nixpkgs-stable": [ - "nixpkgs" ] }, "locked": { - "lastModified": 1734279981, - "narHash": "sha256-NdaCraHPp8iYMWzdXAt5Nv6sA3MUzlCiGiR586TCwo0=", - "owner": "cachix", - "repo": "git-hooks.nix", - "rev": "aa9f40c906904ebd83da78e7f328cd8aeaeae785", - "type": "github" + "lastModified": 1747372754, + "narHash": "sha256-2Y53NGIX2vxfie1rOW0Qb86vjRZ7ngizoo+bnXU9D9k=", + "rev": "80479b6ec16fefd9c1db3ea13aeb038c60530f46", + "revCount": 1026, + "type": "tarball", + "url": "https://api.flakehub.com/f/pinned/cachix/git-hooks.nix/0.1.1026%2Brev-80479b6ec16fefd9c1db3ea13aeb038c60530f46/0196d79a-1b35-7b8e-a021-c894fb62163d/source.tar.gz" }, "original": { - "owner": "cachix", - "repo": "git-hooks.nix", - "type": "github" + "type": "tarball", + "url": "https://flakehub.com/f/cachix/git-hooks.nix/0.1.941" } }, "nixpkgs": { "locked": { - "lastModified": 1763948260, - "narHash": "sha256-zZk7fn2ARAqmLwaYTpxBJmj81KIdz11NiWt7ydHHD/M=", - "rev": "1c8ba8d3f7634acac4a2094eef7c32ad9106532c", + "lastModified": 1761597516, + "narHash": "sha256-wxX7u6D2rpkJLWkZ2E932SIvDJW8+ON/0Yy8+a5vsDU=", + "rev": "daf6dc47aa4b44791372d6139ab7b25269184d55", + "revCount": 811874, "type": "tarball", - "url": "https://releases.nixos.org/nixos/25.05/nixos-25.05.813095.1c8ba8d3f763/nixexprs.tar.xz" + "url": 
"https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.2505.811874%2Brev-daf6dc47aa4b44791372d6139ab7b25269184d55/019a3494-3498-707e-9086-1fb81badc7fe/source.tar.gz" }, "original": { "type": "tarball", - "url": "https://channels.nixos.org/nixos-25.05/nixexprs.tar.xz" + "url": "https://flakehub.com/f/NixOS/nixpkgs/0.2505" } }, "nixpkgs-23-11": { @@ -108,7 +104,6 @@ }, "root": { "inputs": { - "flake-compat": "flake-compat", "flake-parts": "flake-parts", "git-hooks-nix": "git-hooks-nix", "nixpkgs": "nixpkgs", diff --git a/flake.nix b/flake.nix index d35363ab2e53..b32a95b06a23 100644 --- a/flake.nix +++ b/flake.nix @@ -1,24 +1,18 @@ { description = "The purely functional package manager"; - inputs.nixpkgs.url = "https://channels.nixos.org/nixos-25.05/nixexprs.tar.xz"; + inputs.nixpkgs.url = "https://flakehub.com/f/NixOS/nixpkgs/0.2505"; inputs.nixpkgs-regression.url = "github:NixOS/nixpkgs/215d4d0fd80ca5163643b03a33fde804a29cc1e2"; inputs.nixpkgs-23-11.url = "github:NixOS/nixpkgs/a62e6edd6d5e1fa0329b8653c801147986f8d446"; - inputs.flake-compat = { - url = "github:edolstra/flake-compat"; - flake = false; - }; # dev tooling - inputs.flake-parts.url = "github:hercules-ci/flake-parts"; - inputs.git-hooks-nix.url = "github:cachix/git-hooks.nix"; + inputs.flake-parts.url = "https://flakehub.com/f/hercules-ci/flake-parts/0.1"; + inputs.git-hooks-nix.url = "https://flakehub.com/f/cachix/git-hooks.nix/0.1.941"; # work around https://github.com/NixOS/nix/issues/7730 inputs.flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs"; inputs.git-hooks-nix.inputs.nixpkgs.follows = "nixpkgs"; - inputs.git-hooks-nix.inputs.nixpkgs-stable.follows = "nixpkgs"; # work around 7730 and https://github.com/NixOS/nix/issues/7807 - inputs.git-hooks-nix.inputs.flake-compat.follows = ""; inputs.git-hooks-nix.inputs.gitignore.follows = ""; outputs = @@ -34,26 +28,24 @@ officialRelease = true; - linux32BitSystems = [ "i686-linux" ]; + linux32BitSystems = [ ]; linux64BitSystems = [ "x86_64-linux" 
"aarch64-linux" ]; linuxSystems = linux32BitSystems ++ linux64BitSystems; darwinSystems = [ - "x86_64-darwin" "aarch64-darwin" ]; systems = linuxSystems ++ darwinSystems; crossSystems = [ - "armv6l-unknown-linux-gnueabihf" - "armv7l-unknown-linux-gnueabihf" - "riscv64-unknown-linux-gnu" + #"armv6l-unknown-linux-gnueabihf" + #"armv7l-unknown-linux-gnueabihf" + #"riscv64-unknown-linux-gnu" # Disabled because of https://github.com/NixOS/nixpkgs/issues/344423 # "x86_64-unknown-netbsd" - "x86_64-unknown-freebsd" - "x86_64-w64-mingw32" + #"x86_64-unknown-freebsd" ]; stdenvs = [ @@ -372,6 +364,40 @@ nix-manual-manpages-only = nixpkgsFor.${system}.native.nixComponents2.nix-manual-manpages-only; nix-internal-api-docs = nixpkgsFor.${system}.native.nixComponents2.nix-internal-api-docs; nix-external-api-docs = nixpkgsFor.${system}.native.nixComponents2.nix-external-api-docs; + + fallbackPathsNix = + let + pkgs = nixpkgsFor.${system}.native; + + closures = forAllSystems (system: self.packages.${system}.default.outPath); + + closures_json = + pkgs.runCommand "versions.json" + { + buildInputs = [ pkgs.jq ]; + passAsFile = [ "json" ]; + json = builtins.toJSON closures; + } + '' + cat "$jsonPath" | jq . > $out + ''; + + closures_nix = + pkgs.runCommand "versions.nix" + { + buildInputs = [ pkgs.jq ]; + passAsFile = [ "template" ]; + jsonPath = closures_json; + template = '' + builtins.fromJSON('''@closures@''') + ''; + } + '' + export closures=$(cat "$jsonPath"); + substituteAll "$templatePath" "$out" + ''; + in + closures_nix; } # We need to flatten recursive attribute sets of derivations to pass `flake check`. // @@ -434,8 +460,6 @@ { # These attributes go right into `packages.`. 
"${pkgName}" = nixpkgsFor.${system}.native.nixComponents2.${pkgName}; - "${pkgName}-static" = nixpkgsFor.${system}.native.pkgsStatic.nixComponents2.${pkgName}; - "${pkgName}-llvm" = nixpkgsFor.${system}.native.pkgsLLVM.nixComponents2.${pkgName}; } // lib.optionalAttrs supportsCross ( flatMapAttrs (lib.genAttrs crossSystems (_: { })) ( @@ -446,6 +470,9 @@ "${pkgName}-${crossSystem}" = nixpkgsFor.${system}.cross.${crossSystem}.nixComponents2.${pkgName}; } ) + // { + "${pkgName}-static" = nixpkgsFor.${system}.native.pkgsStatic.nixComponents2.${pkgName}; + } ) // flatMapAttrs (lib.genAttrs stdenvs (_: { })) ( stdenvName: @@ -455,6 +482,10 @@ "${pkgName}-${stdenvName}" = nixpkgsFor.${system}.nativeForStdenv.${stdenvName}.nixComponents2.${pkgName}; } + // lib.optionalAttrs supportsCross { + "${pkgName}-${stdenvName}-static" = + nixpkgsFor.${system}.nativeForStdenv.${stdenvName}.pkgsStatic.nixComponents2.${pkgName}; + } ) ) // lib.optionalAttrs (builtins.elem system linux64BitSystems) { @@ -512,32 +543,6 @@ } ) ) - // lib.optionalAttrs (!nixpkgsFor.${system}.native.stdenv.isDarwin) ( - prefixAttrs "static" ( - forAllStdenvs ( - stdenvName: - makeShell { - pkgs = nixpkgsFor.${system}.nativeForStdenv.${stdenvName}.pkgsStatic; - } - ) - ) - // prefixAttrs "llvm" ( - forAllStdenvs ( - stdenvName: - makeShell { - pkgs = nixpkgsFor.${system}.nativeForStdenv.${stdenvName}.pkgsLLVM; - } - ) - ) - // prefixAttrs "cross" ( - forAllCrossSystems ( - crossSystem: - makeShell { - pkgs = nixpkgsFor.${system}.cross.${crossSystem}; - } - ) - ) - ) // { native = self.devShells.${system}.native-stdenv; default = self.devShells.${system}.native; diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 414e6c570ab4..7f7447b19e49 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -102,6 +102,7 @@ # Don't format vendored code ''^doc/manual/redirects\.js$'' ''^doc/manual/theme/highlight\.js$'' + ''^src/libfetchers/builtin-flake-registry\.json$'' 
      ];
    };
    shellcheck = {
diff --git a/maintainers/invalidate-store-paths.sh b/maintainers/invalidate-store-paths.sh
new file mode 100755
index 000000000000..a075e2621834
--- /dev/null
+++ b/maintainers/invalidate-store-paths.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+set -euo pipefail
+set -x
+
+git ls-files -z \
+  | xargs -0 grep -o '[0123456789abcdfghijklmnpqrsvwxyz]\{32\}' 2> /dev/null \
+  | rev \
+  | cut -d: -f1 \
+  | rev \
+  | sort \
+  | uniq \
+  | while read -r oldhash; do
+    if ! curl --fail -I "https://cache.nixos.org/$oldhash.narinfo" > /dev/null 2>&1; then
+      continue
+    fi
+
+    newhash=$(
+      nix eval --expr "builtins.toFile \"006c6ssvddri1sg34wnw65mzd05pcp3qliylxlhv49binldajba5\" \"$oldhash\"" \
+        | cut -d- -f1 \
+        | cut -d/ -f4
+    )
+
+    msg=$(printf "bad: %s -> %s" "$oldhash" "$newhash")
+    echo "$msg"
+    git ls-files -z \
+      | xargs -0 grep -a -l "$oldhash" 2> /dev/null \
+      | while read -r file; do
+        [ -L "$file" ] && continue
+        perl -pi -e "s/$oldhash/$newhash/g" "$file" || true
+      done || true
+    git commit -am "$msg"
+  done
diff --git a/maintainers/link-headers b/maintainers/link-headers
new file mode 100755
index 000000000000..2457a2dc8295
--- /dev/null
+++ b/maintainers/link-headers
@@ -0,0 +1,83 @@
+#!/usr/bin/env python3
+
+# This script must be run from the root of the Nix repository.
+#
+# For include path hygiene, we need to put headers in a separate
+# directory than sources. But during development, it is nice to have
+# paths that are similar for headers and source files, e.g.
+# `foo/bar/baz.{cc,hh}`, e.g. for less typing when opening one file, and
+# then opening the other file.
+#
+# This script symlinks the headers next to the source files to
+# facilitate such a development workflow. It also updates
+# `.git/info/exclude` so that the symlinks are not accidentally committed
+# by mistake.
+ +from pathlib import Path +import subprocess +import os + + +def main() -> None: + # Path to the source directory + GIT_TOPLEVEL = Path( + subprocess.run( + ["git", "rev-parse", "--show-toplevel"], + text=True, + stdout=subprocess.PIPE, + check=True, + ).stdout.strip() + ) + + # Get header files from git + result = subprocess.run( + ["git", "-C", str(GIT_TOPLEVEL), "ls-files", "*/include/nix/**.hh"], + text=True, + stdout=subprocess.PIPE, + check=True, + ) + header_files = result.stdout.strip().split("\n") + header_files.sort() + + links = [] + for file_str in header_files: + project_str, header_str = file_str.split("/include/nix/", 1) + project = Path(project_str) + header = Path(header_str) + + # Reconstruct the full path (relative to SRC_DIR) to the header file. + file = project / "include" / "nix" / header + + # The symlink should be created at "project/header", i.e. next to the project's sources. + link = project / header + + # Compute a relative path from the symlink's parent directory to the actual header file. + relative_source = os.path.relpath( + GIT_TOPLEVEL / file, GIT_TOPLEVEL / link.parent + ) + + # Create the symbolic link. + full_link_path = GIT_TOPLEVEL / link + full_link_path.parent.mkdir(parents=True, exist_ok=True) + if full_link_path.is_symlink(): + full_link_path.unlink() + full_link_path.symlink_to(relative_source) + links.append(link) + + # Generate .gitignore file + gitignore_path = GIT_TOPLEVEL / ".git" / "info" / "exclude" + gitignore_path.parent.mkdir(parents=True, exist_ok=True) + with gitignore_path.open("w") as gitignore: + gitignore.write("# DO NOT EDIT! 
Autogenerated\n")
+        gitignore.write(
+            "# Symlinks for headers to be next to sources for development\n"
+        )
+        gitignore.write('# Run "maintainers/link-headers" to regenerate\n\n')
+
+        for link in links:
+            gitignore.write(f"/{link}\n")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/nix-meson-build-support/common/meson.build b/nix-meson-build-support/common/meson.build
index 5fcf557e70ba..b192c0d03521 100644
--- a/nix-meson-build-support/common/meson.build
+++ b/nix-meson-build-support/common/meson.build
@@ -65,5 +65,14 @@ endif
 
 # Darwin ld doesn't like "X.Y.ZpreABCD+W"
 nix_soversion = meson.project_version().split('+')[0].split('pre')[0]
+cxx = meson.get_compiler('cpp')
+
+# Clang does not support prelinking on static builds
+if cxx.get_id() == 'clang' and get_option('default_library') == 'static'
+  prelink = false
+else
+  prelink = true
+endif
+
 subdir('assert-fail')
 subdir('asan-options')
diff --git a/nix-meson-build-support/default-system-cpu/meson.build b/nix-meson-build-support/default-system-cpu/meson.build
index f63b07975b6e..3e872578efca 100644
--- a/nix-meson-build-support/default-system-cpu/meson.build
+++ b/nix-meson-build-support/default-system-cpu/meson.build
@@ -14,6 +14,6 @@ if (host_machine.cpu_family() in [ 'ppc64', 'ppc' ]) and host_machine.endian() =
   nix_system_cpu += 'le'
 elif host_machine.cpu_family() in [ 'mips64', 'mips' ] and host_machine.endian() == 'little'
   nix_system_cpu += 'el'
-elif host_machine.cpu_family() == 'arm'
+elif host_machine.cpu_family() in [ 'arm', 'arm64' ]
   nix_system_cpu = host_machine.cpu()
 endif
diff --git a/packaging/components.nix b/packaging/components.nix
index dbf2180e8942..6402e8b7b2f8 100644
--- a/packaging/components.nix
+++ b/packaging/components.nix
@@ -27,7 +27,7 @@ let
     pkg-config
     ;
 
-  baseVersion = lib.fileContents ../.version;
+  baseVersion = lib.fileContents ../.version-determinate;
 
   versionSuffix = lib.optionalString
(!officialRelease) "pre"; @@ -51,15 +51,6 @@ let exts: userFn: stdenv.mkDerivation (lib.extends (lib.composeManyExtensions exts) userFn); setVersionLayer = finalAttrs: prevAttrs: { - preConfigure = - prevAttrs.preConfigure or "" - + - # Update the repo-global .version file. - # Symlink ./.version points there, but by default only workDir is writable. - '' - chmod u+w ./.version - echo ${finalAttrs.version} > ./.version - ''; }; localSourceLayer = diff --git a/packaging/dependencies.nix b/packaging/dependencies.nix index 7b7ee0ecf4d8..f5e39e6e005f 100644 --- a/packaging/dependencies.nix +++ b/packaging/dependencies.nix @@ -16,9 +16,41 @@ in scope: { inherit stdenv; + libblake3 = + (pkgs.libblake3.override { + inherit stdenv; + # Nixpkgs disables tbb on static + useTBB = !stdenv.hostPlatform.isStatic; + }) + # For some reason that is not clear, it is wanting to use libgcc_eh which is not available. + # Force this to be built with compiler-rt & libunwind over libgcc_eh works. + # Issue: https://github.com/NixOS/nixpkgs/issues/177129 + .overrideAttrs + ( + attrs: + lib.optionalAttrs + ( + stdenv.cc.isClang + && stdenv.hostPlatform.isStatic + && stdenv.cc.libcxx != null + && stdenv.cc.libcxx.isLLVM + ) + { + NIX_CFLAGS_COMPILE = [ + "-rtlib=compiler-rt" + "-unwindlib=libunwind" + ]; + + buildInputs = [ + pkgs.llvmPackages.libunwind + ]; + } + ); + boehmgc = (pkgs.boehmgc.override { enableLargeConfig = true; + inherit stdenv; }).overrideAttrs (attrs: { # Increase the initial mark stack size to avoid stack @@ -27,20 +59,51 @@ scope: { # small, run Nix with GC_PRINT_STATS=1 and look for messages # such as `Mark stack overflow`, `No room to copy back mark # stack`, and `Grew mark stack to ... frames`. - NIX_CFLAGS_COMPILE = "-DINITIAL_MARK_STACK_SIZE=1048576"; + NIX_CFLAGS_COMPILE = [ + "-DINITIAL_MARK_STACK_SIZE=1048576" + ] + # For some reason that is not clear, it is wanting to use libgcc_eh which is not available. 
+ # Force this to be built with compiler-rt & libunwind over libgcc_eh works. + # Issue: https://github.com/NixOS/nixpkgs/issues/177129 + ++ + lib.optionals + ( + stdenv.cc.isClang + && stdenv.hostPlatform.isStatic + && stdenv.cc.libcxx != null + && stdenv.cc.libcxx.isLLVM + ) + [ + "-rtlib=compiler-rt" + "-unwindlib=libunwind" + ]; + + buildInputs = + (attrs.buildInputs or [ ]) + ++ lib.optional ( + stdenv.cc.isClang + && stdenv.hostPlatform.isStatic + && stdenv.cc.libcxx != null + && stdenv.cc.libcxx.isLLVM + ) pkgs.llvmPackages.libunwind; }); - lowdown = pkgs.lowdown.overrideAttrs (prevAttrs: rec { - version = "2.0.2"; - src = pkgs.fetchurl { - url = "https://kristaps.bsd.lv/lowdown/snapshots/lowdown-${version}.tar.gz"; - hash = "sha512-cfzhuF4EnGmLJf5EGSIbWqJItY3npbRSALm+GarZ7SMU7Hr1xw0gtBFMpOdi5PBar4TgtvbnG4oRPh+COINGlA=="; - }; - nativeBuildInputs = prevAttrs.nativeBuildInputs ++ [ pkgs.buildPackages.bmake ]; - postInstall = - lib.replaceStrings [ "lowdown.so.1" "lowdown.1.dylib" ] [ "lowdown.so.2" "lowdown.2.dylib" ] - (prevAttrs.postInstall or ""); - }); + lowdown = + if lib.versionAtLeast pkgs.lowdown.version "2.0.2" then + pkgs.lowdown + else + pkgs.lowdown.overrideAttrs (prevAttrs: rec { + version = "2.0.2"; + src = pkgs.fetchurl { + url = "https://kristaps.bsd.lv/lowdown/snapshots/lowdown-${version}.tar.gz"; + hash = "sha512-cfzhuF4EnGmLJf5EGSIbWqJItY3npbRSALm+GarZ7SMU7Hr1xw0gtBFMpOdi5PBar4TgtvbnG4oRPh+COINGlA=="; + }; + patches = [ ]; + nativeBuildInputs = prevAttrs.nativeBuildInputs ++ [ pkgs.buildPackages.bmake ]; + postInstall = + lib.replaceStrings [ "lowdown.so.1" "lowdown.1.dylib" ] [ "lowdown.so.2" "lowdown.2.dylib" ] + (prevAttrs.postInstall or ""); + }); # TODO: Remove this when https://github.com/NixOS/nixpkgs/pull/442682 is included in a stable release toml11 = @@ -66,12 +129,22 @@ scope: { "--with-coroutine" "--with-iostreams" "--with-url" + "--with-thread" ]; enableIcu = false; + inherit stdenv; }).overrideAttrs (old: { # Need to remove 
`--with-*` to use `--with-libraries=...` buildPhase = lib.replaceStrings [ "--without-python" ] [ "" ] old.buildPhase; installPhase = lib.replaceStrings [ "--without-python" ] [ "" ] old.installPhase; }); + + wasmtime = pkgs.callPackage ./wasmtime.nix { }; + + curl = pkgs.curl.override { + # libpsl uses a data file needed at runtime, not useful for nix. + pslSupport = !stdenv.hostPlatform.isStatic; + idnSupport = !stdenv.hostPlatform.isStatic; + }; } diff --git a/packaging/dev-shell.nix b/packaging/dev-shell.nix index eecfa2ea84c0..8f963f961fb1 100644 --- a/packaging/dev-shell.nix +++ b/packaging/dev-shell.nix @@ -215,7 +215,7 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( }; # Remove the version suffix to avoid unnecessary attempts to substitute in nix develop - version = lib.fileContents ../.version; + version = lib.fileContents ../.version-determinate; name = finalAttrs.pname; installFlags = "sysconfdir=$(out)/etc"; @@ -259,10 +259,13 @@ pkgs.nixComponents2.nix-util.overrideAttrs ( # We use this shell with the local checkout, not unpackPhase. src = null; + # Workaround https://sourceware.org/pipermail/gdb-patches/2025-October/221398.html # Remove when gdb fix is rolled out everywhere. 
separateDebugInfo = false; + mesonBuildType = "debugoptimized"; + env = { # For `make format`, to work without installing pre-commit _NIX_PRE_COMMIT_HOOKS_CONFIG = "${(pkgs.formats.yaml { }).generate "pre-commit-config.yaml" diff --git a/packaging/everything.nix b/packaging/everything.nix index f6bdad4907b6..3206b8ba4235 100644 --- a/packaging/everything.nix +++ b/packaging/everything.nix @@ -75,7 +75,7 @@ let }; devdoc = buildEnv { - name = "nix-${nix-cli.version}-devdoc"; + name = "determinate-nix-${nix-cli.version}-devdoc"; paths = [ nix-internal-api-docs nix-external-api-docs @@ -84,7 +84,7 @@ let in stdenv.mkDerivation (finalAttrs: { - pname = "nix"; + pname = "determinate-nix"; version = nix-cli.version; /** diff --git a/packaging/hydra.nix b/packaging/hydra.nix index 3a31314f709e..9839dd621639 100644 --- a/packaging/hydra.nix +++ b/packaging/hydra.nix @@ -122,77 +122,6 @@ rec { system: self.devShells.${system}.default.inputDerivation )) [ "i686-linux" ]; - buildStatic = forAllPackages ( - pkgName: - lib.genAttrs linux64BitSystems ( - system: nixpkgsFor.${system}.native.pkgsStatic.nixComponents2.${pkgName} - ) - ); - - buildCross = forAllPackages ( - pkgName: - # Hack to avoid non-evaling package - ( - if pkgName == "nix-functional-tests" then - lib.flip builtins.removeAttrs [ "x86_64-w64-mingw32" ] - else - lib.id - ) - ( - forAllCrossSystems ( - crossSystem: - lib.genAttrs [ "x86_64-linux" ] ( - system: nixpkgsFor.${system}.cross.${crossSystem}.nixComponents2.${pkgName} - ) - ) - ) - ); - - # Builds with sanitizers already have GC disabled, so this buildNoGc can just - # point to buildWithSanitizers in order to reduce the load on hydra. - buildNoGc = buildWithSanitizers; - - buildWithSanitizers = - let - components = forAllSystems ( - system: - let - pkgs = nixpkgsFor.${system}.native; - in - pkgs.nixComponents2.overrideScope ( - self: super: { - # Boost coroutines fail with ASAN on darwin. 
- withASan = !pkgs.stdenv.buildPlatform.isDarwin; - withUBSan = true; - nix-expr = super.nix-expr.override { enableGC = false; }; - # Unclear how to make Perl bindings work with a dynamically linked ASAN. - nix-perl-bindings = null; - } - ) - ); - in - forAllPackages (pkgName: forAllSystems (system: components.${system}.${pkgName})); - - buildNoTests = forAllSystems (system: nixpkgsFor.${system}.native.nixComponents2.nix-cli); - - # Toggles some settings for better coverage. Windows needs these - # library combinations, and Debian build Nix with GNU readline too. - buildReadlineNoMarkdown = - let - components = forAllSystems ( - system: - nixpkgsFor.${system}.native.nixComponents2.overrideScope ( - self: super: { - nix-cmd = super.nix-cmd.override { - enableMarkdown = false; - readlineFlavor = "readline"; - }; - } - ) - ); - in - forAllPackages (pkgName: forAllSystems (system: components.${system}.${pkgName})); - # Perl bindings for various platforms. perlBindings = forAllSystems (system: nixpkgsFor.${system}.native.nixComponents2.nix-perl-bindings); @@ -203,30 +132,6 @@ rec { system: nixpkgsFor.${system}.native.callPackage ./binary-tarball.nix { } ); - binaryTarballCross = lib.genAttrs [ "x86_64-linux" ] ( - system: - forAllCrossSystems ( - crossSystem: nixpkgsFor.${system}.cross.${crossSystem}.callPackage ./binary-tarball.nix { } - ) - ); - - # The first half of the installation script. This is uploaded - # to https://nixos.org/nix/install. It downloads the binary - # tarball for the user's system and calls the second half of the - # installation script. 
- installerScript = installScriptFor [ - # Native - self.hydraJobs.binaryTarball."x86_64-linux" - self.hydraJobs.binaryTarball."i686-linux" - self.hydraJobs.binaryTarball."aarch64-linux" - self.hydraJobs.binaryTarball."x86_64-darwin" - self.hydraJobs.binaryTarball."aarch64-darwin" - # Cross - self.hydraJobs.binaryTarballCross."x86_64-linux"."armv6l-unknown-linux-gnueabihf" - self.hydraJobs.binaryTarballCross."x86_64-linux"."armv7l-unknown-linux-gnueabihf" - self.hydraJobs.binaryTarballCross."x86_64-linux"."riscv64-unknown-linux-gnu" - ]; - installerScriptForGHA = forAllSystems ( system: nixpkgsFor.${system}.native.callPackage ./installer { @@ -294,6 +199,19 @@ rec { pkgs = nixpkgsFor.${system}.native; } ); + + nixpkgsLibTestsLazy = forAllSystems ( + system: + lib.overrideDerivation + (import (nixpkgs + "/lib/tests/test-with-nix.nix") { + lib = nixpkgsFor.${system}.native.lib; + nix = self.packages.${system}.nix-cli; + pkgs = nixpkgsFor.${system}.native; + }) + (_: { + "NIX_CONFIG" = "lazy-trees = true"; + }) + ); }; metrics.nixpkgs = import "${nixpkgs-regression}/pkgs/top-level/metrics.nix" { @@ -308,17 +226,12 @@ rec { in pkgs.runCommand "install-tests" { againstSelf = testNixVersions pkgs pkgs.nix; - againstCurrentLatest = - # FIXME: temporarily disable this on macOS because of #3605. - if system == "x86_64-linux" then testNixVersions pkgs pkgs.nixVersions.latest else null; + #againstCurrentLatest = + # # FIXME: temporarily disable this on macOS because of #3605. 
+ # if system == "x86_64-linux" then testNixVersions pkgs pkgs.nixVersions.latest else null; # Disabled because the latest stable version doesn't handle # `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work # againstLatestStable = testNixVersions pkgs pkgs.nixStable; } "touch $out" ); - - installerTests = import ../tests/installer { - binaryTarballs = self.hydraJobs.binaryTarball; - inherit nixpkgsFor; - }; } diff --git a/packaging/installer/default.nix b/packaging/installer/default.nix index e171f36f99f7..a8e344b496c8 100644 --- a/packaging/installer/default.nix +++ b/packaging/installer/default.nix @@ -32,7 +32,7 @@ runCommand "installer-script" in '' \ - --replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${tarball}/*.tar.xz) \ + --replace '@tarballHash_${system}@' $(nix hash-file --base16 --type sha256 ${tarball}/*.tar.xz) \ --replace '@tarballPath_${system}@' $(tarballPath ${tarball}/*.tar.xz) \ '' ) tarballs diff --git a/packaging/wasmtime.nix b/packaging/wasmtime.nix new file mode 100644 index 000000000000..3e8b71280c07 --- /dev/null +++ b/packaging/wasmtime.nix @@ -0,0 +1,74 @@ +# Stripped-down version of https://github.com/NixOS/nixpkgs/blob/master/pkgs/by-name/wa/wasmtime/package.nix, +# license: https://github.com/NixOS/nixpkgs/blob/master/COPYING +{ + lib, + stdenv, + rust_1_89, + fetchFromGitHub, + cmake, + enableShared ? !stdenv.hostPlatform.isStatic, + enableStatic ? stdenv.hostPlatform.isStatic, +}: +rust_1_89.packages.stable.rustPlatform.buildRustPackage (finalAttrs: { + pname = "wasmtime"; + version = "40.0.2"; + + src = fetchFromGitHub { + owner = "bytecodealliance"; + repo = "wasmtime"; + tag = "v${finalAttrs.version}"; + hash = "sha256-4y9WpCdyuF/Tp2k/1d5rZxwYunWNdeibEsFgHcBC52Q="; + fetchSubmodules = true; + }; + + # Disable cargo-auditable until https://github.com/rust-secure-code/cargo-auditable/issues/124 is solved. 
+ auditable = false; + + cargoHash = "sha256-aTPgnuBvOIqg1+Sa2ZLdMTLujm8dKGK5xpZ3qHpr3f8="; + cargoBuildFlags = [ + "--package" + "wasmtime-c-api" + "--no-default-features" + "--features cranelift,wasi,pooling-allocator,wat,demangle,gc-null" + ]; + + outputs = [ + "out" + "lib" + ]; + + nativeBuildInputs = [ + cmake + ]; + + doCheck = + with stdenv.buildPlatform; + # SIMD tests are only executed on platforms that support all + # required processor features (e.g. SSE3, SSSE3 and SSE4.1 on x86_64): + # https://github.com/bytecodealliance/wasmtime/blob/v9.0.0/cranelift/codegen/src/isa/x64/mod.rs#L220 + (isx86_64 -> sse3Support && ssse3Support && sse4_1Support) + && + # The dependency `wasi-preview1-component-adapter` fails to build because of: + # error: linker `rust-lld` not found + !isAarch64; + + postInstall = + let + inherit (stdenv.hostPlatform.rust) cargoShortTarget; + in + '' + moveToOutput lib $lib + ${lib.optionalString (!enableShared) "rm -f $lib/lib/*.so{,.*}"} + ${lib.optionalString (!enableStatic) "rm -f $lib/lib/*.a"} + + # copy the build.rs generated c-api headers + # https://github.com/rust-lang/cargo/issues/9661 + mkdir -p $out + cp -r target/${cargoShortTarget}/release/build/wasmtime-c-api-impl-*/out/include $out/include + '' + + lib.optionalString stdenv.hostPlatform.isDarwin '' + install_name_tool -id \ + $lib/lib/libwasmtime.dylib \ + $lib/lib/libwasmtime.dylib + ''; +}) diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh index 5ff760a6143f..683beca10fde 100644 --- a/scripts/install-multi-user.sh +++ b/scripts/install-multi-user.sh @@ -53,8 +53,8 @@ readonly PROFILE_NIX_FILE_FISH="$NIX_ROOT/var/nix/profiles/default/etc/profile.d readonly NIX_INSTALLED_NIX="@nix@" readonly NIX_INSTALLED_CACERT="@cacert@" -#readonly NIX_INSTALLED_NIX="/nix/store/j8dbv5w6jl34caywh2ygdy88knx1mdf7-nix-2.3.6" -#readonly NIX_INSTALLED_CACERT="/nix/store/7dxhzymvy330i28ii676fl1pqwcahv2f-nss-cacert-3.49.2" +#readonly 
NIX_INSTALLED_NIX="/nix/store/byi37zv50wnfrpp4d81z3spswd5zva37-nix-2.3.6" +#readonly NIX_INSTALLED_CACERT="/nix/store/7pi45g541xa8ahwgpbpy7ggsl0xj1jj6-nss-cacert-3.49.2" EXTRACTED_NIX_PATH="$(dirname "$0")" readonly EXTRACTED_NIX_PATH diff --git a/shell.nix b/shell.nix deleted file mode 100644 index 918f4bbd9e9e..000000000000 --- a/shell.nix +++ /dev/null @@ -1,3 +0,0 @@ -(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") { - src = ./.; -}).shellNix diff --git a/src/external-api-docs/package.nix b/src/external-api-docs/package.nix index b194e16d4608..28cde8c09e69 100644 --- a/src/external-api-docs/package.nix +++ b/src/external-api-docs/package.nix @@ -14,7 +14,7 @@ let in mkMesonDerivation (finalAttrs: { - pname = "nix-external-api-docs"; + pname = "determinate-nix-external-api-docs"; inherit version; workDir = ./.; diff --git a/src/internal-api-docs/package.nix b/src/internal-api-docs/package.nix index 6c4f354aee5c..636c19653eab 100644 --- a/src/internal-api-docs/package.nix +++ b/src/internal-api-docs/package.nix @@ -14,7 +14,7 @@ let in mkMesonDerivation (finalAttrs: { - pname = "nix-internal-api-docs"; + pname = "determinate-nix-internal-api-docs"; inherit version; workDir = ./.; diff --git a/src/libcmd/builtin-flake-schemas.nix b/src/libcmd/builtin-flake-schemas.nix new file mode 100644 index 000000000000..f326b413c9e6 --- /dev/null +++ b/src/libcmd/builtin-flake-schemas.nix @@ -0,0 +1,438 @@ +{ + description = "Schemas for well-known Nix flake output types"; + + outputs = + { self }: + let + mapAttrsToList = f: attrs: map (name: f name attrs.${name}) (builtins.attrNames attrs); + + checkModule = module: builtins.isAttrs module || builtins.isFunction module; + + schemasSchema = { + version = 1; + doc = '' + The `schemas` flake output is used to define and document flake outputs. + For the expected format, consult the Nix manual. 
+ ''; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (schemaName: schemaDef: { + shortDescription = "A schema checker for the `${schemaName}` flake output"; + evalChecks.isValidSchema = + schemaDef.version or 0 == 1 + && schemaDef ? doc + && builtins.isString (schemaDef.doc) + && schemaDef ? inventory + && builtins.isFunction (schemaDef.inventory); + what = "flake schema"; + }) output + ); + }; + + appsSchema = { + version = 1; + doc = '' + The `apps` output provides commands available via `nix run`. + ''; + roles.nix-run = { }; + appendSystem = true; + defaultAttrPath = [ "default" ]; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs ( + system: apps: + let + forSystems = [ system ]; + in + { + inherit forSystems; + children = builtins.mapAttrs (appName: app: { + inherit forSystems; + evalChecks.isValidApp = + app ? type + && app.type == "app" + && app ? program + && builtins.isString app.program + && + builtins.removeAttrs app [ + "type" + "program" + "meta" + ] == { }; + what = "app"; + }) apps; + } + ) output + ); + }; + + packagesSchema = { + version = 1; + doc = '' + The `packages` flake output contains packages that can be added to a shell using `nix shell`. + ''; + roles.nix-build = { }; + roles.nix-run = { }; + roles.nix-develop = { }; + roles.nix-search = { }; + appendSystem = true; + defaultAttrPath = [ "default" ]; + inventory = self.lib.derivationsInventory "package" false; + }; + + dockerImagesSchema = { + version = 1; + doc = '' + The `dockerImages` flake output contains derivations that build valid Docker images. + ''; + inventory = self.lib.derivationsInventory "Docker image" false; + }; + + legacyPackagesSchema = { + version = 1; + doc = '' + The `legacyPackages` flake output is similar to `packages` but different in that it can be nested and thus contain attribute sets that contain more packages. 
+ Since enumerating packages in nested attribute sets can be inefficient, you should favor `packages` over `legacyPackages`. + ''; + roles.nix-build = { }; + roles.nix-run = { }; + roles.nix-search = { }; + appendSystem = true; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (systemType: packagesForSystem: { + forSystems = [ systemType ]; + children = + let + recurse = + prefix: attrs: + builtins.mapAttrs ( + attrName: attrs: + # Necessary to deal with `AAAAAASomeThingsFailToEvaluate` etc. in Nixpkgs. + self.lib.try ( + if attrs.type or null == "derivation" then + { + forSystems = [ attrs.system ]; + shortDescription = attrs.meta.description or ""; + derivationAttrPath = [ ]; + what = "package"; + } + else + # Recurse at the first and second levels, or if the + # recurseForDerivations attribute if set. + if attrs.recurseForDerivations or false then + { + children = recurse (prefix + attrName + ".") attrs; + } + else + { + what = "unknown"; + } + ) (throw "failed") + ) attrs; + in + # The top-level cannot be a derivation. + assert packagesForSystem.type or null != "derivation"; + recurse (systemType + ".") packagesForSystem; + }) output + ); + }; + + checksSchema = { + version = 1; + doc = '' + The `checks` flake output contains derivations that will be built by `nix flake check`. + ''; + # FIXME: add role + inventory = self.lib.derivationsInventory "CI test" true; + }; + + devShellsSchema = { + version = 1; + doc = '' + The `devShells` flake output contains derivations that provide a development environment for `nix develop`. + ''; + roles.nix-develop = { }; + appendSystem = true; + defaultAttrPath = [ "default" ]; + inventory = self.lib.derivationsInventory "development environment" false; + }; + + formatterSchema = { + version = 1; + doc = '' + The `formatter` output specifies the package to use to format the project. 
+ ''; + roles.nix-fmt = { }; + appendSystem = true; + defaultAttrPath = [ ]; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (system: formatter: { + forSystems = [ system ]; + shortDescription = formatter.meta.description or ""; + derivationAttrPath = [ ]; + what = "formatter"; + isFlakeCheck = false; + }) output + ); + }; + + templatesSchema = { + version = 1; + doc = '' + The `templates` output provides project templates. + ''; + roles.nix-template = { }; + defaultAttrPath = [ "default" ]; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (templateName: template: { + shortDescription = template.description or ""; + evalChecks.isValidTemplate = + template ? path + && builtins.isPath template.path + && template ? description + && builtins.isString template.description; + what = "template"; + }) output + ); + }; + + hydraJobsSchema = { + version = 1; + doc = '' + The `hydraJobs` flake output defines derivations to be built by the Hydra continuous integration system. + ''; + allowIFD = false; + inventory = + output: + let + recurse = + prefix: attrs: + self.lib.mkChildren ( + builtins.mapAttrs ( + attrName: attrs: + if attrs.type or null == "derivation" then + { + forSystems = [ attrs.system ]; + shortDescription = attrs.meta.description or ""; + derivationAttrPath = [ ]; + what = "Hydra CI test"; + } + else + recurse (prefix + attrName + ".") attrs + ) attrs + ); + in + # The top-level cannot be a derivation. + assert output.type or null != "derivation"; + recurse "" output; + }; + + overlaysSchema = { + version = 1; + doc = '' + The `overlays` flake output defines ["overlays"](https://nixos.org/manual/nixpkgs/stable/#chap-overlays) that can be plugged into Nixpkgs. + Overlays add additional packages or modify or replace existing packages. 
+ ''; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (overlayName: overlay: { + what = "Nixpkgs overlay"; + evalChecks.isOverlay = + # FIXME: should try to apply the overlay to an actual + # Nixpkgs. But we don't have access to a nixpkgs + # flake here. Maybe this schema should be moved to the + # nixpkgs flake, where it does have access. + if !builtins.isFunction overlay then + throw "overlay is not a function, but a set instead" + else + builtins.isAttrs (overlay { } { }); + }) output + ); + }; + + nixosConfigurationsSchema = { + version = 1; + doc = '' + The `nixosConfigurations` flake output defines [NixOS system configurations](https://nixos.org/manual/nixos/stable/#ch-configuration). + ''; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (configName: machine: { + what = "NixOS configuration"; + derivationAttrPath = [ + "config" + "system" + "build" + "toplevel" + ]; + forSystems = [ machine.pkgs.stdenv.system ]; + }) output + ); + }; + + nixosModulesSchema = { + version = 1; + doc = '' + The `nixosModules` flake output defines importable [NixOS modules](https://nixos.org/manual/nixos/stable/#sec-writing-modules). + ''; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (moduleName: module: { + what = "NixOS module"; + evalChecks.isFunctionOrAttrs = checkModule module; + }) output + ); + }; + + homeConfigurationsSchema = { + version = 1; + doc = '' + The `homeConfigurations` flake output defines [Home Manager configurations](https://github.com/nix-community/home-manager). + ''; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (configName: this: { + what = "Home Manager configuration"; + derivationAttrPath = [ "activationPackage" ]; + forSystems = [ this.activationPackage.system ]; + }) output + ); + }; + + homeModulesSchema = { + version = 1; + doc = '' + The `homeModules` flake output defines importable [Home Manager](https://github.com/nix-community/home-manager) modules. 
+ ''; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (moduleName: module: { + what = "Home Manager module"; + evalChecks.isFunctionOrAttrs = checkModule module; + }) output + ); + }; + + darwinConfigurationsSchema = { + version = 1; + doc = '' + The `darwinConfigurations` flake output defines [nix-darwin configurations](https://github.com/nix-darwin/nix-darwin). + ''; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (configName: this: { + what = "nix-darwin configuration"; + derivationAttrPath = [ "system" ]; + forSystems = [ this.system.system ]; + }) output + ); + }; + + darwinModulesSchema = { + version = 1; + doc = '' + The `darwinModules` flake output defines importable [nix-darwin modules](https://github.com/nix-darwin/nix-darwin). + ''; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs (moduleName: module: { + what = "nix-darwin module"; + evalChecks.isFunctionOrAttrs = checkModule module; + }) output + ); + }; + + bundlersSchema = { + version = 1; + doc = '' + The `bundlers` flake output defines ["bundlers"](https://nix.dev/manual/nix/latest/command-ref/new-cli/nix3-bundle) that transform derivation outputs into other formats, typically self-extracting executables or container images. 
+ ''; + roles.nix-bundler = { }; + appendSystem = true; + defaultAttrPath = [ "default" ]; + inventory = + output: + self.lib.mkChildren ( + builtins.mapAttrs ( + system: bundlers: + let + forSystems = [ system ]; + in + { + inherit forSystems; + children = builtins.mapAttrs (bundlerName: bundler: { + inherit forSystems; + evalChecks.isValidBundler = builtins.isFunction bundler; + what = "bundler"; + }) bundlers; + } + ) output + ); + }; + + in + + { + # Helper functions + lib = { + try = + e: default: + let + res = builtins.tryEval e; + in + if res.success then res.value else default; + + mkChildren = children: { inherit children; }; + + derivationsInventory = + what: isFlakeCheck: output: + self.lib.mkChildren ( + builtins.mapAttrs (systemType: packagesForSystem: { + forSystems = [ systemType ]; + children = builtins.mapAttrs (packageName: package: { + forSystems = [ systemType ]; + shortDescription = package.meta.description or ""; + derivationAttrPath = [ ]; + inherit what; + isFlakeCheck = isFlakeCheck; + }) packagesForSystem; + }) output + ); + }; + + # FIXME: distinguish between available and active schemas? 
+ schemas.schemas = schemasSchema; + schemas.apps = appsSchema; + schemas.packages = packagesSchema; + schemas.legacyPackages = legacyPackagesSchema; + schemas.checks = checksSchema; + schemas.devShells = devShellsSchema; + schemas.formatter = formatterSchema; + schemas.templates = templatesSchema; + schemas.hydraJobs = hydraJobsSchema; + schemas.overlays = overlaysSchema; + schemas.nixosConfigurations = nixosConfigurationsSchema; + schemas.nixosModules = nixosModulesSchema; + schemas.homeConfigurations = homeConfigurationsSchema; + schemas.homeModules = homeModulesSchema; + schemas.darwinConfigurations = darwinConfigurationsSchema; + schemas.darwinModules = darwinModulesSchema; + schemas.dockerImages = dockerImagesSchema; + schemas.bundlers = bundlersSchema; + }; +} diff --git a/src/libcmd/call-flake-schemas.nix b/src/libcmd/call-flake-schemas.nix new file mode 100644 index 000000000000..f1604259aef7 --- /dev/null +++ b/src/libcmd/call-flake-schemas.nix @@ -0,0 +1,38 @@ +# The flake providing default schemas. +defaultSchemasFlake: + +# The flake whose contents we want to extract. +flake: + +let + + # Helper functions. + + mapAttrsToList = f: attrs: map (name: f name attrs.${name}) (builtins.attrNames attrs); + + outputNames = builtins.attrNames flake.outputs; + + schemas = flake.outputs.schemas or defaultSchemasFlake.schemas; + +in + +{ + outputs = flake.outputs; + + inventory = builtins.mapAttrs ( + outputName: _: + if schemas ? ${outputName} && schemas.${outputName}.version == 1 then + schemas.${outputName} + // ( + if flake.outputs ? 
${outputName} then + { + output = schemas.${outputName}.inventory flake.outputs.${outputName}; + } + else + { + } + ) + else + { unknown = true; } + ) (schemas // flake.outputs); +} diff --git a/src/libcmd/command.cc b/src/libcmd/command.cc index 798ef072eb14..226b65f4a7c3 100644 --- a/src/libcmd/command.cc +++ b/src/libcmd/command.cc @@ -138,6 +138,13 @@ ref EvalCommand::getEvalStore() ref EvalCommand::getEvalState() { if (!evalState) { + if (startReplOnEvalErrors && evalSettings.evalCores != 1U) { + // Disable parallel eval if the debugger is enabled, since + // they're incompatible at the moment. + warn("using the debugger disables multi-threaded evaluation"); + evalSettings.evalCores = 1; + } + evalState = std::allocate_shared( traceable_allocator(), lookupPath, getEvalStore(), fetchSettings, evalSettings, getStore()); diff --git a/src/libcmd/common-eval-args.cc b/src/libcmd/common-eval-args.cc index 30e76b2455d4..865901febf48 100644 --- a/src/libcmd/common-eval-args.cc +++ b/src/libcmd/common-eval-args.cc @@ -19,17 +19,12 @@ namespace nix { -fetchers::Settings fetchSettings; - -static GlobalConfig::Register rFetchSettings(&fetchSettings); - EvalSettings evalSettings{ settings.readOnlyMode, { { "flake", [](EvalState & state, std::string_view rest) { - experimentalFeatureSettings.require(Xp::Flakes); // FIXME `parseFlakeRef` should take a `std::string_view`. 
auto flakeRef = parseFlakeRef(fetchSettings, std::string{rest}, {}, true, false); debug("fetching flake search path element '%s''", rest); @@ -186,7 +181,6 @@ SourcePath lookupFileArg(EvalState & state, std::string_view s, const std::files } else if (hasPrefix(s, "flake:")) { - experimentalFeatureSettings.require(Xp::Flakes); auto flakeRef = parseFlakeRef(fetchSettings, std::string(s.substr(6)), {}, true, false); auto [accessor, lockedRef] = flakeRef.resolve(fetchSettings, *state.store).lazyFetch(fetchSettings, *state.store); diff --git a/src/libcmd/flake-schemas.cc b/src/libcmd/flake-schemas.cc new file mode 100644 index 000000000000..725326c0fd75 --- /dev/null +++ b/src/libcmd/flake-schemas.cc @@ -0,0 +1,362 @@ +#include "nix/cmd/flake-schemas.hh" +#include "nix/expr/eval-settings.hh" +#include "nix/fetchers/fetch-to-store.hh" +#include "nix/util/memory-source-accessor.hh" +#include "nix/util/mounted-source-accessor.hh" + +namespace nix::flake_schemas { + +using namespace eval_cache; +using namespace flake; + +static LockedFlake getBuiltinDefaultSchemasFlake(EvalState & state) +{ + auto accessor = make_ref(); + + accessor->setPathDisplay("«builtin-flake-schemas»"); + + accessor->addFile( + CanonPath("flake.nix"), +#include "builtin-flake-schemas.nix.gen.hh" + ); + + auto [storePath, narHash] = state.store->computeStorePath("source", {accessor}); + + state.allowPath(storePath); // FIXME: should just whitelist the entire virtual store + + state.storeFS->mount(CanonPath(state.store->printStorePath(storePath)), accessor); + + // Construct a dummy flakeref. 
+ auto flakeRef = parseFlakeRef( + fetchSettings, + fmt("tarball+https://builtin-flake-schemas?narHash=%s", narHash.to_string(HashFormat::SRI, true))); + + auto flake = readFlake(state, flakeRef, flakeRef, flakeRef, state.storePath(storePath), {}); + + return lockFlake(flakeSettings, state, flakeRef, {}, flake); +} + +ref call( + EvalState & state, + std::shared_ptr lockedFlake, + std::optional defaultSchemasFlake, + bool allowEvalCache) +{ + auto fingerprint = lockedFlake->getFingerprint(*state.store, state.fetchSettings); + + std::string callFlakeSchemasNix = +#include "call-flake-schemas.nix.gen.hh" + ; + + auto lockedDefaultSchemasFlake = defaultSchemasFlake + ? flake::lockFlake(flakeSettings, state, *defaultSchemasFlake, {}) + : getBuiltinDefaultSchemasFlake(state); + auto lockedDefaultSchemasFlakeFingerprint = + lockedDefaultSchemasFlake.getFingerprint(*state.store, state.fetchSettings); + + std::optional fingerprint2; + if (fingerprint && lockedDefaultSchemasFlakeFingerprint) + fingerprint2 = hashString( + HashAlgorithm::SHA256, + fmt("app:%s:%s:%s", + hashString(HashAlgorithm::SHA256, callFlakeSchemasNix).to_string(HashFormat::Base16, false), + fingerprint->to_string(HashFormat::Base16, false), + lockedDefaultSchemasFlakeFingerprint->to_string(HashFormat::Base16, false))); + + auto cache = make_ref( + allowEvalCache && evalSettings.useEvalCache && evalSettings.pureEval ? 
fingerprint2 : std::nullopt, + state, + [&state, lockedFlake, callFlakeSchemasNix, lockedDefaultSchemasFlake]() { + auto vCallFlakeSchemas = state.allocValue(); + state.eval( + state.parseExprFromString(callFlakeSchemasNix, state.rootPath(CanonPath::root)), *vCallFlakeSchemas); + + auto vFlake = state.allocValue(); + flake::callFlake(state, *lockedFlake, *vFlake); + + auto vDefaultSchemasFlake = state.allocValue(); + if (vFlake->type() == nAttrs && vFlake->attrs()->get(state.symbols.create("schemas"))) + vDefaultSchemasFlake->mkNull(); + else + flake::callFlake(state, lockedDefaultSchemasFlake, *vDefaultSchemasFlake); + + auto vRes = state.allocValue(); + Value * args[] = {vDefaultSchemasFlake, vFlake}; + state.callFunction(*vCallFlakeSchemas, args, *vRes, noPos); + + return vRes; + }); + + /* Derive the flake output attribute path from the cursor used to + traverse the inventory. We do this so we don't have to maintain + a separate attrpath for that. */ + cache->cleanupAttrPath = [&](AttrPath && attrPath) { + AttrPath res; + auto i = attrPath.begin(); + if (i == attrPath.end()) + return attrPath; + + if (state.symbols[*i] == "inventory") { + ++i; + if (i != attrPath.end()) { + res.push_back(*i++); // copy output name + if (i != attrPath.end()) + ++i; // skip "outputs" + while (i != attrPath.end()) { + ++i; // skip "children" + if (i != attrPath.end()) + res.push_back(*i++); + } + } + } + + else if (state.symbols[*i] == "outputs") { + res.insert(res.begin(), ++i, attrPath.end()); + } + + else + abort(); + + return res; + }; + + return cache; +} + +void forEachOutput( + ref inventory, + std::function output, const std::string & doc, bool isLast)> f) +{ + auto outputNames = inventory->getAttrs(); + + auto doOutputs = [&](bool allowIFD) { + evalSettings.enableImportFromDerivation.setDefault(allowIFD); + for (const auto & [i, outputName] : enumerate(outputNames)) { + auto outputInfo = inventory->getAttr(outputName); + try { + auto allowIFDAttr = 
outputInfo->maybeGetAttr("allowIFD"); + if (allowIFD != (!allowIFDAttr || allowIFDAttr->getBool())) + continue; + auto isUnknown = (bool) outputInfo->maybeGetAttr("unknown"); + auto output = outputInfo->maybeGetAttr("output"); + if (!output && !isUnknown) + // We have a schema but no corresponding output, so skip this. + continue; + Activity act(*logger, lvlInfo, actUnknown, fmt("evaluating '%s'", outputInfo->getAttrPathStr())); + f(outputName, + isUnknown ? std::shared_ptr() : output, + isUnknown ? "" : outputInfo->getAttr("doc")->getString(), + i + 1 == outputNames.size()); + } catch (Error & e) { + e.addTrace(nullptr, "while evaluating the flake output '%s':", outputInfo->getAttrPathStr()); + throw; + } + } + }; + + // Do outputs that disallow import-from-derivation first. That way, they can't depend on outputs that do allow it. + doOutputs(false); + doOutputs(true); +} + +void visit( + std::optional system, + ref node, + std::function visitLeaf, + std::function)> visitNonLeaf, + std::function node, const std::vector & systems)> visitFiltered) +{ + Activity act(*logger, lvlInfo, actUnknown, fmt("evaluating '%s'", node->getAttrPathStr())); + + /* Apply the system type filter. */ + if (system) { + if (auto forSystems = Node(node).forSystems()) { + if (std::find(forSystems->begin(), forSystems->end(), *system) == forSystems->end()) { + visitFiltered(node, *forSystems); + return; + } + } + } + + if (auto children = node->maybeGetAttr("children")) { + visitNonLeaf([&](ForEachChild f) { + auto attrNames = children->getAttrs(); + for (const auto & [i, attrName] : enumerate(attrNames)) { + try { + f(attrName, children->getAttr(attrName), i + 1 == attrNames.size()); + } catch (Error & e) { + // FIXME: make it a flake schema attribute whether to ignore evaluation errors. 
+ if (node->root->state.symbols[node->getAttrPath()[0]] != "legacyPackages") { + e.addTrace( + nullptr, "while evaluating the flake output attribute '%s':", node->getAttrPathStr()); + throw; + } + } + } + }); + } + + else + visitLeaf(Leaf(node)); +} + +std::optional> Node::forSystems() const +{ + if (auto forSystems = node->maybeGetAttr("forSystems")) + return forSystems->getListOfStrings(); + else + return std::nullopt; +} + +ref Node::getOutput(const ref & outputs) const +{ + auto res = outputs->findAlongAttrPath(node->getAttrPath()); + if (!res) + throw Error("flake output '%s' should exist according to its schema, but it doesn't", node->getAttrPathStr()); + return *res; +} + +std::optional Leaf::what() const +{ + if (auto what = node->maybeGetAttr("what")) + return what->getString(); + else + return std::nullopt; +} + +std::optional Leaf::shortDescription() const +{ + if (auto what = node->maybeGetAttr("shortDescription")) + return what->getString(); + return std::nullopt; +} + +std::optional Leaf::derivationAttrPath() const +{ + auto n = node->maybeGetAttr("derivationAttrPath"); + if (!n) + return std::nullopt; + return AttrPath::fromStrings(node->root->state, n->getListOfStrings()); +} + +std::shared_ptr Leaf::derivation(const ref & outputs) const +{ + auto path = derivationAttrPath(); + if (!path) { + auto n = node->maybeGetAttr("derivation"); + if (n) + warn( + "Flake output '%s' has a schema that uses the deprecated 'derivation' attribute instead of 'derivationAttrPath'. " + "Please update the schema to use 'derivationAttrPath' instead. 
" + "You may want to upgrade to version 0.3.0 or higher of https://github.com/DeterminateSystems/flake-schemas.", + node->getAttrPathStr()); + return n; + } + auto drv = getOutput(outputs)->findAlongAttrPath(*path); + if (!drv) + throw Error( + "flake output '%s' does not have a derivation attribute '%s'", + node->getAttrPathStr(), + path->to_string(node->root->state)); + return *drv; +} + +bool Leaf::isFlakeCheck() const +{ + auto isFlakeCheck = node->maybeGetAttr("isFlakeCheck"); + return isFlakeCheck && isFlakeCheck->getBool(); +} + +std::optional getOutputInfo(ref inventory, AttrPath attrPath) +{ + if (attrPath.empty()) + return std::nullopt; + + auto outputName = attrPath.front(); + + auto schemaInfo = inventory->maybeGetAttr(outputName); + if (!schemaInfo) + return std::nullopt; + + auto node = schemaInfo->maybeGetAttr("output"); + if (!node) + return std::nullopt; + + auto pathLeft = std::span(attrPath).subspan(1); + + while (!pathLeft.empty()) { + auto children = node->maybeGetAttr("children"); + if (!children) + break; + auto attr = pathLeft.front(); + node = children->maybeGetAttr(attr); + if (!node) + return std::nullopt; + pathLeft = pathLeft.subspan(1); + } + + return OutputInfo{ + .schemaInfo = ref(schemaInfo), + .nodeInfo = ref(node), + .leafAttrPath = AttrPath(pathLeft.begin(), pathLeft.end()), + }; +} + +Schemas getSchemas(ref inventory) +{ + auto & state(inventory->root->state); + + Schemas schemas; + + for (auto & schemaName : inventory->getAttrs()) { + auto schema = inventory->getAttr(schemaName); + + SchemaInfo schemaInfo; + + if (auto roles = schema->maybeGetAttr("roles")) { + for (auto & roleName : roles->getAttrs()) { + schemaInfo.roles.insert(std::string(state.symbols[roleName])); + } + } + + if (auto appendSystem = schema->maybeGetAttr("appendSystem")) + schemaInfo.appendSystem = appendSystem->getBool(); + + if (auto defaultAttrPath = schema->maybeGetAttr("defaultAttrPath")) { + AttrPath attrPath; + for (auto & s : 
defaultAttrPath->getListOfStrings()) + attrPath.push_back(state.symbols.create(s)); + schemaInfo.defaultAttrPath = std::move(attrPath); + } + + schemas.insert_or_assign(std::string(state.symbols[schemaName]), std::move(schemaInfo)); + } + + return schemas; +} + +} // namespace nix::flake_schemas + +namespace nix { + +MixFlakeSchemas::MixFlakeSchemas() +{ + addFlag( + {.longName = "default-flake-schemas", + .description = "The URL of the flake providing default flake schema definitions.", + .labels = {"flake-ref"}, + .handler = {&defaultFlakeSchemas}, + .completer = {[&](AddCompletions & completions, size_t, std::string_view prefix) { + completeFlakeRef(completions, getStore(), prefix); + }}}); +} + +std::optional MixFlakeSchemas::getDefaultFlakeSchemas() +{ + if (!defaultFlakeSchemas) + return std::nullopt; + else + return parseFlakeRef(fetchSettings, *defaultFlakeSchemas, absPath(getCommandBaseDir())); +} + +} // namespace nix diff --git a/src/libcmd/include/nix/cmd/command.hh b/src/libcmd/include/nix/cmd/command.hh index d1b528e2477c..ec2e0d9add4c 100644 --- a/src/libcmd/include/nix/cmd/command.hh +++ b/src/libcmd/include/nix/cmd/command.hh @@ -132,7 +132,16 @@ struct MixFlakeOptions : virtual Args, EvalCommand } }; -struct SourceExprCommand : virtual Args, MixFlakeOptions +struct MixFlakeSchemas : virtual Args, virtual StoreCommand +{ + std::optional defaultFlakeSchemas; + + MixFlakeSchemas(); + + std::optional getDefaultFlakeSchemas(); +}; + +struct SourceExprCommand : virtual Args, MixFlakeOptions, MixFlakeSchemas { std::optional file; std::optional expr; @@ -143,9 +152,13 @@ struct SourceExprCommand : virtual Args, MixFlakeOptions ref parseInstallable(ref store, const std::string & installable); - virtual Strings getDefaultFlakeAttrPaths(); - - virtual Strings getDefaultFlakeAttrPathPrefixes(); + /** + * Return a set of "roles" that this command implements + * (e.g. `nix-build` or `nix-develop`). 
This is used by flake + * schemas to determine which flake outputs are used as default + * attrpath prefixes. + */ + virtual StringSet getRoles(); /** * Complete an installable from the given prefix. @@ -214,6 +227,8 @@ struct InstallableCommand : virtual Args, SourceExprCommand { InstallableCommand(); + virtual void preRun(ref store); + virtual void run(ref store, ref installable) = 0; void run(ref store) override; @@ -372,8 +387,7 @@ void completeFlakeRefWithFragment( AddCompletions & completions, ref evalState, flake::LockFlags lockFlags, - Strings attrPathPrefixes, - const Strings & defaultFlakeAttrPaths, + const StringSet & roles, std::string_view prefix); std::string showVersions(const StringSet & versions); diff --git a/src/libcmd/include/nix/cmd/common-eval-args.hh b/src/libcmd/include/nix/cmd/common-eval-args.hh index 67cb0714827f..4f9ebb83df53 100644 --- a/src/libcmd/include/nix/cmd/common-eval-args.hh +++ b/src/libcmd/include/nix/cmd/common-eval-args.hh @@ -25,9 +25,6 @@ namespace flake { struct Settings; } -/** - * @todo Get rid of global settings variables - */ extern fetchers::Settings fetchSettings; /** diff --git a/src/libcmd/include/nix/cmd/flake-schemas.hh b/src/libcmd/include/nix/cmd/flake-schemas.hh new file mode 100644 index 000000000000..c1ceefdc35aa --- /dev/null +++ b/src/libcmd/include/nix/cmd/flake-schemas.hh @@ -0,0 +1,93 @@ +#pragma once + +#include "nix/expr/eval-cache.hh" +#include "nix/flake/flake.hh" +#include "nix/cmd/command.hh" + +namespace nix::flake_schemas { + +using namespace eval_cache; + +ref call( + EvalState & state, + std::shared_ptr lockedFlake, + std::optional defaultSchemasFlake, + bool allowEvalCache = true); + +void forEachOutput( + ref inventory, + std::function output, const std::string & doc, bool isLast)> f); + +/** + * A convenience wrapper around `AttrCursor` for nodes in the `inventory` tree returned by call-flake-schemas.nix. 
+ */ +struct Node +{ + const ref node; + + Node(const ref & node) + : node(node) + { + } + + /** + * Return the `forSystems` attribute. This can be null, which + * means "all systems". + */ + std::optional> forSystems() const; + + /** + * Return the actual output corresponding to this info node. + */ + ref getOutput(const ref & outputs) const; +}; + +struct Leaf : Node +{ + using Node::Node; + + std::optional what() const; + + std::optional shortDescription() const; + + std::optional derivationAttrPath() const; + + /** + * Return the attribute corresponding to `derivationAttrPath`, if set. + */ + std::shared_ptr derivation(const ref & outputs) const; + + bool isFlakeCheck() const; +}; + +typedef std::function attr, bool isLast)> ForEachChild; + +void visit( + std::optional system, + ref node, + std::function visitLeaf, + std::function)> visitNonLeaf, + std::function node, const std::vector & systems)> visitFiltered); + +struct OutputInfo +{ + ref schemaInfo; + ref nodeInfo; + AttrPath leafAttrPath; +}; + +std::optional getOutputInfo(ref inventory, AttrPath attrPath); + +struct SchemaInfo +{ + std::string doc; + StringSet roles; + bool appendSystem = false; + std::optional defaultAttrPath; +}; + +using Schemas = std::map; + +Schemas getSchemas(ref root); + +} // namespace nix::flake_schemas diff --git a/src/libcmd/include/nix/cmd/installable-flake.hh b/src/libcmd/include/nix/cmd/installable-flake.hh index 9f449ad48f2e..3acce913dcb7 100644 --- a/src/libcmd/include/nix/cmd/installable-flake.hh +++ b/src/libcmd/include/nix/cmd/installable-flake.hh @@ -36,11 +36,14 @@ struct ExtraPathInfoFlake : ExtraPathInfoValue struct InstallableFlake : InstallableValue { FlakeRef flakeRef; - Strings attrPaths; - Strings prefixes; + std::string fragment; + AttrPath parsedFragment; + StringSet roles; ExtendedOutputsSpec extendedOutputsSpec; const flake::LockFlags & lockFlags; mutable std::shared_ptr _lockedFlake; + bool useEvalCache = true; + std::optional defaultFlakeSchemas; 
InstallableFlake( SourceExprCommand * cmd, @@ -48,17 +51,15 @@ struct InstallableFlake : InstallableValue FlakeRef && flakeRef, std::string_view fragment, ExtendedOutputsSpec extendedOutputsSpec, - Strings attrPaths, - Strings prefixes, - const flake::LockFlags & lockFlags); + StringSet roles, + const flake::LockFlags & lockFlags, + std::optional defaultFlakeSchemas); std::string what() const override { - return flakeRef.to_string() + "#" + *attrPaths.begin(); + return flakeRef.to_string() + "#" + fragment; } - std::vector getActualAttrPaths(); - DerivedPathsWithInfo toDerivedPaths() override; std::pair toValue(EvalState & state) override; @@ -67,11 +68,23 @@ struct InstallableFlake : InstallableValue * Get a cursor to every attrpath in getActualAttrPaths() that * exists. However if none exists, throw an exception. */ - std::vector> getCursors(EvalState & state) override; + std::vector> getCursors(EvalState & state, bool useDefaultAttrPath) override; + + void getCompletions(const std::string & flakeRefS, AddCompletions & completions); ref getLockedFlake() const; FlakeRef nixpkgsFlakeRef() const; + + std::shared_ptr makeProvenance(std::string_view attrPath) const; + + ref openEvalCache() const; + +private: + + mutable std::shared_ptr _evalCache; + + std::vector getAttrPaths(bool useDefaultAttrPath, ref inventory); }; /** diff --git a/src/libcmd/include/nix/cmd/installable-value.hh b/src/libcmd/include/nix/cmd/installable-value.hh index 27a1fb9815d4..09178c96c972 100644 --- a/src/libcmd/include/nix/cmd/installable-value.hh +++ b/src/libcmd/include/nix/cmd/installable-value.hh @@ -93,7 +93,7 @@ struct InstallableValue : Installable * However if none exists, throw exception instead of returning * empty vector. 
*/ - virtual std::vector> getCursors(EvalState & state); + virtual std::vector> getCursors(EvalState & state, bool useDefaultAttrPath = true); /** * Get the first and most preferred cursor this Installable could diff --git a/src/libcmd/include/nix/cmd/installables.hh b/src/libcmd/include/nix/cmd/installables.hh index 530334e037b7..2ea35261c7fa 100644 --- a/src/libcmd/include/nix/cmd/installables.hh +++ b/src/libcmd/include/nix/cmd/installables.hh @@ -96,6 +96,22 @@ typedef std::vector DerivedPathsWithInfo; struct Installable; +struct InstallableWithBuildResult +{ + ref installable; + + using Success = BuiltPathWithResult; + + using Failure = BuildResult; // must be a `BuildResult::Failure` + + std::variant result; + + /** + * Throw an exception if this represents a failure, otherwise returns a `BuiltPathWithResult`. + */ + const BuiltPathWithResult & getSuccess() const; +}; + /** * Shorthand, for less typing and helping us keep the choice of * collection in sync. @@ -160,13 +176,15 @@ struct Installable const Installables & installables, BuildMode bMode = bmNormal); - static std::vector, BuiltPathWithResult>> build2( + static std::vector build2( ref evalStore, ref store, Realise mode, const Installables & installables, BuildMode bMode = bmNormal); + static void throwBuildErrors(std::vector & buildResults, const Store & store); + static std::set toStorePathSet( ref evalStore, ref store, Realise mode, OperateOn operateOn, const Installables & installables); diff --git a/src/libcmd/include/nix/cmd/meson.build b/src/libcmd/include/nix/cmd/meson.build index 119d0814b9f1..7ab3e596ae40 100644 --- a/src/libcmd/include/nix/cmd/meson.build +++ b/src/libcmd/include/nix/cmd/meson.build @@ -9,6 +9,7 @@ headers = files( 'common-eval-args.hh', 'compatibility-settings.hh', 'editor-for.hh', + 'flake-schemas.hh', 'installable-attr-path.hh', 'installable-derived-path.hh', 'installable-flake.hh', diff --git a/src/libcmd/installable-attr-path.cc b/src/libcmd/installable-attr-path.cc 
index 28c3db3fc79a..3a80aa384de4 100644 --- a/src/libcmd/installable-attr-path.cc +++ b/src/libcmd/installable-attr-path.cc @@ -89,7 +89,8 @@ DerivedPathsWithInfo InstallableAttrPath::toDerivedPaths() } DerivedPathsWithInfo res; - for (auto & [drvPath, outputs] : byDrvPath) + for (auto & [drvPath, outputs] : byDrvPath) { + state->waitForPath(drvPath); res.push_back({ .path = DerivedPath::Built{ @@ -102,6 +103,7 @@ DerivedPathsWithInfo InstallableAttrPath::toDerivedPaths() so we can fill in this info. */ }), }); + } return res; } diff --git a/src/libcmd/installable-flake.cc b/src/libcmd/installable-flake.cc index 11bbdbf8429d..84b6ccfa9d07 100644 --- a/src/libcmd/installable-flake.cc +++ b/src/libcmd/installable-flake.cc @@ -17,6 +17,8 @@ #include "nix/util/url.hh" #include "nix/fetchers/registry.hh" #include "nix/store/build-result.hh" +#include "nix/flake/provenance.hh" +#include "nix/cmd/flake-schemas.hh" #include #include @@ -25,32 +27,14 @@ namespace nix { -std::vector InstallableFlake::getActualAttrPaths() -{ - std::vector res; - if (attrPaths.size() == 1 && attrPaths.front().starts_with(".")) { - attrPaths.front().erase(0, 1); - res.push_back(attrPaths.front()); - return res; - } - - for (auto & prefix : prefixes) - res.push_back(prefix + *attrPaths.begin()); - - for (auto & s : attrPaths) - res.push_back(s); - - return res; -} - -static std::string showAttrPaths(const std::vector & paths) +static std::string showAttrPaths(EvalState & state, const std::vector & paths) { std::string s; for (const auto & [n, i] : enumerate(paths)) { if (n > 0) s += n + 1 == paths.size() ? 
" or " : ", "; s += '\''; - s += i; + s += i.to_string(state); s += '\''; } return s; @@ -62,15 +46,17 @@ InstallableFlake::InstallableFlake( FlakeRef && flakeRef, std::string_view fragment, ExtendedOutputsSpec extendedOutputsSpec, - Strings attrPaths, - Strings prefixes, - const flake::LockFlags & lockFlags) + StringSet roles, + const flake::LockFlags & lockFlags, + std::optional defaultFlakeSchemas) : InstallableValue(state) , flakeRef(flakeRef) - , attrPaths(fragment == "" ? attrPaths : Strings{(std::string) fragment}) - , prefixes(fragment == "" ? Strings{} : prefixes) + , fragment(fragment) + , parsedFragment(AttrPath::parse(*state, fragment)) + , roles(roles) , extendedOutputsSpec(std::move(extendedOutputsSpec)) , lockFlags(lockFlags) + , defaultFlakeSchemas(defaultFlakeSchemas) { if (cmd && cmd->getAutoArgs(*state)->size()) throw UsageError("'--arg' and '--argstr' are incompatible with flakes"); @@ -84,6 +70,8 @@ DerivedPathsWithInfo InstallableFlake::toDerivedPaths() auto attrPath = attr->getAttrPathStr(); + PushProvenance pushedProvenance(*state, makeProvenance(attrPath)); + if (!attr->isDerivation()) { // FIXME: use eval cache? @@ -102,6 +90,7 @@ DerivedPathsWithInfo InstallableFlake::toDerivedPaths() } auto drvPath = attr->forceDerivation(); + state->waitForPath(drvPath); std::optional priority; @@ -157,34 +146,166 @@ std::pair InstallableFlake::toValue(EvalState & state) return {&getCursor(state)->forceValue(), noPos}; } -std::vector> InstallableFlake::getCursors(EvalState & state) +std::vector InstallableFlake::getAttrPaths(bool useDefaultAttrPath, ref inventory) { - auto evalCache = openEvalCache(state, getLockedFlake()); + if (fragment.starts_with(".")) + return {AttrPath::parse(*state, fragment.substr(1))}; + + std::vector attrPaths; + + auto schemas = flake_schemas::getSchemas(inventory); + + // FIXME: Ugly hack to preserve the historical precedence + // between outputs. We should add a way for schemas to declare + // priorities. 
+ std::vector schemasSorted; + std::set schemasSeen; + auto doSchema = [&](const std::string & schema) { + if (schemas.contains(schema)) { + schemasSorted.push_back(schema); + schemasSeen.insert(schema); + } + }; + doSchema("apps"); + doSchema("devShells"); + doSchema("packages"); + doSchema("legacyPackages"); + for (auto & schema : schemas) + if (!schemasSeen.contains(schema.first)) + schemasSorted.push_back(schema.first); + + for (auto & role : roles) { + for (auto & schemaName : schemasSorted) { + auto & schema = schemas.find(schemaName)->second; + if (schema.roles.contains(role)) { + AttrPath attrPath{state->symbols.create(schemaName)}; + if (schema.appendSystem) + attrPath.push_back(state->symbols.create(settings.thisSystem.get())); + + if (useDefaultAttrPath && parsedFragment.empty()) { + if (schema.defaultAttrPath) { + auto attrPath2{attrPath}; + for (auto & x : *schema.defaultAttrPath) + attrPath2.push_back(x); + attrPaths.push_back(attrPath2); + } + } else { + auto attrPath2{attrPath}; + for (auto & x : parsedFragment) + attrPath2.push_back(x); + attrPaths.push_back(attrPath2); + } + } + } + } + + if (!parsedFragment.empty()) + attrPaths.push_back(parsedFragment); + + // FIXME: compatibility hack to get `nix repl` to return all + // outputs by default. 
+ if (parsedFragment.empty() && roles.contains("nix-repl")) + attrPaths.push_back({}); + + return attrPaths; +} + +std::vector> InstallableFlake::getCursors(EvalState & state, bool useDefaultAttrPath) +{ + auto cache = openEvalCache(); + + auto inventory = cache->getRoot()->getAttr("inventory"); + auto outputs = cache->getRoot()->getAttr("outputs"); - auto root = evalCache->getRoot(); + auto attrPaths = getAttrPaths(useDefaultAttrPath, inventory); + + if (attrPaths.empty()) + throw Error( + "Flake '%s' does not have any schema that provides a default output for the role(s) %s.", + flakeRef, + concatStringsSep(", ", roles)); std::vector> res; Suggestions suggestions; - auto attrPaths = getActualAttrPaths(); for (auto & attrPath : attrPaths) { - debug("trying flake output attribute '%s'", attrPath); + debug("trying flake output attribute '%s'", attrPath.to_string(state)); + + PushProvenance pushedProvenance(state, makeProvenance(attrPath.to_string(state))); + +#if 0 + auto outputInfo = flake_schemas::getOutputInfo(inventory, attrPath); + + if (outputInfo && outputInfo->leafAttrPath.empty()) { + if (auto drv = outputInfo->nodeInfo->maybeGetAttr("derivation")) { + res.push_back(ref(drv)); + continue; + } + } +#endif - auto attr = root->findAlongAttrPath(AttrPath::parse(state, attrPath)); - if (attr) { + auto attr = outputs->findAlongAttrPath(attrPath); + if (attr) res.push_back(ref(*attr)); - } else { + else suggestions += attr.getSuggestions(); - } } if (res.size() == 0) - throw Error(suggestions, "flake '%s' does not provide attribute %s", flakeRef, showAttrPaths(attrPaths)); + throw Error(suggestions, "flake '%s' does not provide attribute %s", flakeRef, showAttrPaths(state, attrPaths)); return res; } +void InstallableFlake::getCompletions(const std::string & flakeRefS, AddCompletions & completions) +{ + auto cache = openEvalCache(); + + auto inventory = cache->getRoot()->getAttr("inventory"); + auto outputs = cache->getRoot()->getAttr("outputs"); + + if 
(fragment.ends_with(".") || fragment.empty()) + // Represent that we're looking for attributes starting with the empty prefix (i.e. all attributes inside the + // parent. + parsedFragment.push_back(state->symbols.create("")); + + auto attrPaths = getAttrPaths(true, inventory); + + if (fragment.empty()) + // Return all top-level flake outputs. + attrPaths.push_back(AttrPath{state->symbols.create("")}); + + auto lastAttr = fragment.ends_with(".") || parsedFragment.empty() ? std::string_view("") + : state->symbols[parsedFragment.back()]; + std::string prefix; + if (auto dot = fragment.rfind('.'); dot != std::string::npos) + prefix = fragment.substr(0, dot); + if (fragment.starts_with(".") && !prefix.starts_with(".")) + prefix = "." + prefix; + + for (auto attrPath : attrPaths) { + if (attrPath.empty()) + attrPath.push_back(state->symbols.create("")); + + auto attrPathParent{attrPath}; + attrPathParent.pop_back(); + + auto attr = outputs->findAlongAttrPath(attrPathParent); + if (!attr) + continue; + + for (auto & childName : (*attr)->getAttrs()) { + if (hasPrefix(state->symbols[childName], lastAttr)) { + auto attrPathChild = (*attr)->getAttrPath(childName); + completions.add( + flakeRefS + "#" + prefix + (prefix.empty() || prefix.ends_with(".") ? 
"" : ".") + + state->symbols[childName]); + } + } + } +} + ref InstallableFlake::getLockedFlake() const { if (!_lockedFlake) { @@ -197,6 +318,14 @@ ref InstallableFlake::getLockedFlake() const return ref(_lockedFlake); } +ref InstallableFlake::openEvalCache() const +{ + if (!_evalCache) { + _evalCache = flake_schemas::call(*state, getLockedFlake(), defaultFlakeSchemas, useEvalCache); + } + return ref(_evalCache); +} + FlakeRef InstallableFlake::nixpkgsFlakeRef() const { auto lockedFlake = getLockedFlake(); @@ -211,4 +340,12 @@ FlakeRef InstallableFlake::nixpkgsFlakeRef() const return defaultNixpkgsFlakeRef(); } +std::shared_ptr InstallableFlake::makeProvenance(std::string_view attrPath) const +{ + auto provenance = getLockedFlake()->flake.provenance; + if (!provenance) + return nullptr; + return std::make_shared(provenance, std::string(attrPath), evalSettings.pureEval); +} + } // namespace nix diff --git a/src/libcmd/installable-value.cc b/src/libcmd/installable-value.cc index 3a167af3db49..6c2fd60efd8e 100644 --- a/src/libcmd/installable-value.cc +++ b/src/libcmd/installable-value.cc @@ -4,7 +4,7 @@ namespace nix { -std::vector> InstallableValue::getCursors(EvalState & state) +std::vector> InstallableValue::getCursors(EvalState & state, bool useDefaultAttrPath) { auto evalCache = std::make_shared(std::nullopt, state, [&]() { return toValue(state).first; }); @@ -15,7 +15,7 @@ ref InstallableValue::getCursor(EvalState & state) { /* Although getCursors should return at least one element, in case it doesn't, bound check to avoid an undefined behavior for vector[0] */ - return getCursors(state).at(0); + return getCursors(state, true).at(0); } static UsageError nonValueInstallable(Installable & installable) @@ -55,7 +55,7 @@ InstallableValue::trySinglePathToDerivedPaths(Value & v, const PosIdx pos, std:: else if (v.type() == nString) { return {{ - .path = DerivedPath::fromSingle(state->coerceToSingleDerivedPath(pos, v, errorCtx)), + .path = 
DerivedPath::fromSingle(state->devirtualize(state->coerceToSingleDerivedPath(pos, v, errorCtx))), .info = make_ref(), }}; } diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index 7e3861e2f1d3..740e53d74af4 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -21,6 +21,7 @@ #include "nix/util/url.hh" #include "nix/fetchers/registry.hh" #include "nix/store/build-result.hh" +#include "nix/util/exit.hh" #include #include @@ -233,19 +234,9 @@ MixReadOnlyOption::MixReadOnlyOption() }); } -Strings SourceExprCommand::getDefaultFlakeAttrPaths() +StringSet SourceExprCommand::getRoles() { - return {"packages." + settings.thisSystem.get() + ".default", "defaultPackage." + settings.thisSystem.get()}; -} - -Strings SourceExprCommand::getDefaultFlakeAttrPathPrefixes() -{ - return {// As a convenience, look for the attribute in - // 'outputs.packages'. - "packages." + settings.thisSystem.get() + ".", - // As a temporary hack until Nixpkgs is properly converted - // to provide a clean 'packages' set, look in 'legacyPackages'. - "legacyPackages." 
+ settings.thisSystem.get() + "."}; + return {"nix-build"}; } Args::CompleterClosure SourceExprCommand::getCompleteInstallable() @@ -299,13 +290,7 @@ void SourceExprCommand::completeInstallable(AddCompletions & completions, std::s } } } else { - completeFlakeRefWithFragment( - completions, - getEvalState(), - lockFlags, - getDefaultFlakeAttrPathPrefixes(), - getDefaultFlakeAttrPaths(), - prefix); + completeFlakeRefWithFragment(completions, getEvalState(), lockFlags, getRoles(), prefix); } } catch (EvalError &) { // Don't want eval errors to mess-up with the completion engine, so let's just swallow them @@ -316,91 +301,37 @@ void completeFlakeRefWithFragment( AddCompletions & completions, ref evalState, flake::LockFlags lockFlags, - Strings attrPathPrefixes, - const Strings & defaultFlakeAttrPaths, + const StringSet & roles, std::string_view prefix) -{ - /* Look for flake output attributes that match the - prefix. */ - try { - auto hash = prefix.find('#'); - if (hash == std::string::npos) { - completeFlakeRef(completions, evalState->store, prefix); - } else { - completions.setType(AddCompletions::Type::Attrs); - - auto fragment = prefix.substr(hash + 1); - std::string prefixRoot = ""; - if (fragment.starts_with(".")) { - fragment = fragment.substr(1); - prefixRoot = "."; - } - auto flakeRefS = std::string(prefix.substr(0, hash)); - - // TODO: ideally this would use the command base directory instead of assuming ".". - auto flakeRef = - parseFlakeRef(fetchSettings, expandTilde(flakeRefS), std::filesystem::current_path().string()); - - auto evalCache = openEvalCache( - *evalState, make_ref(lockFlake(flakeSettings, *evalState, flakeRef, lockFlags))); - - auto root = evalCache->getRoot(); - - if (prefixRoot == ".") { - attrPathPrefixes.clear(); - } - /* Complete 'fragment' relative to all the - attrpath prefixes as well as the root of the - flake. 
*/ - attrPathPrefixes.push_back(""); - - for (auto & attrPathPrefixS : attrPathPrefixes) { - auto attrPathPrefix = AttrPath::parse(*evalState, attrPathPrefixS); - auto attrPathS = attrPathPrefixS + std::string(fragment); - auto attrPath = AttrPath::parse(*evalState, attrPathS); - - std::string lastAttr; - if (!attrPath.empty() && !hasSuffix(attrPathS, ".")) { - lastAttr = evalState->symbols[attrPath.back()]; - attrPath.pop_back(); - } +try { + auto hash = prefix.find('#'); + if (hash == std::string::npos) { + completeFlakeRef(completions, evalState->store, prefix); + return; + } - auto attr = root->findAlongAttrPath(attrPath); - if (!attr) - continue; + completions.setType(AddCompletions::Type::Attrs); - for (auto & attr2 : (*attr)->getAttrs()) { - if (hasPrefix(evalState->symbols[attr2], lastAttr)) { - auto attrPath2 = (*attr)->getAttrPath(attr2); - /* Strip the attrpath prefix. */ - attrPath2.erase(attrPath2.begin(), attrPath2.begin() + attrPathPrefix.size()); - // FIXME: handle names with dots - completions.add(flakeRefS + "#" + prefixRoot + attrPath2.to_string(*evalState)); - } - } - } + auto fragment = prefix.substr(hash + 1); + auto flakeRefS = std::string(prefix.substr(0, hash)); - /* And add an empty completion for the default - attrpaths. */ - if (fragment.empty()) { - for (auto & attrPath : defaultFlakeAttrPaths) { - auto attr = root->findAlongAttrPath(AttrPath::parse(*evalState, attrPath)); - if (!attr) - continue; - completions.add(flakeRefS + "#" + prefixRoot); - } - } - } - } catch (Error & e) { - warn(e.msg()); - } + InstallableFlake{ + nullptr, + evalState, + // TODO: ideally this would use the command base directory instead of assuming ".". + parseFlakeRef(fetchSettings, expandTilde(flakeRefS), std::filesystem::current_path().string()), + fragment, + ExtendedOutputsSpec::Default{}, // FIXME: could be that we're completing the outputs spec... 
+ roles, + lockFlags, + {}} + .getCompletions(flakeRefS, completions); +} catch (Error & e) { + warn(e.msg()); } void completeFlakeRef(AddCompletions & completions, ref store, std::string_view prefix) { - if (!experimentalFeatureSettings.isEnabled(Xp::Flakes)) - return; - if (prefix == "") completions.add("."); @@ -510,9 +441,9 @@ Installables SourceExprCommand::parseInstallables(ref store, std::vector< std::move(flakeRef), fragment, std::move(extendedOutputsSpec), - getDefaultFlakeAttrPaths(), - getDefaultFlakeAttrPathPrefixes(), - lockFlags)); + getRoles(), + lockFlags, + getDefaultFlakeSchemas())); continue; } catch (...) { ex = std::current_exception(); @@ -554,46 +485,69 @@ static SingleBuiltPath getBuiltPath(ref evalStore, ref store, cons b.raw()); } -std::vector Installable::build( - ref evalStore, ref store, Realise mode, const Installables & installables, BuildMode bMode) +const BuiltPathWithResult & InstallableWithBuildResult::getSuccess() const { - std::vector res; - for (auto & [_, builtPathWithResult] : build2(evalStore, store, mode, installables, bMode)) - res.push_back(builtPathWithResult); - return res; + if (auto * failure = std::get_if(&result)) { + auto failure2 = failure->tryGetFailure(); + assert(failure2); + failure2->rethrow(); + } else + return *std::get_if(&result); } -static void throwBuildErrors(std::vector & buildResults, const Store & store) +void Installable::throwBuildErrors(std::vector & buildResults, const Store & store) { - std::vector> failed; for (auto & buildResult : buildResults) { - if (auto * failure = buildResult.tryGetFailure()) { - failed.push_back({&buildResult, failure}); - } - } + if (std::get_if(&buildResult.result)) { + // Report success first. 
+ for (auto & buildResult : buildResults) { + if (std::get_if(&buildResult.result)) + notice("✅ " ANSI_BOLD "%s" ANSI_NORMAL, buildResult.installable->what()); + } - auto failedResult = failed.begin(); - if (failedResult != failed.end()) { - if (failed.size() == 1) { - failedResult->second->rethrow(); - } else { - StringSet failedPaths; - for (; failedResult != failed.end(); failedResult++) { - if (!failedResult->second->errorMsg.empty()) { - logError( - ErrorInfo{ - .level = lvlError, - .msg = failedResult->second->errorMsg, - }); + // Then cancelled builds. + for (auto & buildResult : buildResults) { + if (auto failure = std::get_if(&buildResult.result)) { + if (failure->isCancelled()) + notice( + "❓ " ANSI_BOLD "%s" ANSI_NORMAL ANSI_FAINT " (cancelled)", + buildResult.installable->what()); + } + } + + // Then failures. + for (auto & buildResult : buildResults) { + if (auto failure = std::get_if(&buildResult.result)) { + if (failure->isCancelled()) + continue; + auto failure2 = failure->tryGetFailure(); + assert(failure2); + printError("❌ " ANSI_RED "%s" ANSI_NORMAL, buildResult.installable->what()); + try { + failure2->rethrow(); + } catch (Error & e) { + logError(e.info()); + } } - failedPaths.insert(failedResult->first->path.to_string(store)); } - throw Error("build of %s failed", concatStringsSep(", ", quoteStrings(failedPaths))); + + throw Exit(1); } } } -std::vector, BuiltPathWithResult>> Installable::build2( +std::vector Installable::build( + ref evalStore, ref store, Realise mode, const Installables & installables, BuildMode bMode) +{ + auto results = build2(evalStore, store, mode, installables, bMode); + throwBuildErrors(results, *store); + std::vector res; + for (auto & b : results) + res.push_back(b.getSuccess()); + return res; +} + +std::vector Installable::build2( ref evalStore, ref store, Realise mode, const Installables & installables, BuildMode bMode) { if (mode == Realise::Nothing) @@ -615,7 +569,7 @@ std::vector, BuiltPathWithResult>> 
Installable::build } } - std::vector, BuiltPathWithResult>> res; + std::vector res; switch (mode) { @@ -630,17 +584,21 @@ std::vector, BuiltPathWithResult>> Installable::build [&](const DerivedPath::Built & bfd) { auto outputs = resolveDerivedPath(*store, bfd, &*evalStore); res.push_back( - {aux.installable, - {.path = - BuiltPath::Built{ - .drvPath = - make_ref(getBuiltPath(evalStore, store, *bfd.drvPath)), - .outputs = outputs, - }, - .info = aux.info}}); + {.installable = aux.installable, + .result = InstallableWithBuildResult::Success{ + .path = + BuiltPath::Built{ + .drvPath = make_ref( + getBuiltPath(evalStore, store, *bfd.drvPath)), + .outputs = outputs, + }, + .info = aux.info}}); }, [&](const DerivedPath::Opaque & bo) { - res.push_back({aux.installable, {.path = BuiltPath::Opaque{bo.path}, .info = aux.info}}); + res.push_back( + {.installable = aux.installable, + .result = InstallableWithBuildResult::Success{ + .path = BuiltPath::Opaque{bo.path}, .info = aux.info}}); }, }, path.raw()); @@ -654,9 +612,13 @@ std::vector, BuiltPathWithResult>> Installable::build printMissing(store, pathsToBuild, lvlInfo); auto buildResults = store->buildPathsWithResults(pathsToBuild, bMode, evalStore); - throwBuildErrors(buildResults, *store); for (auto & buildResult : buildResults) { - // If we didn't throw, they must all be sucesses + if (buildResult.tryGetFailure()) { + for (auto & aux : backmap[buildResult.path]) { + res.push_back({.installable = aux.installable, .result = buildResult}); + } + continue; + } auto & success = std::get(buildResult.inner); for (auto & aux : backmap[buildResult.path]) { std::visit( @@ -666,20 +628,22 @@ std::vector, BuiltPathWithResult>> Installable::build for (auto & [outputName, realisation] : success.builtOutputs) outputs.emplace(outputName, realisation.outPath); res.push_back( - {aux.installable, - {.path = - BuiltPath::Built{ - .drvPath = - make_ref(getBuiltPath(evalStore, store, *bfd.drvPath)), - .outputs = outputs, - }, - .info = 
aux.info, - .result = buildResult}}); + {.installable = aux.installable, + .result = InstallableWithBuildResult::Success{ + .path = + BuiltPath::Built{ + .drvPath = make_ref( + getBuiltPath(evalStore, store, *bfd.drvPath)), + .outputs = outputs, + }, + .info = aux.info, + .result = buildResult}}); }, [&](const DerivedPath::Opaque & bo) { res.push_back( - {aux.installable, - {.path = BuiltPath::Opaque{bo.path}, .info = aux.info, .result = buildResult}}); + {.installable = aux.installable, + .result = InstallableWithBuildResult::Success{ + .path = BuiltPath::Opaque{bo.path}, .info = aux.info, .result = buildResult}}); }, }, buildResult.path.raw()); @@ -840,8 +804,11 @@ InstallableCommand::InstallableCommand() }); } +void InstallableCommand::preRun(ref store) {} + void InstallableCommand::run(ref store) { + preRun(store); auto installable = parseInstallable(store, _installable); run(store, std::move(installable)); } diff --git a/src/libcmd/meson.build b/src/libcmd/meson.build index f553afa0ba17..087da84f9293 100644 --- a/src/libcmd/meson.build +++ b/src/libcmd/meson.build @@ -67,6 +67,7 @@ config_priv_h = configure_file( ) subdir('nix-meson-build-support/common') +subdir('nix-meson-build-support/generate-header') sources = files( 'built-path.cc', @@ -74,6 +75,7 @@ sources = files( 'command.cc', 'common-eval-args.cc', 'editor-for.cc', + 'flake-schemas.cc', 'installable-attr-path.cc', 'installable-derived-path.cc', 'installable-flake.cc', @@ -86,6 +88,11 @@ sources = files( 'repl.cc', ) +sources += [ + gen_header.process('call-flake-schemas.nix'), + gen_header.process('builtin-flake-schemas.nix'), +] + subdir('include/nix/cmd') subdir('nix-meson-build-support/export-all-symbols') @@ -99,7 +106,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, cpp_pch : 
do_pch ? [ 'pch/precompiled-headers.hh' ] : [], ) diff --git a/src/libcmd/package.nix b/src/libcmd/package.nix index c382f0e5760d..1d677142da1d 100644 --- a/src/libcmd/package.nix +++ b/src/libcmd/package.nix @@ -35,7 +35,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-cmd"; + pname = "determinate-nix-cmd"; inherit version; workDir = ./.; @@ -49,6 +49,8 @@ mkMesonLibrary (finalAttrs: { ./include/nix/cmd/meson.build (fileset.fileFilter (file: file.hasExt "cc") ./.) (fileset.fileFilter (file: file.hasExt "hh") ./.) + ./call-flake-schemas.nix + ./builtin-flake-schemas.nix ]; buildInputs = [ diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc index 8fbb54dd30de..d8e61b5b5205 100644 --- a/src/libcmd/repl.cc +++ b/src/libcmd/repl.cc @@ -177,7 +177,7 @@ ReplExitStatus NixRepl::mainLoop() if (state->debugRepl) { debuggerNotice = " debugger"; } - notice("Nix %1%%2%\nType :? for help.", nixVersion, debuggerNotice); + notice("Nix %s\nType :? for help.", version(), debuggerNotice); } isFirstRepl = false; @@ -332,6 +332,7 @@ StorePath NixRepl::getDerivationPath(Value & v) auto drvPath = packageInfo->queryDrvPath(); if (!drvPath) throw Error("expression did not evaluate to a valid derivation (no 'drvPath' attribute)"); + state->waitForPath(*drvPath); if (!state->store->isValidPath(*drvPath)) throw Error("expression evaluated to invalid derivation '%s'", state->store->printStorePath(*drvPath)); return *drvPath; diff --git a/src/libexpr-c/meson.build b/src/libexpr-c/meson.build index c47704ce4112..df1e3c05880c 100644 --- a/src/libexpr-c/meson.build +++ b/src/libexpr-c/meson.build @@ -54,7 +54,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libexpr-c/nix_api_expr.cc b/src/libexpr-c/nix_api_expr.cc index 0dd9fa0a51d8..bfbd0a9c361f 100644 
--- a/src/libexpr-c/nix_api_expr.cc +++ b/src/libexpr-c/nix_api_expr.cc @@ -71,6 +71,7 @@ nix_err nix_expr_eval_from_string( nix::Expr * parsedExpr = state->state.parseExprFromString(expr, state->state.rootPath(nix::CanonPath(path))); state->state.eval(parsedExpr, *value->value); state->state.forceValue(*value->value, nix::noPos); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } @@ -82,6 +83,7 @@ nix_err nix_value_call(nix_c_context * context, EvalState * state, Value * fn, n try { state->state.callFunction(*fn->value, *arg->value, *value->value, nix::noPos); state->state.forceValue(*value->value, nix::noPos); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } @@ -100,6 +102,7 @@ nix_err nix_value_call_multi( try { state->state.callFunction(*fn->value, {internal_args.data(), nargs}, *value->value, nix::noPos); state->state.forceValue(*value->value, nix::noPos); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } @@ -110,6 +113,7 @@ nix_err nix_value_force(nix_c_context * context, EvalState * state, nix_value * context->last_err_code = NIX_OK; try { state->state.forceValue(*value->value, nix::noPos); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } @@ -120,6 +124,7 @@ nix_err nix_value_force_deep(nix_c_context * context, EvalState * state, nix_val context->last_err_code = NIX_OK; try { state->state.forceValueDeep(*value->value); + state->state.waitForAllPaths(); } NIXC_CATCH_ERRS } diff --git a/src/libexpr-c/nix_api_value.cc b/src/libexpr-c/nix_api_value.cc index 7fd8233adec2..b6a838284eff 100644 --- a/src/libexpr-c/nix_api_value.cc +++ b/src/libexpr-c/nix_api_value.cc @@ -194,6 +194,8 @@ ValueType nix_get_type(nix_c_context * context, const nix_value * value) switch (v.type()) { case nThunk: return NIX_TYPE_THUNK; + case nFailed: + return NIX_TYPE_FAILED; case nInt: return NIX_TYPE_INT; case nFloat: @@ -386,6 +388,7 @@ nix_value * nix_get_attr_byname(nix_c_context * context, const nix_value * value auto attr = v.attrs()->get(s); if (attr) { 
state->state.forceValue(*attr->value, nix::noPos); + state->state.waitForAllPaths(); return new_nix_value(attr->value, state->state.mem); } nix_set_err_msg(context, NIX_ERR_KEY, "missing attribute"); diff --git a/src/libexpr-c/nix_api_value.h b/src/libexpr-c/nix_api_value.h index 5bd45da9059d..a01bfb280599 100644 --- a/src/libexpr-c/nix_api_value.h +++ b/src/libexpr-c/nix_api_value.h @@ -100,7 +100,10 @@ typedef enum { /** @brief External value from C++ plugins or C API * @see Externals */ - NIX_TYPE_EXTERNAL + NIX_TYPE_EXTERNAL, + /** @brief Failed value. Contains an exception that can be rethrown. + */ + NIX_TYPE_FAILED, } ValueType; // forward declarations diff --git a/src/libexpr-c/package.nix b/src/libexpr-c/package.nix index 694fbc1fe789..ec92ecce1054 100644 --- a/src/libexpr-c/package.nix +++ b/src/libexpr-c/package.nix @@ -15,7 +15,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-expr-c"; + pname = "determinate-nix-expr-c"; inherit version; workDir = ./.; diff --git a/src/libexpr-test-support/include/nix/expr/tests/value/context.hh b/src/libexpr-test-support/include/nix/expr/tests/value/context.hh index 68a0b8dea7d7..2311f3941c13 100644 --- a/src/libexpr-test-support/include/nix/expr/tests/value/context.hh +++ b/src/libexpr-test-support/include/nix/expr/tests/value/context.hh @@ -26,6 +26,12 @@ struct Arbitrary static Gen arbitrary(); }; +template<> +struct Arbitrary +{ + static Gen arbitrary(); +}; + template<> struct Arbitrary { diff --git a/src/libexpr-test-support/meson.build b/src/libexpr-test-support/meson.build index df28661b7e78..0fae96b47f1e 100644 --- a/src/libexpr-test-support/meson.build +++ b/src/libexpr-test-support/meson.build @@ -50,7 +50,7 @@ this_library = library( # TODO: Remove `-lrapidcheck` when https://github.com/emil-e/rapidcheck/pull/326 # is available. 
See also ../libutil/build.meson link_args : linker_export_flags + [ '-lrapidcheck' ], - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libexpr-test-support/package.nix b/src/libexpr-test-support/package.nix index 5cb4adaa8c46..1879a5716082 100644 --- a/src/libexpr-test-support/package.nix +++ b/src/libexpr-test-support/package.nix @@ -18,7 +18,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-util-test-support"; + pname = "determinate-nix-util-test-support"; inherit version; workDir = ./.; diff --git a/src/libexpr-test-support/tests/value/context.cc b/src/libexpr-test-support/tests/value/context.cc index d6036601a948..8ce84fb51f54 100644 --- a/src/libexpr-test-support/tests/value/context.cc +++ b/src/libexpr-test-support/tests/value/context.cc @@ -16,6 +16,15 @@ Gen Arbitrary::arb }); } +Gen Arbitrary::arbitrary() +{ + return gen::map(gen::arbitrary(), [](StorePath storePath) { + return NixStringContextElem::Path{ + .storePath = storePath, + }; + }); +} + Gen Arbitrary::arbitrary() { return gen::mapcat( @@ -31,6 +40,8 @@ Gen Arbitrary::arbitrary() case 2: return gen::map( gen::arbitrary(), [](NixStringContextElem a) { return a; }); + case 3: + return gen::map(gen::arbitrary(), [](NixStringContextElem a) { return a; }); default: assert(false); } diff --git a/src/libexpr-tests/value/value.cc b/src/libexpr-tests/value/value.cc index 420db0f31b17..bd8f0da71213 100644 --- a/src/libexpr-tests/value/value.cc +++ b/src/libexpr-tests/value/value.cc @@ -13,7 +13,6 @@ TEST_F(ValueTest, unsetValue) { Value unsetValue; ASSERT_EQ(false, unsetValue.isValid()); - ASSERT_EQ(nThunk, unsetValue.type(true)); ASSERT_DEATH(unsetValue.type(), ""); } diff --git a/src/libexpr/attr-path.cc b/src/libexpr/attr-path.cc index 575a135422ab..c8b800245881 100644 --- a/src/libexpr/attr-path.cc +++ b/src/libexpr/attr-path.cc @@ -39,6 +39,14 @@ AttrPath AttrPath::parse(EvalState & state, std::string_view s) 
return res; } +AttrPath AttrPath::fromStrings(EvalState & state, const std::vector & attrNames) +{ + AttrPath res; + for (auto & attrName : attrNames) + res.push_back(state.symbols.create(attrName)); + return res; +} + std::string AttrPath::to_string(EvalState & state) const { return dropEmptyInitThenConcatStringsSep(".", state.symbols.resolve({*this})); diff --git a/src/libexpr/eval-cache.cc b/src/libexpr/eval-cache.cc index 43f10da6eac9..0d6bbdaf4839 100644 --- a/src/libexpr/eval-cache.cc +++ b/src/libexpr/eval-cache.cc @@ -364,23 +364,33 @@ void AttrCursor::fetchCachedValue() throw CachedEvalError(parent->first, parent->second); } -AttrPath AttrCursor::getAttrPath() const +AttrPath AttrCursor::getAttrPathRaw() const { if (parent) { - auto attrPath = parent->first->getAttrPath(); + auto attrPath = parent->first->getAttrPathRaw(); attrPath.push_back(parent->second); return attrPath; } else return {}; } -AttrPath AttrCursor::getAttrPath(Symbol name) const +AttrPath AttrCursor::getAttrPath() const +{ + return root->cleanupAttrPath(getAttrPathRaw()); +} + +AttrPath AttrCursor::getAttrPathRaw(Symbol name) const { - auto attrPath = getAttrPath(); + auto attrPath = getAttrPathRaw(); attrPath.push_back(name); return attrPath; } +AttrPath AttrCursor::getAttrPath(Symbol name) const +{ + return root->cleanupAttrPath(getAttrPathRaw(name)); +} + std::string AttrCursor::getAttrPathStr() const { return getAttrPath().to_string(root->state); @@ -554,16 +564,17 @@ string_t AttrCursor::getStringWithContext() if (auto s = std::get_if(&cachedValue->second)) { bool valid = true; for (auto & c : s->second) { - const StorePath & path = std::visit( + const StorePath * path = std::visit( overloaded{ - [&](const NixStringContextElem::DrvDeep & d) -> const StorePath & { return d.drvPath; }, - [&](const NixStringContextElem::Built & b) -> const StorePath & { - return b.drvPath->getBaseStorePath(); + [&](const NixStringContextElem::DrvDeep & d) -> const StorePath * { return &d.drvPath; }, + 
[&](const NixStringContextElem::Built & b) -> const StorePath * { + return &b.drvPath->getBaseStorePath(); }, - [&](const NixStringContextElem::Opaque & o) -> const StorePath & { return o.path; }, + [&](const NixStringContextElem::Opaque & o) -> const StorePath * { return &o.path; }, + [&](const NixStringContextElem::Path & p) -> const StorePath * { return nullptr; }, }, c.raw); - if (!root->state.store->isValidPath(path)) { + if (!path || !root->state.store->isValidPath(*path)) { valid = false; break; } @@ -711,6 +722,7 @@ StorePath AttrCursor::forceDerivation() /* The eval cache contains 'drvPath', but the actual path has been garbage-collected. So force it to be regenerated. */ aDrvPath->forceValue(); + root->state.waitForPath(drvPath); if (!root->state.store->isValidPath(drvPath)) throw Error( "don't know how to recreate store derivation '%s'!", root->state.store->printStorePath(drvPath)); diff --git a/src/libexpr/eval-gc.cc b/src/libexpr/eval-gc.cc index 0d25f38f64de..c1e974e053b8 100644 --- a/src/libexpr/eval-gc.cc +++ b/src/libexpr/eval-gc.cc @@ -46,6 +46,88 @@ static void * oomHandler(size_t requested) throw std::bad_alloc(); } +static size_t getFreeMem() +{ + /* On Linux, use the `MemAvailable` or `MemFree` fields from + /proc/meminfo. */ +# ifdef __linux__ + { + std::unordered_map fields; + for (auto & line : + tokenizeString>(readFile(std::filesystem::path("/proc/meminfo")), "\n")) { + auto colon = line.find(':'); + if (colon == line.npos) + continue; + fields.emplace(line.substr(0, colon), trim(line.substr(colon + 1))); + } + + auto i = fields.find("MemAvailable"); + if (i == fields.end()) + i = fields.find("MemFree"); + if (i != fields.end()) { + auto kb = tokenizeString>(i->second, " "); + if (kb.size() == 2 && kb[1] == "kB") + return string2Int(kb[0]).value_or(0) * 1024; + } + } +# endif + + /* On non-Linux systems, conservatively assume that 25% of memory is free. 
*/ + long pageSize = sysconf(_SC_PAGESIZE); + long pages = sysconf(_SC_PHYS_PAGES); + if (pageSize > 0 && pages > 0) + return (static_cast(pageSize) * static_cast(pages)) / 4; + return 0; +} + +/** + * When a thread goes into a coroutine, we lose its original sp until + * control flow returns to the thread. This causes Boehm GC to crash + * since it will scan memory between the coroutine's sp and the + * original stack base of the thread. Therefore, we detect when the + * current sp is outside of the original thread stack and push the + * entire thread stack instead, as an approximation. + * + * This is not optimal, because it causes the stack below sp to be + * scanned. However, we usually don't have active coroutines during + * evaluation, so this is acceptable. + * + * Note that we don't scan coroutine stacks. It's currently assumed + * that we don't have GC roots in coroutines. + */ +void fixupBoehmStackPointer(void ** sp_ptr, void * _pthread_id) +{ + void *& sp = *sp_ptr; + auto pthread_id = reinterpret_cast(_pthread_id); + size_t osStackSize; + char * osStackHi; + char * osStackLo; + +# ifdef __APPLE__ + osStackSize = pthread_get_stacksize_np(pthread_id); + osStackHi = (char *) pthread_get_stackaddr_np(pthread_id); + osStackLo = osStackHi - osStackSize; +# else + pthread_attr_t pattr; + if (pthread_attr_init(&pattr)) + throw Error("fixupBoehmStackPointer: pthread_attr_init failed"); +# ifdef HAVE_PTHREAD_GETATTR_NP + if (pthread_getattr_np(pthread_id, &pattr)) + throw Error("fixupBoehmStackPointer: pthread_getattr_np failed"); +# else +# error "Need `pthread_attr_get_np`" +# endif + if (pthread_attr_getstack(&pattr, (void **) &osStackLo, &osStackSize)) + throw Error("fixupBoehmStackPointer: pthread_attr_getstack failed"); + if (pthread_attr_destroy(&pattr)) + throw Error("fixupBoehmStackPointer: pthread_attr_destroy failed"); + osStackHi = osStackLo + osStackSize; +# endif + + if (sp >= osStackHi || sp < osStackLo) // sp is outside the os stack + sp = 
osStackLo; +} + static inline void initGCReal() { /* Initialise the Boehm garbage collector. */ @@ -76,8 +158,11 @@ static inline void initGCReal() GC_set_oom_fn(oomHandler); - /* Set the initial heap size to something fairly big (25% of - physical RAM, up to a maximum of 384 MiB) so that in most cases + GC_set_sp_corrector(&fixupBoehmStackPointer); + assert(GC_get_sp_corrector()); + + /* Set the initial heap size to something fairly big (80% of + free RAM, up to a maximum of 4 GiB) so that in most cases we don't need to garbage collect at all. (Collection has a fairly significant overhead.) The heap size can be overridden through libgc's GC_INITIAL_HEAP_SIZE environment variable. We @@ -88,15 +173,10 @@ static inline void initGCReal() if (!getEnv("GC_INITIAL_HEAP_SIZE")) { size_t size = 32 * 1024 * 1024; # if HAVE_SYSCONF && defined(_SC_PAGESIZE) && defined(_SC_PHYS_PAGES) - size_t maxSize = 384 * 1024 * 1024; - long pageSize = sysconf(_SC_PAGESIZE); - long pages = sysconf(_SC_PHYS_PAGES); - if (pageSize != -1) - size = (pageSize * pages) / 4; // 25% of RAM - if (size > maxSize) - size = maxSize; + size_t maxSize = 4ULL * 1024 * 1024 * 1024; + auto free = getFreeMem(); + size = std::max(size, std::min((size_t) (free * 0.5), maxSize)); # endif - debug("setting initial heap size to %1% bytes", size); GC_expand_hp(size); } } diff --git a/src/libexpr/eval-settings.cc b/src/libexpr/eval-settings.cc index 04c6193885e1..27205864b8ba 100644 --- a/src/libexpr/eval-settings.cc +++ b/src/libexpr/eval-settings.cc @@ -91,9 +91,19 @@ bool EvalSettings::isPseudoUrl(std::string_view s) std::string EvalSettings::resolvePseudoUrl(std::string_view url) { - if (hasPrefix(url, "channel:")) - return "https://channels.nixos.org/" + std::string(url.substr(8)) + "/nixexprs.tar.xz"; - else + if (hasPrefix(url, "channel:")) { + auto realUrl = "https://channels.nixos.org/" + std::string(url.substr(8)) + "/nixexprs.tar.xz"; + static bool haveWarned = false; + warnOnce( + haveWarned, + 
"Channels are deprecated in favor of flakes in Determinate Nix. " + "Instead of '%s', use '%s'. " + "See https://zero-to-nix.com for a guide to Nix flakes. " + "For details and to offer feedback on the deprecation process, see: https://github.com/DeterminateSystems/nix-src/issues/34.", + url, + realUrl); + return realUrl; + } else return std::string(url); } diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index ab3f7b3ff5da..1392ce38b0a6 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -25,6 +25,8 @@ #include "nix/fetchers/tarball.hh" #include "nix/fetchers/input-cache.hh" #include "nix/util/current-process.hh" +#include "nix/store/async-path-writer.hh" +#include "nix/expr/parallel-eval.hh" #include "parser-tab.hh" @@ -44,6 +46,11 @@ #include #include #include +#include + +#ifndef _WIN32 // TODO use portable implementation +# include +#endif #include "nix/util/strings-inline.hh" @@ -155,6 +162,8 @@ std::string_view showType(ValueType type, bool withArticle) return WA("a", "float"); case nThunk: return WA("a", "thunk"); + case nFailed: + return WA("a", "failure"); } unreachable(); } @@ -193,20 +202,36 @@ PosIdx Value::determinePos(const PosIdx pos) const return attrs()->pos; case tLambda: return lambda().fun->pos; +#if 0 + // FIXME: disabled because reading from an app is racy. 
case tApp: return app().left->determinePos(pos); +#endif default: return pos; } #pragma GCC diagnostic pop } -bool Value::isTrivial() const +template<> +bool ValueStorage::isTrivial() const { - return !isa() - && (!isa() - || (dynamic_cast(thunk().expr) && ((ExprAttrs *) thunk().expr)->dynamicAttrs->empty()) - || dynamic_cast(thunk().expr) || dynamic_cast(thunk().expr)); + auto p1_ = p1; // must acquire before reading p0, since thunks can change + auto p0_ = p0.load(std::memory_order_acquire); + + auto pd = static_cast(p0_ & discriminatorMask); + + if (pd == pdThunk || pd == pdPending || pd == pdAwaited) { + bool isApp = p1_ & discriminatorMask; + if (isApp) + return false; + auto expr = untagPointer(p1_); + return (dynamic_cast(expr) && ((ExprAttrs *) expr)->dynamicAttrs->empty()) + || dynamic_cast(expr) || dynamic_cast(expr); + } + + else + return true; } static Symbol getName(const AttrName & name, EvalState & state, Env & env) @@ -232,6 +257,8 @@ EvalMemory::EvalMemory() assertGCInitialized(); } +thread_local EvalState::EvalContext EvalState::evalContext; + EvalState::EvalState( const LookupPath & lookupPathFromArguments, ref store, @@ -300,6 +327,7 @@ EvalState::EvalState( , debugRepl(nullptr) , debugStop(false) , trylevel(0) + , asyncPathWriter(AsyncPathWriter::make(store)) , srcToStore(make_ref()) , importResolutionCache(make_ref()) , fileEvalCache(make_ref()) @@ -311,6 +339,7 @@ EvalState::EvalState( , baseEnv(mem.allocEnv(BASE_ENV_SIZE)) #endif , staticBaseEnv{std::make_shared(nullptr, nullptr)} + , executor{make_ref(settings)} { corepkgsFS->setPathDisplay(""); internalFS->setPathDisplay("«nix-internal»", ""); @@ -451,7 +480,8 @@ void EvalState::checkURI(const std::string & uri) Value * EvalState::addConstant(const std::string & name, Value & v, Constant info) { Value * v2 = allocValue(); - *v2 = v; + // Do a raw copy since `operator =` barfs on thunks. 
+ memcpy((char *) v2, (char *) &v, sizeof(Value)); addConstant(name, v2, info); return v2; } @@ -467,8 +497,10 @@ void EvalState::addConstant(const std::string & name, Value * v, Constant info) We might know the type of a thunk in advance, so be allowed to just write it down in that case. */ - if (auto gotType = v->type(true); gotType != nThunk) - assert(info.type == gotType); + if (v->isFinished()) { + if (auto gotType = v->type(); gotType != nThunk) + assert(info.type == gotType); + } /* Install value the base environment. */ staticBaseEnv->vars.emplace_back(symbols.create(name), baseEnvDispl); @@ -654,7 +686,7 @@ void printStaticEnvBindings(const SymbolTable & st, const StaticEnv & se) // just for the current level of Env, not the whole chain. void printWithBindings(const SymbolTable & st, const Env & env) { - if (!env.values[0]->isThunk()) { + if (env.values[0]->isFinished()) { std::cout << "with: "; std::cout << ANSI_MAGENTA; auto j = env.values[0]->attrs()->begin(); @@ -709,7 +741,7 @@ void mapStaticEnvBindings(const SymbolTable & st, const StaticEnv & se, const En if (env.up && se.up) { mapStaticEnvBindings(st, *se.up, *env.up, vm); - if (se.isWith && !env.values[0]->isThunk()) { + if (se.isWith && env.values[0]->isFinished()) { // add 'with' bindings. for (auto & j : *env.values[0]->attrs()) vm.insert_or_assign(std::string(st[j.name]), j.value); @@ -943,7 +975,14 @@ void EvalState::mkPos(Value & v, PosIdx p) auto origin = positions.originOf(p); if (auto path = std::get_if(&origin)) { auto attrs = buildBindings(3); - attrs.alloc(s.file).mkString(path->path.abs(), mem); + if (path->accessor == rootFS && store->isInStore(path->path.abs())) + // FIXME: only do this for virtual store paths? 
+ attrs.alloc(s.file).mkString( + path->path.abs(), + {NixStringContextElem::Path{.storePath = store->toStorePath(path->path.abs()).first}}, + mem); + else + attrs.alloc(s.file).mkString(path->path.abs(), mem); makePositionThunks(*this, p, attrs.alloc(s.line), attrs.alloc(s.column)); v.mkAttrs(attrs); } else @@ -991,6 +1030,7 @@ std::string EvalState::mkSingleDerivedPathStringRaw(const SingleDerivedPath & p) auto optStaticOutputPath = std::visit( overloaded{ [&](const SingleDerivedPath::Opaque & o) { + waitForPath(o.path); auto drv = store->readDerivation(o.path); auto i = drv.outputs.find(b.output); if (i == drv.outputs.end()) @@ -1066,10 +1106,9 @@ Value * ExprPath::maybeThunk(EvalState & state, Env & env) * from a thunk, ensuring that every file is parsed/evaluated only * once (via the thunk stored in `EvalState::fileEvalCache`). */ -struct ExprParseFile : Expr, gc +struct ExprParseFile : Expr { - // FIXME: make this a reference (see below). - SourcePath path; + SourcePath & path; bool mustBeTrivial; ExprParseFile(SourcePath & path, bool mustBeTrivial) @@ -1120,18 +1159,14 @@ void EvalState::evalFile(const SourcePath & path, Value & v, bool mustBeTrivial) } Value * vExpr; - // FIXME: put ExprParseFile on the stack instead of the heap once - // https://github.com/NixOS/nix/pull/13930 is merged. That will ensure - // the post-condition that `expr` is unreachable after - // `forceValue()` returns. - auto expr = new ExprParseFile{*resolvedPath, mustBeTrivial}; + ExprParseFile expr{*resolvedPath, mustBeTrivial}; fileEvalCache->try_emplace_and_cvisit( *resolvedPath, nullptr, [&](auto & i) { vExpr = allocValue(); - vExpr->mkThunk(&baseEnv, expr); + vExpr->mkThunk(&baseEnv, &expr); i.second = vExpr; }, [&](auto & i) { vExpr = i.second; }); @@ -1437,7 +1472,7 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v) state.attrSelects[pos2]++; } - state.forceValue(*vAttrs, (pos2 ? pos2 : this->pos)); + state.forceValue(*vAttrs, pos2 ? 
pos2 : this->pos); } catch (Error & e) { if (pos2) { @@ -1496,6 +1531,8 @@ void ExprLambda::eval(EvalState & state, Env & env, Value & v) v.mkLambda(&env, this); } +thread_local size_t EvalState::callDepth = 0; + void EvalState::callFunction(Value & fun, std::span args, Value & vRes, const PosIdx pos) { auto _level = addCallDepth(pos); @@ -1511,15 +1548,16 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, forceValue(fun, pos); - Value vCur(fun); + Value vCur = fun; auto makeAppChain = [&]() { - vRes = vCur; for (auto arg : args) { auto fun2 = allocValue(); - *fun2 = vRes; - vRes.mkPrimOpApp(fun2, arg); + *fun2 = vCur; + vCur.reset(); + vCur.mkPrimOpApp(fun2, arg); } + vRes = vCur; }; const Attr * functor; @@ -1615,6 +1653,7 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, lambda.name ? concatStrings("'", symbols[lambda.name], "'") : "anonymous lambda") : nullptr; + vCur.reset(); lambda.body->eval(*this, env2, vCur); } catch (Error & e) { if (loggerSettings.showTrace.get()) { @@ -1649,7 +1688,9 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, primOpCalls[fn->name]++; try { - fn->fun(*this, vCur.determinePos(noPos), args.data(), vCur); + auto pos = vCur.determinePos(noPos); + vCur.reset(); + fn->fun(*this, pos, args.data(), vCur); } catch (Error & e) { if (fn->addTrace) addErrorTrace(e, pos, "while calling the '%1%' builtin", fn->name); @@ -1671,6 +1712,7 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, assert(primOp->isPrimOp()); auto arity = primOp->primOp()->arity; auto argsLeft = arity - argsDone; + assert(argsLeft); if (args.size() < argsLeft) { /* We still don't have enough arguments, so extend the tPrimOpApp chain. */ @@ -1699,7 +1741,9 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, // 2. Create a fake env (arg1, arg2, etc.) 
and a fake expr (arg1: arg2: etc: builtins.name arg1 arg2 // etc) // so the debugger allows to inspect the wrong parameters passed to the builtin. - fn->fun(*this, vCur.determinePos(noPos), vArgs, vCur); + auto pos = vCur.determinePos(noPos); + vCur.reset(); + fn->fun(*this, pos, vArgs, vCur); } catch (Error & e) { if (fn->addTrace) addErrorTrace(e, pos, "while calling the '%1%' builtin", fn->name); @@ -1716,6 +1760,7 @@ void EvalState::callFunction(Value & fun, std::span args, Value & vRes, heap-allocate a copy and use that instead. */ Value * args2[] = {allocValue(), args[0]}; *args2[0] = vCur; + vCur.reset(); try { callFunction(*functor->value, args2, vCur, functor->pos); } catch (Error & e) { @@ -1903,8 +1948,12 @@ void ExprOpImpl::eval(EvalState & state, Env & env, Value & v) || state.evalBool(env, e2, pos, "in the right operand of the IMPL (->) operator")); } -void ExprOpUpdate::eval(EvalState & state, Value & v, Value & v1, Value & v2) +void ExprOpUpdate::eval(EvalState & state, Env & env, Value & v) { + Value v1, v2; + state.evalAttrs(env, e1, v1, pos, "in the left operand of the update (//) operator"); + state.evalAttrs(env, e2, v2, pos, "in the right operand of the update (//) operator"); + state.nrOpUpdates++; const Bindings & bindings1 = *v1.attrs(); @@ -1978,38 +2027,6 @@ void ExprOpUpdate::eval(EvalState & state, Value & v, Value & v1, Value & v2) state.nrOpUpdateValuesCopied += v.attrs()->size(); } -void ExprOpUpdate::eval(EvalState & state, Env & env, Value & v) -{ - UpdateQueue q; - evalForUpdate(state, env, q); - - v.mkAttrs(&Bindings::emptyBindings); - for (auto & rhs : std::views::reverse(q)) { - /* Remember that queue is sorted rightmost attrset first. 
*/ - eval(state, /*v=*/v, /*v1=*/v, /*v2=*/rhs); - } -} - -void Expr::evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx) -{ - Value v; - state.evalAttrs(env, this, v, getPos(), errorCtx); - q.push_back(v); -} - -void ExprOpUpdate::evalForUpdate(EvalState & state, Env & env, UpdateQueue & q) -{ - /* Output rightmost attrset first to the merge queue as the one - with the most priority. */ - e2->evalForUpdate(state, env, q, "in the right operand of the update (//) operator"); - e1->evalForUpdate(state, env, q, "in the left operand of the update (//) operator"); -} - -void ExprOpUpdate::evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx) -{ - evalForUpdate(state, env, q); -} - void ExprOpConcatLists::eval(EvalState & state, Env & env, Value & v) { Value v1; @@ -2129,7 +2146,7 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v) } else if (firstType == nFloat) { v.mkFloat(nf); } else if (firstType == nPath) { - if (!context.empty()) + if (hasContext(context)) state.error("a string that refers to a store path cannot be appended to a path") .atPos(pos) .withFrame(env, *this) @@ -2157,16 +2174,6 @@ void ExprPos::eval(EvalState & state, Env & env, Value & v) state.mkPos(v, pos); } -void ExprBlackHole::eval(EvalState & state, [[maybe_unused]] Env & env, Value & v) -{ - throwInfiniteRecursionError(state, v); -} - -[[gnu::noinline]] [[noreturn]] void ExprBlackHole::throwInfiniteRecursionError(EvalState & state, Value & v) -{ - state.error("infinite recursion encountered").atPos(v.determinePos(noPos)).debugThrow(); -} - // always force this to be separate, otherwise forceValue may inline it and take // a massive perf hit [[gnu::noinline]] @@ -2199,6 +2206,7 @@ void EvalState::forceValueDeep(Value & v) for (auto & i : *v.attrs()) try { // If the value is a thunk, we're evaling. Otherwise no trace necessary. 
+ // FIXME: race, thunk might be updated by another thread auto dts = state.debugRepl && i.value->isThunk() ? makeDebugTraceStacker( state, *i.value->thunk().expr, @@ -2351,12 +2359,15 @@ std::string_view EvalState::forceStringNoCtx(Value & v, const PosIdx pos, std::s { auto s = forceString(v, pos, errorCtx); if (v.context()) { - error( - "the string '%1%' is not allowed to refer to a store path (such as '%2%')", - v.string_view(), - (*v.context()->begin())->view()) - .withTrace(pos, errorCtx) - .debugThrow(); + NixStringContext context; + copyContext(v, context); + if (hasContext(context)) + error( + "the string '%1%' is not allowed to refer to a store path (such as '%2%')", + v.string_view(), + (*v.context()->begin())->view()) + .withTrace(pos, errorCtx) + .debugThrow(); } return s; } @@ -2411,14 +2422,21 @@ BackedStringView EvalState::coerceToString( } if (v.type() == nPath) { + // FIXME: instead of copying the path to the store, we could + // return a virtual store path that lazily copies the path to + // the store in devirtualize(). if (!canonicalizePath && !copyToStore) { // FIXME: hack to preserve path literals that end in a // slash, as in /foo/${x}. 
return v.pathStrView(); } else if (copyToStore) { - return store->printStorePath(copyPathToStore(context, v.path())); + return store->printStorePath(copyPathToStore(context, v.path(), v.determinePos(pos))); } else { - return std::string{v.path().path.abs()}; + auto path = v.path(); + if (path.accessor == rootFS && store->isInStore(path.path.abs())) { + context.insert(NixStringContextElem::Path{.storePath = store->toStorePath(path.path.abs()).first}); + } + return std::string(path.path.abs()); } } @@ -2490,7 +2508,7 @@ BackedStringView EvalState::coerceToString( .debugThrow(); } -StorePath EvalState::copyPathToStore(NixStringContext & context, const SourcePath & path) +StorePath EvalState::copyPathToStore(NixStringContext & context, const SourcePath & path, PosIdx pos) { if (nix::isDerivation(path.path.abs())) error("file names are not allowed to end in '%1%'", drvExtension).debugThrow(); @@ -2503,7 +2521,7 @@ StorePath EvalState::copyPathToStore(NixStringContext & context, const SourcePat *store, path.resolveSymlinks(SymlinkResolution::Ancestors), settings.readOnlyMode ? 
FetchMode::DryRun : FetchMode::Copy, - path.baseName(), + computeBaseName(path, pos), ContentAddressMethod::Raw::NixArchive, nullptr, repair); @@ -2555,7 +2573,9 @@ EvalState::coerceToStorePath(const PosIdx pos, Value & v, NixStringContext & con auto path = coerceToString(pos, v, context, errorCtx, false, false, true).toOwned(); if (auto storePath = store->maybeParseStorePath(path)) return *storePath; - error("path '%1%' is not in the Nix store", path).withTrace(pos, errorCtx).debugThrow(); + error("cannot coerce '%s' to a store path because it is not a subpath of the Nix store", path) + .withTrace(pos, errorCtx) + .debugThrow(); } std::pair EvalState::coerceToSingleDerivedPathUnchecked( @@ -2579,6 +2599,9 @@ std::pair EvalState::coerceToSingleDerivedP .debugThrow(); }, [&](NixStringContextElem::Built && b) -> SingleDerivedPath { return std::move(b); }, + [&](NixStringContextElem::Path && p) -> SingleDerivedPath { + error("string '%s' has no context", s).withTrace(pos, errorCtx).debugThrow(); + }, }, ((NixStringContextElem &&) *context.begin()).raw); return { @@ -2813,8 +2836,11 @@ void EvalState::assertEqValues(Value & v1, Value & v2, const PosIdx pos, std::st } return; - case nThunk: // Must not be left by forceValue - assert(false); + // Cannot be returned by forceValue(). + case nThunk: + case nFailed: + unreachable(); + default: // Note that we pass compiler flags that should make `default:` unreachable. // Also note that this probably ran after `eqValues`, which implements // the same logic more efficiently (without having to unwind stacks), @@ -2906,8 +2932,11 @@ bool EvalState::eqValues(Value & v1, Value & v2, const PosIdx pos, std::string_v // !!! return v1.fpoint() == v2.fpoint(); - case nThunk: // Must not be left by forceValue - assert(false); + // Cannot be returned by forceValue(). + case nThunk: + case nFailed: + unreachable(); + default: // Note that we pass compiler flags that should make `default:` unreachable. 
error("eqValues: cannot compare %1% with %2%", showType(v1), showType(v2)) .withTrace(pos, errorCtx) @@ -3013,6 +3042,11 @@ void EvalState::printStatistics() topObj["nrOpUpdates"] = nrOpUpdates.load(); topObj["nrOpUpdateValuesCopied"] = nrOpUpdateValuesCopied.load(); topObj["nrThunks"] = nrThunks.load(); + topObj["nrThunksAwaited"] = nrThunksAwaited.load(); + topObj["nrThunksAwaitedSlow"] = nrThunksAwaitedSlow.load(); + topObj["nrSpuriousWakeups"] = nrSpuriousWakeups.load(); + topObj["maxWaiting"] = maxWaiting.load(); + topObj["waitingTime"] = microsecondsWaiting / (double) 1000000; topObj["nrAvoided"] = nrAvoided.load(); topObj["nrLookups"] = nrLookups.load(); topObj["nrPrimOpCalls"] = nrPrimOpCalls.load(); @@ -3064,10 +3098,10 @@ void EvalState::printStatistics() } if (getEnv("NIX_SHOW_SYMBOLS").value_or("0") != "0") { + auto list = json::array(); + symbols.dump([&](std::string_view s) { list.emplace_back(std::string(s)); }); // XXX: overrides earlier assignment - topObj["symbols"] = json::array(); - auto & list = topObj["symbols"]; - symbols.dump([&](std::string_view s) { list.emplace_back(s); }); + topObj["symbols"] = std::move(list); } if (outPath == "-") { std::cerr << topObj.dump(2) << std::endl; @@ -3254,11 +3288,11 @@ Expr * EvalState::parse( const std::shared_ptr & staticEnv) { DocCommentMap tmpDocComments; // Only used when not origin is not a SourcePath - DocCommentMap * docComments = &tmpDocComments; + auto * docComments = &tmpDocComments; if (auto sourcePath = std::get_if(&origin)) { - auto [it, _] = positionToDocComment.try_emplace(*sourcePath); - docComments = &it->second; + auto [it, _] = positionToDocComment.lock()->try_emplace(*sourcePath, make_ref()); + docComments = &*it->second; } auto result = @@ -3276,12 +3310,14 @@ DocComment EvalState::getDocCommentForPos(PosIdx pos) if (!path) return {}; - auto table = positionToDocComment.find(*path); - if (table == positionToDocComment.end()) + auto positionToDocComment_ = 
positionToDocComment.readLock(); + + auto table = positionToDocComment_->find(*path); + if (table == positionToDocComment_->end()) return {}; - auto it = table->second.find(pos); - if (it == table->second.end()) + auto it = table->second->find(pos); + if (it == table->second->end()) return {}; return it->second; } @@ -3315,4 +3351,24 @@ void forceNoNullByte(std::string_view s, std::function pos) } } +void EvalState::waitForPath(const StorePath & path) +{ + asyncPathWriter->waitForPath(path); +} + +void EvalState::waitForPath(const SingleDerivedPath & path) +{ + std::visit( + overloaded{ + [&](const DerivedPathOpaque & p) { waitForPath(p.path); }, + [&](const SingleDerivedPathBuilt & p) { waitForPath(*p.drvPath); }, + }, + path.raw()); +} + +void EvalState::waitForAllPaths() +{ + asyncPathWriter->waitForAllPaths(); +} + } // namespace nix diff --git a/src/libexpr/include/nix/expr/attr-path.hh b/src/libexpr/include/nix/expr/attr-path.hh index fd48705b8b7b..25384f5c4c87 100644 --- a/src/libexpr/include/nix/expr/attr-path.hh +++ b/src/libexpr/include/nix/expr/attr-path.hh @@ -25,6 +25,8 @@ struct AttrPath : std::vector static AttrPath parse(EvalState & state, std::string_view s); + static AttrPath fromStrings(EvalState & state, const std::vector & attrNames); + std::string to_string(EvalState & state) const; std::vector resolve(EvalState & state) const; diff --git a/src/libexpr/include/nix/expr/eval-cache.hh b/src/libexpr/include/nix/expr/eval-cache.hh index 6d82f8c7e35a..208eff52b44f 100644 --- a/src/libexpr/include/nix/expr/eval-cache.hh +++ b/src/libexpr/include/nix/expr/eval-cache.hh @@ -35,7 +35,13 @@ class EvalCache : public std::enable_shared_from_this friend struct CachedEvalError; std::shared_ptr db; + +public: EvalState & state; + + std::function cleanupAttrPath = [](AttrPath && attrPath) { return std::move(attrPath); }; + +private: typedef std::function RootLoader; RootLoader rootLoader; RootValue value; @@ -99,7 +105,10 @@ class AttrCursor : public 
std::enable_shared_from_this friend class EvalCache; friend struct CachedEvalError; +public: ref root; + +private: using Parent = std::optional, Symbol>>; Parent parent; RootValue _value; @@ -127,8 +136,12 @@ public: AttrPath getAttrPath() const; + AttrPath getAttrPathRaw() const; + AttrPath getAttrPath(Symbol name) const; + AttrPath getAttrPathRaw(Symbol name) const; + std::string getAttrPathStr() const; std::string getAttrPathStr(Symbol name) const; diff --git a/src/libexpr/include/nix/expr/eval-inline.hh b/src/libexpr/include/nix/expr/eval-inline.hh index e8aa380fdb05..35b549261573 100644 --- a/src/libexpr/include/nix/expr/eval-inline.hh +++ b/src/libexpr/include/nix/expr/eval-inline.hh @@ -33,6 +33,9 @@ Value * EvalMemory::allocValue() GC_malloc_many returns a linked list of objects of the given size, where the first word of each object is also the pointer to the next object in the list. This also means that we have to explicitly clear the first word of every object we take. */ + thread_local static std::shared_ptr valueAllocCache{ + std::allocate_shared(traceable_allocator(), nullptr)}; + if (!*valueAllocCache) { *valueAllocCache = GC_malloc_many(sizeof(Value)); if (!*valueAllocCache) @@ -63,6 +66,9 @@ Env & EvalMemory::allocEnv(size_t size) #if NIX_USE_BOEHMGC if (size == 1) { /* see allocValue for explanations. */ + thread_local static std::shared_ptr env1AllocCache{ + std::allocate_shared(traceable_allocator(), nullptr)}; + if (!*env1AllocCache) { *env1AllocCache = GC_malloc_many(sizeof(Env) + sizeof(Value *)); if (!*env1AllocCache) @@ -82,27 +88,68 @@ Env & EvalMemory::allocEnv(size_t size) return *env; } -[[gnu::always_inline]] -void EvalState::forceValue(Value & v, const PosIdx pos) +/** + * An identifier of the current thread for deadlock detection, stored + * in p0 of pending/awaited thunks. We're not using std::thread::id + * because it's not guaranteed to fit. 
+ */ +extern thread_local uint32_t myEvalThreadId; + +template +void ValueStorage>>::force( + EvalState & state, PosIdx pos) { - if (v.isThunk()) { - Env * env = v.thunk().env; - assert(env || v.isBlackhole()); - Expr * expr = v.thunk().expr; + auto p0_ = p0.load(std::memory_order_acquire); + + auto pd = static_cast(p0_ & discriminatorMask); + + if (pd == pdThunk) { try { - v.mkBlackhole(); - // checkInterrupt(); - if (env) [[likely]] - expr->eval(*this, *env, v); - else - ExprBlackHole::throwInfiniteRecursionError(*this, v); + // The value we get here is only valid if we can set the + // thunk to pending. + auto p1_ = p1; + + // Atomically set the thunk to "pending". + if (!p0.compare_exchange_strong( + p0_, + pdPending | (myEvalThreadId << discriminatorBits), + std::memory_order_acquire, + std::memory_order_acquire)) { + pd = static_cast(p0_ & discriminatorMask); + if (pd == pdPending || pd == pdAwaited) { + // The thunk is already "pending" or "awaited", so + // we need to wait for it. + p0_ = waitOnThunk(state, p0_); + goto done; + } + assert(pd != pdThunk); + // Another thread finished this thunk, no need to wait. + goto done; + } + + bool isApp = p1_ & discriminatorMask; + if (isApp) { + auto left = untagPointer(p0_); + auto right = untagPointer(p1_); + state.callFunction(*left, *right, (Value &) *this, pos); + } else { + auto env = untagPointer(p0_); + auto expr = untagPointer(p1_); + expr->eval(state, *env, (Value &) *this); + } } catch (...) 
{ - v.mkThunk(env, expr); - tryFixupBlackHolePos(v, pos); + state.tryFixupBlackHolePos((Value &) *this, pos); + setStorage(new Value::Failed{.ex = std::current_exception()}); throw; } - } else if (v.isApp()) - callFunction(*v.app().left, *v.app().right, v, pos); + } + + else if (pd == pdPending || pd == pdAwaited) + p0_ = waitOnThunk(state, p0_); + +done: + if (InternalType(p0_ & 0xff) == tFailed) + std::rethrow_exception((std::bit_cast(p1))->ex); } [[gnu::always_inline]] diff --git a/src/libexpr/include/nix/expr/eval-settings.hh b/src/libexpr/include/nix/expr/eval-settings.hh index 5dbef9272b9f..f367541ec2f6 100644 --- a/src/libexpr/include/nix/expr/eval-settings.hh +++ b/src/libexpr/include/nix/expr/eval-settings.hh @@ -91,7 +91,7 @@ struct EvalSettings : Config - `$HOME/.nix-defexpr/channels` - The [user channel link](@docroot@/command-ref/files/default-nix-expression.md#user-channel-link), pointing to the current state of [channels](@docroot@/command-ref/files/channels.md) for the current user. + The user channel link pointing to the current state of channels for the current user. - `nixpkgs=$NIX_STATE_DIR/profiles/per-user/root/channels/nixpkgs` @@ -101,7 +101,7 @@ struct EvalSettings : Config The current state of all channels for the `root` user. - These files are set up by the [Nix installer](@docroot@/installation/installing-binary.md). + These files are set up by the Nix installer. See [`NIX_STATE_DIR`](@docroot@/command-ref/env-common.md#env-NIX_STATE_DIR) for details on the environment variable. > **Note** @@ -142,7 +142,7 @@ struct EvalSettings : Config R"( If set to `true`, the Nix evaluator doesn't allow access to any files outside of - [`builtins.nixPath`](@docroot@/language/builtins.md#builtins-nixPath), + [`builtins.nixPath`](@docroot@/language/builtins.md#builtins-nixPath) or to URIs outside of [`allowed-uris`](@docroot@/command-ref/conf-file.md#conf-allowed-uris). 
)"}; @@ -271,7 +271,7 @@ struct EvalSettings : Config "ignore-try", R"( If set to true, ignore exceptions inside 'tryEval' calls when evaluating Nix expressions in - debug mode (using the --debugger flag). By default the debugger pauses on all exceptions. + debug mode (using the --debugger flag). By default, the debugger pauses on all exceptions. )"}; Setting traceVerbose{ @@ -289,7 +289,7 @@ struct EvalSettings : Config "debugger-on-trace", R"( If set to true and the `--debugger` flag is given, the following functions - enter the debugger like [`builtins.break`](@docroot@/language/builtins.md#builtins-break): + enter the debugger like [`builtins.break`](@docroot@/language/builtins.md#builtins-break). * [`builtins.trace`](@docroot@/language/builtins.md#builtins-trace) * [`builtins.traceVerbose`](@docroot@/language/builtins.md#builtins-traceVerbose) @@ -305,7 +305,7 @@ struct EvalSettings : Config "debugger-on-warn", R"( If set to true and the `--debugger` flag is given, [`builtins.warn`](@docroot@/language/builtins.md#builtins-warn) - will enter the debugger like [`builtins.break`](@docroot@/language/builtins.md#builtins-break). + enter the debugger like [`builtins.break`](@docroot@/language/builtins.md#builtins-break). This is useful for debugging warnings in third-party Nix code. @@ -319,7 +319,7 @@ struct EvalSettings : Config R"( If set to true, [`builtins.warn`](@docroot@/language/builtins.md#builtins-warn) throws an error when logging a warning. - This will give you a stack trace that leads to the location of the warning. + This gives you a stack trace that leads to the location of the warning. This is useful for finding information about warnings in third-party Nix code when you can not start the interactive debugger, such as when Nix is called from a non-interactive script. See [`debugger-on-warn`](#conf-debugger-on-warn). @@ -361,6 +361,44 @@ struct EvalSettings : Config The default value is chosen to balance performance and memory usage. 
On 32 bit systems where memory is scarce, the default is a large value to reduce the amount of allocations. )"}; + + Setting lazyTrees{ + this, + false, + "lazy-trees", + R"( + If set to true, flakes and trees fetched by [`builtins.fetchTree`](@docroot@/language/builtins.md#builtins-fetchTree) are only copied to the Nix store when they're used as a dependency of a derivation. This avoids copying (potentially large) source trees unnecessarily. + )"}; + + // FIXME: this setting should really be in libflake, but it's + // currently needed in mountInput(). + Setting lazyLocks{ + this, + false, + "lazy-locks", + R"( + If enabled, Nix only includes NAR hashes in lock file entries if they're necessary to lock the input (i.e. when there is no other attribute that allows the content to be verified, like a Git revision). + This is not backward compatible with older versions of Nix. + If disabled, lock file entries always contain a NAR hash. + )"}; + + Setting evalCores{ + this, + 1, + "eval-cores", + R"( + The number of threads used to evaluate Nix expressions. This currently affects the following commands: + + * `nix search` + * `nix flake check` + * `nix flake show` + * `nix eval --json` + * Any evaluation that uses `builtins.parallel` + + The value `0` causes Nix to use all available CPU cores in the system. + + Note that enabling the debugger (`--debugger`) disables multi-threaded evaluation. + )"}; }; /** @@ -368,4 +406,10 @@ struct EvalSettings : Config */ std::filesystem::path getNixDefExpr(); +/** + * Stack size for evaluator threads. This used to be 64 MiB, but macOS as deployed on GitHub Actions has a + * hard limit slightly under that, so we round it down a bit. 
+ */ +constexpr size_t evalStackSize = 60 * 1024 * 1024; + } // namespace nix diff --git a/src/libexpr/include/nix/expr/eval.hh b/src/libexpr/include/nix/expr/eval.hh index b70c9db789d5..852ad0f7bcc9 100644 --- a/src/libexpr/include/nix/expr/eval.hh +++ b/src/libexpr/include/nix/expr/eval.hh @@ -51,6 +51,9 @@ struct SingleDerivedPath; enum RepairFlag : bool; struct MemorySourceAccessor; struct MountedSourceAccessor; +struct AsyncPathWriter; +struct Provenance; +struct Executor; namespace eval_cache { class EvalCache; @@ -225,7 +228,7 @@ struct StaticEvalSymbols line, column, functor, toString, right, wrong, structuredAttrs, json, allowedReferences, allowedRequisites, disallowedReferences, disallowedRequisites, maxSize, maxClosureSize, builder, args, contentAddressed, impure, outputHash, outputHashAlgo, outputHashMode, recurseForDerivations, description, self, epsilon, startSet, - operator_, key, path, prefix, outputSpecified; + operator_, key, path, prefix, outputSpecified, __meta; Expr::AstSymbols exprSymbols; @@ -278,6 +281,7 @@ struct StaticEvalSymbols .path = alloc.create("path"), .prefix = alloc.create("prefix"), .outputSpecified = alloc.create("outputSpecified"), + .__meta = alloc.create("__meta"), .exprSymbols = { .sub = alloc.create("__sub"), .lessThan = alloc.create("__lessThan"), @@ -432,6 +436,8 @@ public: std::list debugTraces; boost::unordered_flat_map> exprEnvs; + ref asyncPathWriter; + const std::shared_ptr getStaticEnv(const Expr & expr) const { auto i = exprEnvs.find(&expr); @@ -496,10 +502,11 @@ private: * Associate source positions of certain AST nodes with their preceding doc comment, if they have one. * Grouped by file. */ - boost::unordered_flat_map positionToDocComment; + SharedSync>> positionToDocComment; LookupPath lookupPath; + // FIXME: make thread-safe. boost::unordered_flat_map, StringViewHash, std::equal_to<>> lookupPathResolved; @@ -588,7 +595,12 @@ public: /** * Mount an input on the Nix store. 
*/ - StorePath mountInput(fetchers::Input & input, const fetchers::Input & originalInput, ref accessor); + StorePath mountInput( + fetchers::Input & input, + const fetchers::Input & originalInput, + ref accessor, + bool requireLockable, + bool forceNarHash = false); /** * Parse a Nix expression from the specified file. @@ -650,7 +662,10 @@ public: * application, call the function and overwrite `v` with the * result. Otherwise, this is a no-op. */ - inline void forceValue(Value & v, const PosIdx pos); + inline void forceValue(Value & v, const PosIdx pos) + { + v.force(*this, pos); + } void tryFixupBlackHolePos(Value & v, PosIdx pos); @@ -708,6 +723,12 @@ public: std::optional tryAttrsToString( const PosIdx pos, Value & v, NixStringContext & context, bool coerceMore = false, bool copyToStore = true); + StorePath devirtualize(const StorePath & path, StringMap * rewrites = nullptr); + + SingleDerivedPath devirtualize(const SingleDerivedPath & path, StringMap * rewrites = nullptr); + + std::string devirtualize(std::string_view s, const NixStringContext & context); + /** * String coercion. * @@ -725,7 +746,19 @@ public: bool copyToStore = true, bool canonicalizePath = true); - StorePath copyPathToStore(NixStringContext & context, const SourcePath & path); + StorePath copyPathToStore(NixStringContext & context, const SourcePath & path, PosIdx pos); + + /** + * Compute the base name for a `SourcePath`. For non-store paths, + * this is just `SourcePath::baseName()`. But for store paths, for + * backwards compatibility, it needs to be `-source`, + * i.e. as if the path were copied to the Nix store. This results + * in a "double-copied" store path like + * `/nix/store/--source`. We don't need to + * materialize /nix/store/-source though. Still, this + * requires reading/hashing the path twice. + */ + std::string computeBaseName(const SourcePath & path, PosIdx pos); /** * Path coercion. 
@@ -868,10 +901,11 @@ private: const std::shared_ptr & staticEnv); /** - * Current Nix call stack depth, used with `max-call-depth` setting to throw stack overflow hopefully before we run - * out of system stack. + * Current Nix call stack depth, used with `max-call-depth` + * setting to throw stack overflow hopefully before we run out of + * system stack. */ - size_t callDepth = 0; + thread_local static size_t callDepth; public: @@ -1002,6 +1036,12 @@ public: [[nodiscard]] StringMap realiseContext(const NixStringContext & context, StorePathSet * maybePaths = nullptr, bool isIFD = true); + /** + * Coerce `v` to a path and realise it, i.e. build anything in the value's string context using `realiseContext()`. + */ + SourcePath realisePath( + const PosIdx pos, Value & v, std::optional resolveSymlinks = SymlinkResolution::Full); + /** * Realise the given string with context, and return the string with outputs instead of downstream output * placeholders. @@ -1018,6 +1058,10 @@ public: DocComment getDocCommentForPos(PosIdx pos); + void waitForPath(const StorePath & path); + void waitForPath(const SingleDerivedPath & path); + void waitForAllPaths(); + private: /** @@ -1043,8 +1087,18 @@ private: Counter nrPrimOpCalls; Counter nrFunctionCalls; +public: + Counter nrThunksAwaited; + Counter nrThunksAwaitedSlow; + Counter microsecondsWaiting; + Counter currentlyWaiting; + Counter maxWaiting; + Counter nrSpuriousWakeups; + +private: bool countCalls; + // FIXME: make thread-safe. typedef boost::unordered_flat_map> PrimOpCalls; PrimOpCalls primOpCalls; @@ -1056,6 +1110,7 @@ private: void incrFunctionCall(ExprLambda * fun); + // FIXME: make thread-safe. typedef boost::unordered_flat_map> AttrSelects; AttrSelects attrSelects; @@ -1073,6 +1128,56 @@ private: friend struct Value; friend class ListBuilder; + +public: + + /** + * Per-thread evaluation context. This context is propagated to worker threads when a value is evaluated + * asynchronously. 
+ */ + struct EvalContext + { + std::shared_ptr provenance; + }; + + thread_local static EvalContext evalContext; + + /** + * Create a work item that propagates the current evaluation context. + */ + template + auto makeWork(T && t) + { + return [this, t{std::move(t)}, evalContext(evalContext)]() { + this->evalContext = evalContext; + t(); + }; + } + + /** + * Add a work item to the given work vector that propagates the current evaluation context. + */ + template + void addWork(WorkItems & work, uint8_t priority, T && t) + { + work.emplace_back(makeWork(std::move(t)), priority); + } + + template + void spawn(FuturesVector & futures, uint8_t priority, T && t) + { + futures.spawn(priority, makeWork(std::move(t))); + } + + /** + * Worker threads manager. + * + * Note: keep this last to ensure that it's destroyed first, so we + * don't have any background work items (e.g. from + * `builtins.parallel`) referring to a partially destroyed + * `EvalState`. + */ + ref executor; }; struct DebugTraceStacker @@ -1109,6 +1214,24 @@ SourcePath resolveExprPath(SourcePath path, bool addDefaultNix = true); */ bool isAllowedURI(std::string_view uri, const Strings & allowedPaths); +struct PushProvenance +{ + EvalState & state; + std::shared_ptr prev; + + PushProvenance(EvalState & state, std::shared_ptr prov) + : state(state) + { + state.evalContext.provenance.swap(prev); + state.evalContext.provenance.swap(prov); + } + + ~PushProvenance() + { + state.evalContext.provenance.swap(prev); + } +}; + } // namespace nix #include "nix/expr/eval-inline.hh" diff --git a/src/libexpr/include/nix/expr/meson.build b/src/libexpr/include/nix/expr/meson.build index 2b0fbc406030..5c707ed4bffe 100644 --- a/src/libexpr/include/nix/expr/meson.build +++ b/src/libexpr/include/nix/expr/meson.build @@ -24,11 +24,13 @@ headers = [ config_pub_h ] + files( 'get-drvs.hh', 'json-to-value.hh', 'nixexpr.hh', + 'parallel-eval.hh', 'parser-state.hh', 'primops.hh', 'print-ambiguous.hh', 'print-options.hh', 
'print.hh', + 'provenance.hh', 'repl-exit-status.hh', 'search-path.hh', 'static-string-data.hh', diff --git a/src/libexpr/include/nix/expr/nixexpr.hh b/src/libexpr/include/nix/expr/nixexpr.hh index df39ecdde913..9bce1a9b91ad 100644 --- a/src/libexpr/include/nix/expr/nixexpr.hh +++ b/src/libexpr/include/nix/expr/nixexpr.hh @@ -8,7 +8,6 @@ #include #include -#include "nix/expr/gc-small-vector.hh" #include "nix/expr/value.hh" #include "nix/expr/symbol-table.hh" #include "nix/expr/eval-error.hh" @@ -91,8 +90,6 @@ typedef std::vector AttrSelectionPath; std::string showAttrSelectionPath(const SymbolTable & symbols, std::span attrPath); -using UpdateQueue = SmallTemporaryValueVector; - /* Abstract syntax of Nix expressions. */ struct Expr @@ -123,14 +120,6 @@ struct Expr * of thunks allocated. */ virtual Value * maybeThunk(EvalState & state, Env & env); - - /** - * Only called when performing an attrset update: `//` or similar. - * Instead of writing to a Value &, this function writes to an UpdateQueue. - * This allows the expression to perform multiple updates in a delayed manner, gathering up all the updates before - * applying them. - */ - virtual void evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx); virtual void setName(Symbol name); virtual void setDocComment(DocComment docComment) {}; @@ -738,7 +727,7 @@ struct ExprOpNot : Expr struct name : Expr \ { \ MakeBinOpMembers(name, s) \ - } + }; MakeBinOp(ExprOpEq, "=="); MakeBinOp(ExprOpNEq, "!="); @@ -749,14 +738,7 @@ MakeBinOp(ExprOpConcatLists, "++"); struct ExprOpUpdate : Expr { -private: - /** Special case for merging of two attrsets. 
*/ - void eval(EvalState & state, Value & v, Value & v1, Value & v2); - void evalForUpdate(EvalState & state, Env & env, UpdateQueue & q); - -public: - MakeBinOpMembers(ExprOpUpdate, "//"); - virtual void evalForUpdate(EvalState & state, Env & env, UpdateQueue & q, std::string_view errorCtx) override; + MakeBinOpMembers(ExprOpUpdate, "//") }; struct ExprConcatStrings : Expr @@ -811,23 +793,11 @@ struct ExprPos : Expr COMMON_METHODS }; -/* only used to mark thunks as black holes. */ -struct ExprBlackHole : Expr -{ - void show(const SymbolTable & symbols, std::ostream & str) const override {} - - void eval(EvalState & state, Env & env, Value & v) override; - - void bindVars(EvalState & es, const std::shared_ptr & env) override {} - - [[noreturn]] static void throwInfiniteRecursionError(EvalState & state, Value & v); -}; - -extern ExprBlackHole eBlackHole; - class Exprs { - std::pmr::monotonic_buffer_resource buffer; + // FIXME: use std::pmr::monotonic_buffer_resource when parallel + // eval is disabled? 
+ std::pmr::synchronized_pool_resource buffer; public: std::pmr::polymorphic_allocator alloc{&buffer}; diff --git a/src/libexpr/include/nix/expr/parallel-eval.hh b/src/libexpr/include/nix/expr/parallel-eval.hh new file mode 100644 index 000000000000..27d002e69ca2 --- /dev/null +++ b/src/libexpr/include/nix/expr/parallel-eval.hh @@ -0,0 +1,92 @@ +#pragma once + +#include +#include +#include +#include + +#include + +#include "nix/util/sync.hh" +#include "nix/util/logging.hh" +#include "nix/util/environment-variables.hh" +#include "nix/util/util.hh" +#include "nix/util/signals.hh" + +#if NIX_USE_BOEHMGC +# include +#endif + +namespace nix { + +struct Executor +{ + using work_t = std::function; + + struct Item + { + std::promise promise; + work_t work; + }; + + struct State + { + std::multimap queue; + std::vector threads; + }; + + std::atomic_bool quit{false}; + + const unsigned int evalCores; + + const bool enabled; + + const std::unique_ptr interruptCallback; + + Sync state_; + + std::condition_variable wakeup; + + static unsigned int getEvalCores(const EvalSettings & evalSettings); + + Executor(const EvalSettings & evalSettings); + + ~Executor(); + + void createWorker(State & state); + + void worker(); + + using WorkItems = std::vector>; + + std::vector> spawn(WorkItems && items); + + static thread_local bool amWorkerThread; +}; + +struct FutureVector +{ + Executor & executor; + + struct State + { + std::vector> futures; + }; + + Sync state_; + + ~FutureVector(); + + // FIXME: add a destructor that cancels/waits for all futures. 
+ + void spawn(Executor::WorkItems && work); + + void spawn(uint8_t prioPrefix, Executor::work_t && work) + { + spawn({{std::move(work), prioPrefix}}); + } + + void finishAll(); +}; + +} // namespace nix diff --git a/src/libexpr/include/nix/expr/print-ambiguous.hh b/src/libexpr/include/nix/expr/print-ambiguous.hh index c0d811d4b939..e64f7f9bf8d0 100644 --- a/src/libexpr/include/nix/expr/print-ambiguous.hh +++ b/src/libexpr/include/nix/expr/print-ambiguous.hh @@ -15,7 +15,6 @@ namespace nix { * * See: https://github.com/NixOS/nix/issues/9730 */ -void printAmbiguous( - Value & v, const SymbolTable & symbols, std::ostream & str, std::set * seen, int depth); +void printAmbiguous(EvalState & state, Value & v, std::ostream & str, std::set * seen, int depth); } // namespace nix diff --git a/src/libexpr/include/nix/expr/provenance.hh b/src/libexpr/include/nix/expr/provenance.hh new file mode 100644 index 000000000000..f4cc887a6b2a --- /dev/null +++ b/src/libexpr/include/nix/expr/provenance.hh @@ -0,0 +1,23 @@ +#pragma once + +#include "nix/util/provenance.hh" + +namespace nix { + +/** + * Provenance indicating that this store path was instantiated by the `derivation` builtin function. Its main purpose is + * to record `meta` fields. 
+ */ +struct DerivationProvenance : Provenance +{ + std::shared_ptr next; + ref meta; + + DerivationProvenance(std::shared_ptr next, ref meta) + : next(std::move(next)) + , meta(std::move(meta)) {}; + + nlohmann::json to_json() const override; +}; + +} // namespace nix diff --git a/src/libexpr/include/nix/expr/symbol-table.hh b/src/libexpr/include/nix/expr/symbol-table.hh index f0220376c53f..231510829331 100644 --- a/src/libexpr/include/nix/expr/symbol-table.hh +++ b/src/libexpr/include/nix/expr/symbol-table.hh @@ -2,13 +2,14 @@ ///@file #include + #include "nix/expr/value.hh" -#include "nix/expr/static-string-data.hh" -#include "nix/util/chunked-vector.hh" #include "nix/util/error.hh" +#include "nix/util/sync.hh" +#include "nix/util/alignment.hh" #include -#include +#include namespace nix { @@ -17,17 +18,27 @@ class SymbolValue : protected Value friend class SymbolStr; friend class SymbolTable; - uint32_t idx; - - SymbolValue() = default; - -public: operator std::string_view() const noexcept { return string_view(); } }; +struct ContiguousArena +{ + const char * data; + const size_t maxSize; + + // Put this in a separate cache line to ensure that a thread + // adding a symbol doesn't slow down threads dereferencing symbols + // by invalidating the read-only `data` field. + alignas(64) std::atomic size{0}; + + ContiguousArena(size_t maxSize); + + size_t allocate(size_t bytes); +}; + class StaticSymbolTable; /** @@ -42,6 +53,7 @@ class Symbol friend class StaticSymbolTable; private: + /// The offset of the symbol in `SymbolTable::arena`. 
uint32_t id; explicit constexpr Symbol(uint32_t id) noexcept @@ -73,6 +85,8 @@ public: constexpr auto operator<=>(const Symbol & other) const noexcept = default; friend class std::hash; + + constexpr static size_t alignment = alignof(SymbolValue); }; /** @@ -84,25 +98,20 @@ class SymbolStr { friend class SymbolTable; - constexpr static size_t chunkSize{8192}; - using SymbolValueStore = ChunkedVector; - const SymbolValue * s; struct Key { using HashType = boost::hash; - SymbolValueStore & store; std::string_view s; std::size_t hash; - std::pmr::memory_resource & resource; + ContiguousArena & arena; - Key(SymbolValueStore & store, std::string_view s, std::pmr::memory_resource & stringMemory) - : store(store) - , s(s) + Key(std::string_view s, ContiguousArena & arena) + : s(s) , hash(HashType{}(s)) - , resource(stringMemory) + , arena(arena) { } }; @@ -113,22 +122,7 @@ public: { } - SymbolStr(const Key & key) - { - auto size = key.s.size(); - if (size >= std::numeric_limits::max()) { - throw Error("Size of symbol exceeds 4GiB and cannot be stored"); - } - // for multi-threaded implementations: lock store and allocator here - const auto & [v, idx] = key.store.add(SymbolValue{}); - if (size == 0) { - v.mkStringNoCopy(""_sds, nullptr); - } else { - v.mkStringNoCopy(StringData::make(key.resource, key.s)); - } - v.idx = idx; - this->s = &v; - } + SymbolStr(const Key & key); bool operator==(std::string_view s2) const noexcept { @@ -157,11 +151,7 @@ public: [[gnu::always_inline]] bool empty() const noexcept { - auto * p = &s->string_data(); - // Save a dereference in the sentinel value case - if (p == &""_sds) - return true; - return p->size() == 0; + return !s->string_data().size(); } [[gnu::always_inline]] @@ -176,11 +166,6 @@ public: return s; } - explicit operator Symbol() const noexcept - { - return Symbol{s->idx + 1}; - } - struct Hash { using is_transparent = void; @@ -218,6 +203,11 @@ public: return operator()(b, a); } }; + + constexpr static size_t 
computeSize(std::string_view s) + { + return alignUp(sizeof(Value) + sizeof(StringData) + s.size() + 1, Symbol::alignment); + } }; class SymbolTable; @@ -237,6 +227,7 @@ class StaticSymbolTable std::array symbols; std::size_t size = 0; + std::size_t nextId = alignof(SymbolValue); public: constexpr StaticSymbolTable() = default; @@ -245,8 +236,9 @@ public: { /* No need to check bounds because out of bounds access is a compilation error. */ - auto sym = Symbol(size + 1); //< +1 because Symbol with id = 0 is reserved + auto sym = Symbol(nextId); symbols[size++] = {str, sym}; + nextId += SymbolStr::computeSize(str); return sym; } @@ -264,61 +256,67 @@ private: * SymbolTable is an append only data structure. * During its lifetime the monotonic buffer holds all strings and nodes, if the symbol set is node based. */ - std::pmr::monotonic_buffer_resource buffer; - SymbolStr::SymbolValueStore store{16}; + ContiguousArena arena; /** - * Transparent lookup of string view for a pointer to a ChunkedVector entry -> return offset into the store. - * ChunkedVector references are never invalidated. + * Transparent lookup of string view for a pointer to a + * SymbolValue in the arena. */ - boost::unordered_flat_set symbols{SymbolStr::chunkSize}; + boost::concurrent_flat_set symbols; public: SymbolTable(const StaticSymbolTable & staticSymtab) + : arena(1 << 30) { + // Reserve symbol ID 0 and ensure alignment of the first allocation. + arena.allocate(Symbol::alignment); + staticSymtab.copyIntoSymbolTable(*this); } /** * Converts a string into a symbol. */ - Symbol create(std::string_view s) - { - // Most symbols are looked up more than once, so we trade off insertion performance - // for lookup performance. - // FIXME: make this thread-safe. 
- return Symbol(*symbols.insert(SymbolStr::Key{store, s, buffer}).first); - } + Symbol create(std::string_view s); std::vector resolve(const std::span & symbols) const { std::vector result; result.reserve(symbols.size()); - for (auto sym : symbols) + for (auto & sym : symbols) result.push_back((*this)[sym]); return result; } SymbolStr operator[](Symbol s) const { - uint32_t idx = s.id - uint32_t(1); - if (idx >= store.size()) - unreachable(); - return store[idx]; + assert(s.id); + // Note: we don't check arena.size here to avoid a dependency + // on other threads creating new symbols. + return SymbolStr(*reinterpret_cast(arena.data + s.id)); } - [[gnu::always_inline]] size_t size() const noexcept { - return store.size(); + return symbols.size(); } - size_t totalSize() const; + size_t totalSize() const + { + return arena.size; + } template void dump(T callback) const { - store.forEach(callback); + std::string_view left{arena.data, arena.size}; + left = left.substr(Symbol::alignment); + while (!left.empty()) { + auto v = reinterpret_cast(left.data()); + callback(v->string_view()); + left = left.substr( + alignUp(sizeof(SymbolValue) + sizeof(StringData) + v->string_view().size() + 1, Symbol::alignment)); + } } }; diff --git a/src/libexpr/include/nix/expr/value.hh b/src/libexpr/include/nix/expr/value.hh index 004dcc43f0f4..10893347bd6e 100644 --- a/src/libexpr/include/nix/expr/value.hh +++ b/src/libexpr/include/nix/expr/value.hh @@ -1,6 +1,7 @@ #pragma once ///@file +#include #include #include #include @@ -26,6 +27,19 @@ namespace nix { struct Value; class BindingsBuilder; +static constexpr int discriminatorBits = 3; + +enum PrimaryDiscriminator : int { + pdSingleDWord = 0, + pdThunk = 1, + pdPending = 2, + pdAwaited = 3, + pdPairOfPointers = 4, + pdListN = 5, // FIXME: get rid of this by putting the size in the first word + pdString = 6, + pdPath = 7, // FIXME: get rid of this by ditching the `accessor` field +}; + /** * Internal type discriminator, which is more 
detailed than `ValueType`, as * it specifies the exact representation used (for types that have multiple @@ -36,27 +50,50 @@ class BindingsBuilder; * This also restricts the number of internal types represented with distinct memory layouts. */ typedef enum { - tUninitialized = 0, - /* layout: Single/zero field payload */ - tInt = 1, - tBool, - tNull, - tFloat, - tExternal, - tPrimOp, - tAttrs, - /* layout: Pair of pointers payload */ - tListSmall, - tPrimOpApp, - tApp, - tThunk, - tLambda, - /* layout: Single untaggable field */ - tListN, - tString, - tPath, + /* Values that have more type bits in the first word, and the + payload (a single word) in the second word. */ + tUninitialized = PrimaryDiscriminator::pdSingleDWord | (0 << discriminatorBits), + tInt = PrimaryDiscriminator::pdSingleDWord | (1 << discriminatorBits), + tFloat = PrimaryDiscriminator::pdSingleDWord | (2 << discriminatorBits), + tBool = PrimaryDiscriminator::pdSingleDWord | (3 << discriminatorBits), + tNull = PrimaryDiscriminator::pdSingleDWord | (4 << discriminatorBits), + tAttrs = PrimaryDiscriminator::pdSingleDWord | (5 << discriminatorBits), + tPrimOp = PrimaryDiscriminator::pdSingleDWord | (6 << discriminatorBits), + tFailed = PrimaryDiscriminator::pdSingleDWord | (7 << discriminatorBits), + tExternal = PrimaryDiscriminator::pdSingleDWord | (8 << discriminatorBits), + + /* Thunks. */ + tThunk = PrimaryDiscriminator::pdThunk | (0 << discriminatorBits), + tApp = PrimaryDiscriminator::pdThunk | (1 << discriminatorBits), + + tPending = PrimaryDiscriminator::pdPending, + tAwaited = PrimaryDiscriminator::pdAwaited, + + /* Values that consist of two pointers. The second word contains + more type bits in its alignment niche. */ + tListSmall = PrimaryDiscriminator::pdPairOfPointers | (0 << discriminatorBits), + tPrimOpApp = PrimaryDiscriminator::pdPairOfPointers | (1 << discriminatorBits), + tLambda = PrimaryDiscriminator::pdPairOfPointers | (2 << discriminatorBits), + + /* Special values. 
*/ + tListN = PrimaryDiscriminator::pdListN, + tString = PrimaryDiscriminator::pdString, + tPath = PrimaryDiscriminator::pdPath, } InternalType; +/** + * Return true if `type` denotes a "finished" value, i.e. a weak-head + * normal form. + * + * Note that tPrimOpApp is considered "finished" because it represents + * a primop call with an incomplete number of arguments, and therefore + * cannot be evaluated further. + */ +inline bool isFinished(InternalType t) +{ + return t != tUninitialized && t != tThunk && t != tApp && t != tPending && t != tAwaited; +} + /** * This type abstracts over all actual value types in the language, * grouping together implementation details like tList*, different function @@ -64,6 +101,7 @@ typedef enum { */ typedef enum { nThunk, + nFailed, nInt, nFloat, nBool, @@ -80,7 +118,6 @@ class Bindings; struct Env; struct Expr; struct ExprLambda; -struct ExprBlackHole; struct PrimOp; class Symbol; class SymbolStr; @@ -282,7 +319,7 @@ namespace detail { /** * Implementation mixin class for defining the public types - * In can be inherited from by the actual ValueStorage implementations + * In can be inherited by the actual ValueStorage implementations * for free due to Empty Base Class Optimization (EBCO). 
*/ struct ValueBase @@ -417,6 +454,11 @@ struct ValueBase size_t size; Value * const * elems; }; + + struct Failed : gc + { + std::exception_ptr ex; + }; }; template @@ -443,6 +485,7 @@ struct PayloadTypeToInternalType MACRO(PrimOp *, primOp, tPrimOp) \ MACRO(ValueBase::PrimOpApplicationThunk, primOpApp, tPrimOpApp) \ MACRO(ExternalValueBase *, external, tExternal) \ + MACRO(ValueBase::Failed *, failed, tFailed) \ MACRO(NixFloat, fpoint, tFloat) #define NIX_VALUE_PAYLOAD_TYPE(T, FIELD_NAME, DISCRIMINATOR) \ @@ -546,12 +589,44 @@ class alignas(16) ValueStorage::type; - using Payload = std::array; - Payload payload = {}; - static constexpr int discriminatorBits = 3; + /** + * For multithreaded evaluation, we have to make sure that thunks/apps + * (the only mutable types of values) are updated in a safe way. A + * value can have the following states (see `force()`): + * + * * "thunk"/"app". When forced, this value transitions to + * "pending". The current thread will evaluate the + * thunk/app. When done, it will override the value with the + * result. If the value is at that point in the "awaited" state, + * the thread will wake up any waiting threads. + * + * * "pending". This means it's currently being evaluated. If + * another thread forces this value, it transitions to "awaited" + * and the thread will wait for the value to be updated (see + * `waitOnThunk()`). + * + * * "awaited". Like pending, only it means that there already are + * one or more threads waiting for this thunk. + * + * To ensure race-free access, the non-atomic word `p1` must + * always be updated before `p0`. Writes to `p0` should use + * *release* semantics (so that `p1` and any referenced values become + * visible to threads that read `p0`), and reads from `p0` should + * use `*acquire* semantics. + * + * Note: at some point, we may want to switch to 128-bit atomics + * so that `p0` and `p1` can be updated together + * atomically. 
However, 128-bit atomics are a bit problematic at + * present on x86_64 (see + * e.g. https://ibraheem.ca/posts/128-bit-atomics/). + */ + std::atomic p0{0}; + PackedPointer p1{0}; + static constexpr PackedPointer discriminatorMask = (PackedPointer(1) << discriminatorBits) - 1; + // FIXME: move/update /** * The value is stored as a pair of 8-byte double words. All pointers are assumed * to be 8-byte aligned. This gives us at most 6 bits of discriminator bits @@ -581,15 +656,6 @@ class alignas(16) ValueStorage requires std::is_pointer_v @@ -600,7 +666,7 @@ class alignas(16) ValueStorage(payload[0] & discriminatorMask); + return static_cast(p0 & discriminatorMask); } static void assertAligned(PackedPointer val) noexcept @@ -608,13 +674,30 @@ class alignas(16) ValueStorage(p0_ & discriminatorMask); + if (pd == pdPending) + // Nothing to do; no thread is waiting on this thunk. + ; + else if (pd == pdAwaited) + // Slow path: wake up the threads that are waiting on this + // thunk. + notifyWaiters(); + else if (pd == pdThunk) + unreachable(); + } + template void setSingleDWordPayload(PackedPointer untaggedVal) noexcept { - /* There's plenty of free upper bits in the first dword, which is - used only for the discriminator. */ - payload[0] = static_cast(pdSingleDWord) | (static_cast(type) << discriminatorBits); - payload[1] = untaggedVal; + /* There's plenty of free upper bits in the first byte, which + is used only for the discriminator. 
*/ + finish(static_cast(type), untaggedVal); } template @@ -623,32 +706,42 @@ class alignas(16) ValueStorage= pdListN && discriminator <= pdPath); auto firstFieldPayload = std::bit_cast(firstPtrField); assertAligned(firstFieldPayload); - payload[0] = static_cast(discriminator) | firstFieldPayload; - payload[1] = std::bit_cast(untaggableField); + finish(static_cast(discriminator) | firstFieldPayload, std::bit_cast(untaggableField)); } template void setPairOfPointersPayload(T * firstPtrField, U * secondPtrField) noexcept { static_assert(type >= tListSmall && type <= tLambda); - { - auto firstFieldPayload = std::bit_cast(firstPtrField); - assertAligned(firstFieldPayload); - payload[0] = static_cast(pdPairOfPointers) | firstFieldPayload; - } - { - auto secondFieldPayload = std::bit_cast(secondPtrField); - assertAligned(secondFieldPayload); - payload[1] = (type - tListSmall) | secondFieldPayload; - } + auto firstFieldPayload = std::bit_cast(firstPtrField); + assertAligned(firstFieldPayload); + auto secondFieldPayload = std::bit_cast(secondPtrField); + assertAligned(secondFieldPayload); + finish( + static_cast(pdPairOfPointers) | firstFieldPayload, + ((type - tListSmall) >> discriminatorBits) | secondFieldPayload); + } + + template + void setThunkPayload(T * firstPtrField, U * secondPtrField) noexcept + { + static_assert(type >= tThunk && type <= tApp); + auto secondFieldPayload = std::bit_cast(secondPtrField); + assertAligned(secondFieldPayload); + p1 = ((type - tThunk) >> discriminatorBits) | secondFieldPayload; + auto firstFieldPayload = std::bit_cast(firstPtrField); + assertAligned(firstFieldPayload); + // Note: awaited values can never become a thunk, so no need + // to check for waiters. 
+ p0.store(static_cast(pdThunk) | firstFieldPayload, std::memory_order_release); } template requires std::is_pointer_v && std::is_pointer_v void getPairOfPointersPayload(T & firstPtrField, U & secondPtrField) const noexcept { - firstPtrField = untagPointer(payload[0]); - secondPtrField = untagPointer(payload[1]); + firstPtrField = untagPointer(p0); + secondPtrField = untagPointer(p1); } protected: @@ -656,42 +749,45 @@ protected: InternalType getInternalType() const noexcept { switch (auto pd = getPrimaryDiscriminator()) { - case pdUninitialized: - /* Discriminator value of zero is used to distinguish uninitialized values. */ - return tUninitialized; case pdSingleDWord: - /* Payloads that only use up a single double word store the InternalType - in the upper bits of the first double word. */ - return InternalType(payload[0] >> discriminatorBits); + /* Payloads that only use up a single double word store + the full InternalType in the first byte. */ + return InternalType(p0 & 0xff); + case pdThunk: + return static_cast(tThunk + ((p1 & discriminatorMask) << discriminatorBits)); + case pdPending: + return tPending; + case pdAwaited: + return tAwaited; + case pdPairOfPointers: + return static_cast(tListSmall + ((p1 & discriminatorMask) << discriminatorBits)); /* The order must match that of the enumerations defined in InternalType. 
*/ case pdListN: case pdString: case pdPath: return static_cast(tListN + (pd - pdListN)); - case pdPairOfPointers: - return static_cast(tListSmall + (payload[1] & discriminatorMask)); [[unlikely]] default: unreachable(); } } -#define NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(TYPE, MEMBER_A, MEMBER_B) \ - \ - void getStorage(TYPE & val) const noexcept \ - { \ - getPairOfPointersPayload(val MEMBER_A, val MEMBER_B); \ - } \ - \ - void setStorage(TYPE val) noexcept \ - { \ - setPairOfPointersPayload>(val MEMBER_A, val MEMBER_B); \ +#define NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(TYPE, SET, MEMBER_A, MEMBER_B) \ + \ + void getStorage(TYPE & val) const noexcept \ + { \ + getPairOfPointersPayload(val MEMBER_A, val MEMBER_B); \ + } \ + \ + void setStorage(TYPE val) noexcept \ + { \ + SET>(val MEMBER_A, val MEMBER_B); \ } - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(SmallList, [0], [1]) - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(PrimOpApplicationThunk, .left, .right) - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(FunctionApplicationThunk, .left, .right) - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(ClosureThunk, .env, .expr) - NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(Lambda, .env, .fun) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(SmallList, setPairOfPointersPayload, [0], [1]) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(PrimOpApplicationThunk, setPairOfPointersPayload, .left, .right) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(Lambda, setPairOfPointersPayload, .env, .fun) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(FunctionApplicationThunk, setThunkPayload, .left, .right) + NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS(ClosureThunk, setThunkPayload, .env, .expr) #undef NIX_VALUE_STORAGE_DEF_PAIR_OF_PTRS @@ -699,52 +795,57 @@ protected: { /* PackedPointerType -> int64_t here is well-formed, since the standard requires this conversion to follow 2's complement rules. This is just a no-op. 
*/ - integer = NixInt(payload[1]); + integer = NixInt(p1); } void getStorage(bool & boolean) const noexcept { - boolean = payload[1]; + boolean = p1; } void getStorage(Null & null) const noexcept {} void getStorage(NixFloat & fpoint) const noexcept { - fpoint = std::bit_cast(payload[1]); + fpoint = std::bit_cast(p1); } void getStorage(ExternalValueBase *& external) const noexcept { - external = std::bit_cast(payload[1]); + external = std::bit_cast(p1); } void getStorage(PrimOp *& primOp) const noexcept { - primOp = std::bit_cast(payload[1]); + primOp = std::bit_cast(p1); } void getStorage(Bindings *& attrs) const noexcept { - attrs = std::bit_cast(payload[1]); + attrs = std::bit_cast(p1); } void getStorage(List & list) const noexcept { - list.elems = untagPointer(payload[0]); - list.size = payload[1]; + list.elems = untagPointer(p0); + list.size = p1; } void getStorage(StringWithContext & string) const noexcept { - string.context = untagPointer(payload[0]); - string.str = std::bit_cast(payload[1]); + string.context = untagPointer(p0); + string.str = std::bit_cast(p1); } void getStorage(Path & path) const noexcept { - path.accessor = untagPointer(payload[0]); - path.path = std::bit_cast(payload[1]); + path.accessor = untagPointer(p0); + path.path = std::bit_cast(p1); + } + + void getStorage(Failed *& failed) const noexcept + { + failed = std::bit_cast(p1); } void setStorage(NixInt integer) noexcept @@ -796,8 +897,85 @@ protected: { setUntaggablePayload(path.accessor, path.path); } + + void setStorage(Failed * failed) noexcept + { + setSingleDWordPayload(std::bit_cast(failed)); + } + + ValueStorage() {} + + ValueStorage(const ValueStorage & v) + { + *this = v; + } + + /** + * Copy a value. This is not allowed to be a thunk to avoid + * accidental work duplication. 
+ */ + ValueStorage & operator=(const ValueStorage & v) + { + auto p0_ = v.p0.load(std::memory_order_acquire); + auto p1_ = v.p1; // must be loaded after p0 + auto pd = static_cast(p0_ & discriminatorMask); + if (pd == pdThunk || pd == pdPending || pd == pdAwaited) + unreachable(); + finish(p0_, p1_); + return *this; + } + +public: + + /** + * Check whether forcing this value requires a trivial amount of + * computation. A value is trivial if it's finished or if it's a + * thunk whose expression is an attrset with no dynamic + * attributes, a lambda or a list. Note that it's up to the caller + * to check whether the members of those attrsets or lists must be + * trivial. + */ + bool isTrivial() const; + + inline void reset() + { + p1 = 0; + p0.store(0, std::memory_order_relaxed); + } + + /// Only used for testing. + inline void mkBlackhole() + { + p0.store(pdPending, std::memory_order_relaxed); + } + + void force(EvalState & state, PosIdx pos); + +private: + + /** + * Given a thunk that was observed to be in the pending or awaited + * state, wait for it to finish. Returns the first word of the + * value. + */ + PackedPointer waitOnThunk(EvalState & state, PackedPointer p0); + + /** + * Wake up any threads that are waiting on this value. + */ + void notifyWaiters(); }; +template<> +void ValueStorage::notifyWaiters(); + +template<> +ValueStorage::PackedPointer +ValueStorage::waitOnThunk(EvalState & state, PackedPointer p0); + +template<> +bool ValueStorage::isTrivial() const; + /** * View into a list of Value * that is itself immutable. 
* @@ -1039,47 +1217,58 @@ public: void print(EvalState & state, std::ostream & str, PrintOptions options = PrintOptions{}); + // FIXME: optimize, only look at first word + inline bool isFinished() const + { + return nix::isFinished(getInternalType()); + } + // Functions needed to distinguish the type // These should be removed eventually, by putting the functionality that's // needed by callers into methods of this type - // type() == nThunk inline bool isThunk() const { return isa(); - }; + } inline bool isApp() const { return isa(); - }; + } - inline bool isBlackhole() const; + inline bool isBlackhole() const + { + auto t = getInternalType(); + return t == tPending || t == tAwaited; + } // type() == nFunction inline bool isLambda() const { return isa(); - }; + } inline bool isPrimOp() const { return isa(); - }; + } inline bool isPrimOpApp() const { return isa(); - }; + } + + inline bool isFailed() const + { + return isa(); + } /** * Returns the normal type of a Value. This only returns nThunk if * the Value hasn't been forceValue'd - * - * @param invalidIsThunk Instead of aborting an an invalid (probably - * 0, so uninitialized) internal type, return `nThunk`. 
*/ - inline ValueType type(bool invalidIsThunk = false) const + inline ValueType type() const { switch (getInternalType()) { case tUninitialized: @@ -1107,14 +1296,15 @@ public: return nExternal; case tFloat: return nFloat; + case tFailed: + return nFailed; case tThunk: case tApp: + case tPending: + case tAwaited: return nThunk; } - if (invalidIsThunk) - return nThunk; - else - unreachable(); + unreachable(); } /** @@ -1205,8 +1395,6 @@ public: setStorage(Lambda{.env = e, .fun = f}); } - inline void mkBlackhole(); - void mkPrimOp(PrimOp * p); inline void mkPrimOpApp(Value * l, Value * r) noexcept @@ -1229,6 +1417,11 @@ public: setStorage(n); } + inline void mkFailed() noexcept + { + setStorage(new Value::Failed{.ex = std::current_exception()}); + } + bool isList() const noexcept { return isa(); @@ -1246,13 +1439,6 @@ public: PosIdx determinePos(const PosIdx pos) const; - /** - * Check whether forcing this value requires a trivial amount of - * computation. In particular, function applications are - * non-trivial. - */ - bool isTrivial() const; - SourcePath path() const { return SourcePath( @@ -1314,6 +1500,7 @@ public: return getStorage(); } + // FIXME: remove this since reading it is racy. ClosureThunk thunk() const noexcept { return getStorage(); @@ -1324,6 +1511,7 @@ public: return getStorage(); } + // FIXME: remove this since reading it is racy. 
FunctionApplicationThunk app() const noexcept { return getStorage(); @@ -1343,19 +1531,12 @@ public: { return getStorage().accessor; } -}; -extern ExprBlackHole eBlackHole; - -bool Value::isBlackhole() const -{ - return isThunk() && thunk().expr == (Expr *) &eBlackHole; -} - -void Value::mkBlackhole() -{ - mkThunk(nullptr, (Expr *) &eBlackHole); -} + Failed * failed() const noexcept + { + return getStorage(); + } +}; typedef std::vector> ValueVector; typedef boost::unordered_flat_map< diff --git a/src/libexpr/include/nix/expr/value/context.hh b/src/libexpr/include/nix/expr/value/context.hh index 054516bc2687..fa3d4e87c0f2 100644 --- a/src/libexpr/include/nix/expr/value/context.hh +++ b/src/libexpr/include/nix/expr/value/context.hh @@ -64,7 +64,31 @@ struct NixStringContextElem */ using Built = SingleDerivedPath::Built; - using Raw = std::variant; + /** + * A store path that will not result in a store reference when + * used in a derivation or toFile. + * + * When you apply `builtins.toString` to a path value representing + * a path in the Nix store (as is the case with flake inputs), + * historically you got a string without context + * (e.g. `/nix/store/...-source`). This is broken, since it allows + * you to pass a store path to a derivation/toFile without a + * proper store reference. This is especially a problem with lazy + * trees, since the store path is a virtual path that doesn't + * exist. + * + * For backwards compatibility, and to warn users about this + * unsafe use of `toString`, we keep track of such strings as a + * special type of context. + */ + struct Path + { + StorePath storePath; + + GENERATE_CMP(Path, me->storePath); + }; + + using Raw = std::variant; Raw raw; @@ -92,4 +116,10 @@ struct NixStringContextElem */ typedef std::set NixStringContext; +/** + * Returns false if `context` has no elements other than + * `NixStringContextElem::Path`. 
+ */ +bool hasContext(const NixStringContext & context); + } // namespace nix diff --git a/src/libexpr/meson.build b/src/libexpr/meson.build index 18c4c7fa32c8..941cb0a8a442 100644 --- a/src/libexpr/meson.build +++ b/src/libexpr/meson.build @@ -7,6 +7,8 @@ project( # TODO(Qyriad): increase the warning level 'warning_level=1', 'errorlogs=true', # Please print logs for tests that fail + 'unity=on', + 'unity_size=1024', ], meson_version : '>= 1.1', license : 'LGPL-2.1-or-later', @@ -43,6 +45,7 @@ boost = dependency( modules : [ 'container', 'context', + 'thread', ], include_type : 'system', ) @@ -62,7 +65,6 @@ bdw_gc = dependency('bdw-gc', required : bdw_gc_required) if bdw_gc.found() deps_public += bdw_gc foreach funcspec : [ - 'pthread_attr_get_np', 'pthread_getattr_np', ] define_name = 'HAVE_' + funcspec.underscorify().to_upper() @@ -77,6 +79,17 @@ endif # Used in public header. Affects ABI! configdata_pub.set('NIX_USE_BOEHMGC', bdw_gc.found().to_int()) +link_args = [] + +wasmtime_required = get_option('wasm').disable_if( + get_option('default_library') == 'static', + error_message : 'Building with wasmtime and static linking is not supported', +) + +if wasmtime_required.enabled() + link_args += '-lwasmtime' +endif + toml11 = dependency( 'toml11', version : '>=3.7.0', @@ -156,20 +169,21 @@ sources = files( 'eval-cache.cc', 'eval-error.cc', 'eval-gc.cc', - 'eval-profiler-settings.cc', 'eval-profiler.cc', 'eval-settings.cc', 'eval.cc', 'function-trace.cc', 'get-drvs.cc', 'json-to-value.cc', - 'lexer-helpers.cc', 'nixexpr.cc', + 'parallel-eval.cc', 'paths.cc', 'primops.cc', 'print-ambiguous.cc', 'print.cc', + 'provenance.cc', 'search-path.cc', + 'symbol-table.cc', 'value-to-json.cc', 'value-to-xml.cc', 'value.cc', @@ -214,6 +228,8 @@ parser_library = static_library( 'nixexpr-parser', parser_tab, lexer_tab, + # Putting eval-profiler-settings.cc here to work around an inscrutable gcc compiler error when doing a unity build. 
+ files('eval-profiler-settings.cc', 'lexer-helpers.cc'), cpp_args : parser_library_cpp_args, dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, @@ -224,6 +240,7 @@ parser_library = static_library( override_options : [ 'b_ndebug=@0@'.format(not get_option('debug')), 'b_lto=@0@'.format(get_option('b_lto') and cxx.get_id() != 'gcc'), + 'unity=off', ], ) @@ -237,9 +254,9 @@ this_library = library( soversion : nix_soversion, dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, - link_args : linker_export_flags, + link_args : linker_export_flags + link_args, link_whole : [ parser_library ], - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, cpp_pch : do_pch ? [ 'pch/precompiled-headers.hh' ] : [], ) diff --git a/src/libexpr/meson.options b/src/libexpr/meson.options index 847bb211d302..2defbf77aaa0 100644 --- a/src/libexpr/meson.options +++ b/src/libexpr/meson.options @@ -3,3 +3,9 @@ option( type : 'feature', description : 'enable garbage collection in the Nix expression evaluator (requires Boehm GC)', ) + +option( + 'wasm', + type : 'feature', + description : 'enable wasmtime integration into the Nix expression evaluator', +) diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc index 4a2f71a11b8f..b52370816f5a 100644 --- a/src/libexpr/nixexpr.cc +++ b/src/libexpr/nixexpr.cc @@ -13,8 +13,6 @@ namespace nix { Counter Expr::nrExprs; -ExprBlackHole eBlackHole; - // FIXME: remove, because *symbols* are abstract and do not have a single // textual representation; see printIdentifier() std::ostream & operator<<(std::ostream & str, const SymbolStr & symbol) @@ -626,15 +624,6 @@ void ExprLambda::setDocComment(DocComment docComment) // belongs in the same conditional. body->setDocComment(docComment); } -}; - -/* Symbol table. 
*/ - -size_t SymbolTable::totalSize() const -{ - size_t n = 0; - dump([&](SymbolStr s) { n += s.size(); }); - return n; } std::string DocComment::getInnerText(const PosTable & positions) const diff --git a/src/libexpr/package.nix b/src/libexpr/package.nix index d0aef34e95de..46c617bbcb19 100644 --- a/src/libexpr/package.nix +++ b/src/libexpr/package.nix @@ -14,6 +14,7 @@ boehmgc, nlohmann_json, toml11, + wasmtime, # Configuration Options @@ -29,6 +30,11 @@ # Temporarily disabled on Windows because the `GC_throw_bad_alloc` # symbol is missing during linking. enableGC ? !stdenv.hostPlatform.isWindows, + + # Whether to use wasmtime for wasm integration in the Nix language evaluator + # + # Temporarily disabled when static linking due to Rust not compiling + enableWasm ? !stdenv.hostPlatform.isStatic, }: let @@ -36,7 +42,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-expr"; + pname = "determinate-nix-expr"; inherit version; workDir = ./.; @@ -64,7 +70,8 @@ mkMesonLibrary (finalAttrs: { buildInputs = [ toml11 - ]; + ] + ++ lib.optional enableWasm wasmtime; propagatedBuildInputs = [ nix-util @@ -77,8 +84,20 @@ mkMesonLibrary (finalAttrs: { mesonFlags = [ (lib.mesonEnable "gc" enableGC) + (lib.mesonEnable "wasm" enableWasm) ]; + # Fixes a problem with the "nix-expr-libcxxStdenv-static" package output. + # For some reason that is not clear, it is wanting to use libgcc_eh which is not available. + # Force this to be built with compiler-rt over libgcc_eh works. 
+ # Issue: https://github.com/NixOS/nixpkgs/issues/177129 + NIX_CFLAGS_COMPILE = lib.optional ( + stdenv.cc.isClang + && stdenv.hostPlatform.isStatic + && stdenv.cc.libcxx != null + && stdenv.cc.libcxx.isLLVM + ) "-rtlib=compiler-rt"; + meta = { platforms = lib.platforms.unix ++ lib.platforms.windows; }; diff --git a/src/libexpr/parallel-eval.cc b/src/libexpr/parallel-eval.cc new file mode 100644 index 000000000000..3dfcc54c40fb --- /dev/null +++ b/src/libexpr/parallel-eval.cc @@ -0,0 +1,300 @@ +#include "nix/expr/eval.hh" +#include "nix/expr/parallel-eval.hh" +#include "nix/store/globals.hh" +#include "nix/expr/primops.hh" + +namespace nix { + +// cache line alignment to prevent false sharing +struct alignas(64) WaiterDomain +{ + std::condition_variable cv; +}; + +static std::array, 128> waiterDomains; + +thread_local bool Executor::amWorkerThread{false}; + +unsigned int Executor::getEvalCores(const EvalSettings & evalSettings) +{ + return evalSettings.evalCores == 0UL ? Settings::getDefaultCores() : evalSettings.evalCores; +} + +Executor::Executor(const EvalSettings & evalSettings) + : evalCores(getEvalCores(evalSettings)) + , enabled(evalCores > 1) + , interruptCallback(createInterruptCallback([&]() { + for (auto & domain : waiterDomains) + domain.lock()->cv.notify_all(); + })) +{ + debug("executor using %d threads", evalCores); + auto state(state_.lock()); + for (size_t n = 0; n < evalCores; ++n) + createWorker(*state); +} + +Executor::~Executor() +{ + std::vector threads; + { + auto state(state_.lock()); + quit = true; + std::swap(threads, state->threads); + debug("executor shutting down with %d items left", state->queue.size()); + } + + wakeup.notify_all(); + + for (auto & thr : threads) + thr.join(); +} + +void Executor::createWorker(State & state) +{ + boost::thread::attributes attrs; + attrs.set_stack_size(evalStackSize); + state.threads.push_back(boost::thread(attrs, [&]() { +#if NIX_USE_BOEHMGC + GC_stack_base sb; + GC_get_stack_base(&sb); + 
GC_register_my_thread(&sb); +#endif + worker(); +#if NIX_USE_BOEHMGC + GC_unregister_my_thread(); +#endif + })); +} + +void Executor::worker() +{ + ReceiveInterrupts receiveInterrupts; + + unix::interruptCheck = [&]() { return (bool) quit; }; + + amWorkerThread = true; + + while (true) { + Item item; + + while (true) { + auto state(state_.lock()); + if (quit) { + // Set an `Interrupted` exception on all promises so + // we get a nicer error than "std::future_error: + // Broken promise". + auto ex = std::make_exception_ptr(Interrupted("interrupted by the user")); + for (auto & item : state->queue) + item.second.promise.set_exception(ex); + state->queue.clear(); + return; + } + if (!state->queue.empty()) { + item = std::move(state->queue.begin()->second); + state->queue.erase(state->queue.begin()); + break; + } + state.wait(wakeup); + } + + try { + item.work(); + item.promise.set_value(); + } catch (const Interrupted &) { + quit = true; + item.promise.set_exception(std::current_exception()); + } catch (...) { + item.promise.set_exception(std::current_exception()); + } + } +} + +std::vector> Executor::spawn(WorkItems && items) +{ + if (items.empty()) + return {}; + + std::vector> futures; + + { + auto state(state_.lock()); + for (auto & item : items) { + std::promise promise; + futures.push_back(promise.get_future()); + thread_local std::random_device rd; + thread_local std::uniform_int_distribution dist(0, 1ULL << 48); + auto key = (uint64_t(item.second) << 48) | dist(rd); + state->queue.emplace(key, Item{.promise = std::move(promise), .work = std::move(item.first)}); + } + } + + if (items.size() == 1) + wakeup.notify_one(); + else + wakeup.notify_all(); + + return futures; +} + +FutureVector::~FutureVector() +{ + try { + finishAll(); + } catch (...) 
{ + ignoreExceptionInDestructor(); + } +} + +void FutureVector::spawn(Executor::WorkItems && work) +{ + auto futures = executor.spawn(std::move(work)); + auto state(state_.lock()); + for (auto & future : futures) + state->futures.push_back(std::move(future)); +} + +void FutureVector::finishAll() +{ + std::exception_ptr ex; + while (true) { + std::vector> futures; + { + auto state(state_.lock()); + std::swap(futures, state->futures); + } + debug("got %d futures", futures.size()); + if (futures.empty()) + break; + for (auto & future : futures) + try { + future.get(); + } catch (...) { + if (ex) { + if (!getInterrupted()) + ignoreExceptionExceptInterrupt(); + } else + ex = std::current_exception(); + } + } + if (ex) + std::rethrow_exception(ex); +} + +static Sync & getWaiterDomain(detail::ValueBase & v) +{ + auto domain = (((size_t) &v) >> 5) % waiterDomains.size(); + return waiterDomains[domain]; +} + +static std::atomic nextEvalThreadId{1}; +thread_local uint32_t myEvalThreadId(nextEvalThreadId++); + +template<> +ValueStorage::PackedPointer +ValueStorage::waitOnThunk(EvalState & state, PackedPointer expectedP0) +{ + state.nrThunksAwaited++; + + auto domain = getWaiterDomain(*this).lock(); + + auto threadId = expectedP0 >> discriminatorBits; + + if (static_cast(expectedP0 & discriminatorMask) == pdAwaited) { + /* Make sure that the value is still awaited, now that we're + holding the domain lock. */ + auto p0_ = p0.load(std::memory_order_acquire); + auto pd = static_cast(p0_ & discriminatorMask); + + /* If the value has been finalized in the meantime (i.e. is no + longer pending), we're done. */ + if (pd != pdAwaited) { + assert(pd != pdThunk && pd != pdPending); + return p0_; + } + } else { + /* Mark this value as being waited on. 
*/ + PackedPointer p0_ = expectedP0; + if (!p0.compare_exchange_strong( + p0_, + pdAwaited | (threadId << discriminatorBits), + std::memory_order_acquire, + std::memory_order_acquire)) { + /* If the value has been finalized in the meantime (i.e. is + no longer pending), we're done. */ + auto pd = static_cast(p0_ & discriminatorMask); + if (pd != pdAwaited) { + assert(pd != pdThunk && pd != pdPending); + return p0_; + } + /* The value was already in the "waited on" state, so we're + not the only thread waiting on it. */ + } + } + + /* Wait for another thread to finish this value. */ + if (threadId == myEvalThreadId) + state.error("infinite recursion encountered") + .atPos(((Value &) *this).determinePos(noPos)) + .debugThrow(); + + state.nrThunksAwaitedSlow++; + state.currentlyWaiting++; + state.maxWaiting = std::max(state.maxWaiting, state.currentlyWaiting); + + auto now1 = std::chrono::steady_clock::now(); + + while (true) { + domain.wait(domain->cv); + auto p0_ = p0.load(std::memory_order_acquire); + auto pd = static_cast(p0_ & discriminatorMask); + if (pd != pdAwaited) { + assert(pd != pdThunk && pd != pdPending); + auto now2 = std::chrono::steady_clock::now(); + state.microsecondsWaiting += std::chrono::duration_cast(now2 - now1).count(); + state.currentlyWaiting--; + return p0_; + } + state.nrSpuriousWakeups++; + checkInterrupt(); + } +} + +template<> +void ValueStorage::notifyWaiters() +{ + auto domain = getWaiterDomain(*this).lock(); + + domain->cv.notify_all(); +} + +static void prim_parallel(EvalState & state, const PosIdx pos, Value ** args, Value & v) +{ + state.forceList(*args[0], pos, "while evaluating the first argument passed to builtins.parallel"); + + if (state.executor->evalCores > 1) { + Executor::WorkItems work; + for (auto value : args[0]->listView()) + if (!value->isFinished()) + state.addWork( + work, 0, [value(allocRootValue(value)), &state, pos]() { state.forceValue(**value, pos); }); + state.executor->spawn(std::move(work)); + } + + 
state.forceValue(*args[1], pos); + v = *args[1]; +} + +// FIXME: gate this behind an experimental feature. +static RegisterPrimOp r_parallel({ + .name = "__parallel", + .args = {"xs", "x"}, + .arity = 2, + .doc = R"( + Start evaluation of the values `xs` in the background and return `x`. + )", + .fun = prim_parallel, + .experimentalFeature = Xp::ParallelEval, +}); + +} // namespace nix diff --git a/src/libexpr/paths.cc b/src/libexpr/paths.cc index 8622ab20885e..dd3e9bb3c9ed 100644 --- a/src/libexpr/paths.cc +++ b/src/libexpr/paths.cc @@ -20,25 +20,97 @@ SourcePath EvalState::storePath(const StorePath & path) return {rootFS, CanonPath{store->printStorePath(path)}}; } -StorePath -EvalState::mountInput(fetchers::Input & input, const fetchers::Input & originalInput, ref accessor) +StorePath EvalState::devirtualize(const StorePath & path, StringMap * rewrites) { - auto storePath = fetchToStore(fetchSettings, *store, accessor, FetchMode::Copy, input.getName()); + if (auto mount = storeFS->getMount(CanonPath(store->printStorePath(path)))) { + auto storePath = fetchToStore( + fetchSettings, + *store, + SourcePath{ref(mount)}, + settings.readOnlyMode ? 
FetchMode::DryRun : FetchMode::Copy, + path.name()); + assert(storePath.name() == path.name()); + if (rewrites) + rewrites->emplace(path.hashPart(), storePath.hashPart()); + return storePath; + } else + return path; +} + +SingleDerivedPath EvalState::devirtualize(const SingleDerivedPath & path, StringMap * rewrites) +{ + if (auto o = std::get_if(&path.raw())) + return SingleDerivedPath::Opaque{devirtualize(o->path, rewrites)}; + else + return path; +} + +std::string EvalState::devirtualize(std::string_view s, const NixStringContext & context) +{ + StringMap rewrites; + + for (auto & c : context) + if (auto o = std::get_if(&c.raw)) + devirtualize(o->path, &rewrites); + + return rewriteStrings(std::string(s), rewrites); +} + +std::string EvalState::computeBaseName(const SourcePath & path, PosIdx pos) +{ + if (path.accessor == rootFS) { + if (auto storePath = store->maybeParseStorePath(path.path.abs())) { + debug( + "Copying '%s' to the store again.\n" + "You can make Nix evaluate faster and copy fewer files by replacing `./.` with the `self` flake input, " + "or `builtins.path { path = ./.; name = \"source\"; }`.\n", + path); + return std::string( + fetchToStore(fetchSettings, *store, path, FetchMode::DryRun, storePath->name()).to_string()); + } + } + return std::string(path.baseName()); +} + +StorePath EvalState::mountInput( + fetchers::Input & input, + const fetchers::Input & originalInput, + ref accessor, + bool requireLockable, + bool forceNarHash) +{ + auto storePath = settings.lazyTrees + ? 
StorePath::random(input.getName()) + : fetchToStore(fetchSettings, *store, accessor, FetchMode::Copy, input.getName()); allowPath(storePath); // FIXME: should just whitelist the entire virtual store + std::optional _narHash; + + auto getNarHash = [&]() { + if (!_narHash) { + if (store->isValidPath(storePath)) + _narHash = store->queryPathInfo(storePath)->narHash; + else + _narHash = fetchToStore2(fetchSettings, *store, accessor, FetchMode::DryRun, input.getName()).second; + } + return _narHash; + }; + storeFS->mount(CanonPath(store->printStorePath(storePath)), accessor); - auto narHash = store->queryPathInfo(storePath)->narHash; - input.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); + if (forceNarHash + || (requireLockable && (!settings.lazyTrees || !settings.lazyLocks || !input.isLocked(fetchSettings)) + && !input.getNarHash())) + input.attrs.insert_or_assign("narHash", getNarHash()->to_string(HashFormat::SRI, true)); - if (originalInput.getNarHash() && narHash != *originalInput.getNarHash()) + if (originalInput.getNarHash() && *getNarHash() != *originalInput.getNarHash()) throw Error( (unsigned int) 102, "NAR hash mismatch in input '%s', expected '%s' but got '%s'", originalInput.to_string(), - narHash.to_string(HashFormat::SRI, true), - originalInput.getNarHash()->to_string(HashFormat::SRI, true)); + originalInput.getNarHash()->to_string(HashFormat::SRI, true), + getNarHash()->to_string(HashFormat::SRI, true)); return storePath; } diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 759b33ac6fda..e404538a1a2c 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -17,6 +17,9 @@ #include "nix/expr/primops.hh" #include "nix/fetchers/fetch-to-store.hh" #include "nix/util/sort.hh" +#include "nix/util/mounted-source-accessor.hh" +#include "nix/expr/provenance.hh" +#include "nix/util/override-provenance-source-accessor.hh" #include #include @@ -73,6 +76,7 @@ StringMap EvalState::realiseContext(const 
NixStringContext & context, StorePathS for (auto & c : context) { auto ensureValid = [&](const StorePath & p) { + waitForPath(p); if (!store->isValidPath(p)) error(store->printStorePath(p)).debugThrow(); }; @@ -87,7 +91,10 @@ StringMap EvalState::realiseContext(const NixStringContext & context, StorePathS ensureValid(b.drvPath->getBaseStorePath()); }, [&](const NixStringContextElem::Opaque & o) { - ensureValid(o.path); + // We consider virtual store paths valid here. They'll + // be devirtualized if needed elsewhere. + if (!storeFS->getMount(CanonPath(store->printStorePath(o.path)))) + ensureValid(o.path); if (maybePathsOut) maybePathsOut->emplace(o.path); }, @@ -97,6 +104,9 @@ StringMap EvalState::realiseContext(const NixStringContext & context, StorePathS if (maybePathsOut) maybePathsOut->emplace(d.drvPath); }, + [&](const NixStringContextElem::Path & p) { + // FIXME: do something? + }, }, c.raw); } @@ -157,24 +167,20 @@ StringMap EvalState::realiseContext(const NixStringContext & context, StorePathS return res; } -static SourcePath realisePath( - EvalState & state, - const PosIdx pos, - Value & v, - std::optional resolveSymlinks = SymlinkResolution::Full) +SourcePath EvalState::realisePath(const PosIdx pos, Value & v, std::optional resolveSymlinks) { NixStringContext context; - auto path = state.coerceToPath(noPos, v, context, "while realising the context of a path"); + auto path = coerceToPath(noPos, v, context, "while realising the context of a path"); try { - if (!context.empty() && path.accessor == state.rootFS) { - auto rewrites = state.realiseContext(context); + if (!context.empty() && path.accessor == rootFS) { + auto rewrites = realiseContext(context); path = {path.accessor, CanonPath(rewriteStrings(path.path.abs(), rewrites))}; } return resolveSymlinks ? 
path.resolveSymlinks(*resolveSymlinks) : path; } catch (Error & e) { - e.addTrace(state.positions[pos], "while realising the context of path '%s'", path); + e.addTrace(positions[pos], "while realising the context of path '%s'", path); throw; } } @@ -294,7 +300,7 @@ static void scopedImport(EvalState & state, const PosIdx pos, SourcePath & path, argument. */ static void import(EvalState & state, const PosIdx pos, Value & vPath, Value * vScope, Value & v) { - auto path = realisePath(state, pos, vPath, std::nullopt); + auto path = state.realisePath(pos, vPath, std::nullopt); auto path2 = path.path.abs(); // FIXME @@ -302,6 +308,7 @@ static void import(EvalState & state, const PosIdx pos, Value & vPath, Value * v if (!state.store->isStorePath(path2)) return std::nullopt; auto storePath = state.store->parseStorePath(path2); + state.waitForPath(storePath); if (!(state.store->isValidPath(storePath) && isDerivation(path2))) return std::nullopt; return storePath; @@ -447,7 +454,7 @@ extern "C" typedef void (*ValueInitializer)(EvalState & state, Value & v); /* Load a ValueInitializer from a DSO and return whatever it initializes */ void prim_importNative(EvalState & state, const PosIdx pos, Value ** args, Value & v) { - auto path = realisePath(state, pos, *args[0]); + auto path = state.realisePath(pos, *args[0]); std::string sym( state.forceStringNoCtx(*args[1], pos, "while evaluating the second argument passed to builtins.importNative")); @@ -566,6 +573,7 @@ static void prim_typeOf(EvalState & state, const PosIdx pos, Value ** args, Valu v.mkStringNoCopy("float"_sds); break; case nThunk: + case nFailed: unreachable(); } } @@ -1150,7 +1158,7 @@ static RegisterPrimOp primop_floor({ a NixInt and if `*number* < -9007199254740992` or `*number* > 9007199254740992`. If the datatype of *number* is neither a NixInt (signed 64-bit integer) nor a NixFloat - (IEEE-754 double-precision floating-point number), an evaluation error will be thrown. 
+ (IEEE-754 double-precision floating-point number), an evaluation error is thrown. )", .fun = prim_floor, }); @@ -1197,7 +1205,7 @@ static RegisterPrimOp primop_tryEval({ `false` if an error was thrown) and `value`, equalling *e* if successful and `false` otherwise. `tryEval` only prevents errors created by `throw` or `assert` from being thrown. - Errors `tryEval` doesn't catch are, for example, those created + Errors that `tryEval` doesn't catch are, for example, those created by `abort` and type errors generated by builtins. Also note that this doesn't evaluate *e* deeply, so `let e = { x = throw ""; }; in (builtins.tryEval e).success` is `true`. Using @@ -1349,7 +1357,7 @@ static RegisterPrimOp primop_warn({ [`debugger-on-trace`](@docroot@/command-ref/conf-file.md#conf-debugger-on-trace) or [`debugger-on-warn`](@docroot@/command-ref/conf-file.md#conf-debugger-on-warn) option is set to `true` and the `--debugger` flag is given, the - interactive debugger will be started when `warn` is called (like + interactive debugger is started when `warn` is called (like [`break`](@docroot@/language/builtins.md#builtins-break)). If the @@ -1373,16 +1381,15 @@ static void prim_second(EvalState & state, const PosIdx pos, Value ** args, Valu * Derivations *************************************************************/ -static void derivationStrictInternal(EvalState & state, std::string_view name, const Bindings * attrs, Value & v); +static void derivationStrictInternal( + EvalState & state, + std::string_view name, + const Bindings * attrs, + Value & v, + std::shared_ptr provenance, + bool acceptMeta); -/* Construct (as a unobservable side effect) a Nix derivation - expression that performs the derivation described by the argument - set. 
Returns the original set extended with the following - attributes: `outPath' containing the primary output path of the - derivation; `drvPath' containing the path of the Nix expression; - and `type' set to `derivation' to indicate that this is a - derivation. */ -static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value ** args, Value & v) +static void prim_derivationStrictGeneric(EvalState & state, const PosIdx pos, Value ** args, Value & v, bool acceptMeta) { state.forceAttrs(*args[0], pos, "while evaluating the argument passed to builtins.derivationStrict"); @@ -1402,7 +1409,7 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value ** } try { - derivationStrictInternal(state, drvName, attrs, v); + derivationStrictInternal(state, drvName, attrs, v, state.evalContext.provenance, acceptMeta); } catch (Error & e) { Pos pos = state.positions[nameAttr->pos]; /* @@ -1433,6 +1440,18 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value ** } } +/* Construct a Nix derivation with metadata provenance */ +static RegisterPrimOp primop_derivationStrictWithMeta( + PrimOp{ + .name = "derivationStrictWithMeta", + .arity = 1, + .fun = + [](EvalState & state, const PosIdx pos, Value ** args, Value & v) { + prim_derivationStrictGeneric(state, pos, args, v, /*acceptMeta=*/true); + }, + .internal = true, + }); + /** * Early validation for the derivation name, for better error message. * It is checked again when constructing store paths. 
@@ -1456,7 +1475,13 @@ static void checkDerivationName(EvalState & state, std::string_view drvName) } } -static void derivationStrictInternal(EvalState & state, std::string_view drvName, const Bindings * attrs, Value & v) +static void derivationStrictInternal( + EvalState & state, + std::string_view drvName, + const Bindings * attrs, + Value & v, + std::shared_ptr provenance, + bool acceptMeta) { checkDerivationName(state, drvName); @@ -1581,7 +1606,19 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName the environment. */ default: - if (jsonObject) { + if (acceptMeta && i->name == EvalState::s.__meta) { + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) { + state.forceAttrs(*i->value, pos, "while evaluating __meta"); + NixStringContext ctx; + auto obj = printValueAsJSON(state, true, *i->value, pos, ctx); + + if (!ctx.empty()) + throw Error("Derivation __meta provenance can't contain string context like store paths."); + + provenance = + std::make_shared(provenance, make_ref(obj)); + } + } else if (jsonObject) { if (i->name == state.s.structuredAttrs) continue; @@ -1705,6 +1742,10 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName /* Everything in the context of the strings in the derivation attributes should be added as dependencies of the resulting derivation. */ + StringMap rewrites; + + std::optional drvS; + for (auto & c : context) { std::visit( overloaded{ @@ -1716,6 +1757,8 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName [&](const NixStringContextElem::DrvDeep & d) { /* !!! This doesn't work if readOnlyMode is set. */ StorePathSet refs; + // FIXME: don't need to wait, we only need the references. 
+ state.waitForPath(d.drvPath); state.store->computeFSClosure(d.drvPath, refs); for (auto & j : refs) { drv.inputSrcs.insert(j); @@ -1727,11 +1770,27 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName [&](const NixStringContextElem::Built & b) { drv.inputDrvs.ensureSlot(*b.drvPath).value.insert(b.output); }, - [&](const NixStringContextElem::Opaque & o) { drv.inputSrcs.insert(o.path); }, + [&](const NixStringContextElem::Opaque & o) { + drv.inputSrcs.insert(state.devirtualize(o.path, &rewrites)); + }, + [&](const NixStringContextElem::Path & p) { + if (!drvS) + drvS = drv.unparse(*state.store, true); + if (drvS->find(p.storePath.to_string()) != drvS->npos) { + auto devirtualized = state.devirtualize(p.storePath, &rewrites); + warn( + "Using 'builtins.derivation' to create a derivation named '%s' that references the store path '%s' without a proper context. " + "The resulting derivation will not have a correct store reference, so this is unreliable and may stop working in the future.", + drvName, + state.store->printStorePath(devirtualized)); + } + }, }, c.raw); } + drv.applyRewrites(rewrites); + /* Do we have all required attributes? */ if (drv.builder == "") state.error("required attribute 'builder' missing").atPos(v).debugThrow(); @@ -1819,7 +1878,7 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName } /* Write the resulting term into the Nix store directory. 
*/ - auto drvPath = writeDerivation(*state.store, drv, state.repair); + auto drvPath = writeDerivation(*state.store, *state.asyncPathWriter, drv, state.repair, false, provenance); auto drvPathS = state.store->printStorePath(drvPath); printMsg(lvlChatty, "instantiated '%1%' -> '%2%'", drvName, drvPathS); @@ -1846,11 +1905,21 @@ static void derivationStrictInternal(EvalState & state, std::string_view drvName v.mkAttrs(result); } +/* Construct (as a unobservable side effect) a Nix derivation + expression that performs the derivation described by the argument + set. Returns the original set extended with the following + attributes: `outPath' containing the primary output path of the + derivation; `drvPath' containing the path of the Nix expression; + and `type' set to `derivation' to indicate that this is a + derivation. */ static RegisterPrimOp primop_derivationStrict( PrimOp{ .name = "derivationStrict", .arity = 1, - .fun = prim_derivationStrict, + .fun = + [](EvalState & state, const PosIdx pos, Value ** args, Value & v) { + prim_derivationStrictGeneric(state, pos, args, v, /*acceptMeta=*/false); + }, }); /* Return a placeholder string for the specified output that will be @@ -1971,7 +2040,7 @@ static void prim_pathExists(EvalState & state, const PosIdx pos, Value ** args, arg.type() == nString && (arg.string_view().ends_with("/") || arg.string_view().ends_with("/.")); auto symlinkResolution = mustBeDir ? SymlinkResolution::Full : SymlinkResolution::Ancestors; - auto path = realisePath(state, pos, arg, symlinkResolution); + auto path = state.realisePath(pos, arg, symlinkResolution); auto st = path.maybeLstat(); auto exists = st && (!mustBeDir || st->type == SourceAccessor::tDirectory); @@ -2078,7 +2147,7 @@ static RegisterPrimOp primop_dirOf({ /* Return the contents of a file as a string. 
*/ static void prim_readFile(EvalState & state, const PosIdx pos, Value ** args, Value & v) { - auto path = realisePath(state, pos, *args[0]); + auto path = state.realisePath(pos, *args[0]); auto s = path.readFile(); if (s.find((char) 0) != std::string::npos) state.error("the contents of the file '%1%' cannot be represented as a Nix string", path) @@ -2086,14 +2155,17 @@ static void prim_readFile(EvalState & state, const PosIdx pos, Value ** args, Va .debugThrow(); StorePathSet refs; if (state.store->isInStore(path.path.abs())) { - try { - refs = state.store->queryPathInfo(state.store->toStorePath(path.path.abs()).first)->references; - } catch (Error &) { // FIXME: should be InvalidPathError + auto storePath = state.store->toStorePath(path.path.abs()).first; + // Skip virtual paths since they don't have references and + // don't exist anyway. + if (!state.storeFS->getMount(CanonPath(state.store->printStorePath(storePath)))) { + if (auto info = state.store->maybeQueryPathInfo(state.store->toStorePath(path.path.abs()).first)) { + // Re-scan references to filter down to just the ones that actually occur in the file. + auto refsSink = PathRefScanSink::fromPaths(info->references); + refsSink << s; + refs = refsSink.getResultPaths(); + } } - // Re-scan references to filter down to just the ones that actually occur in the file. 
- auto refsSink = PathRefScanSink::fromPaths(refs); - refsSink << s; - refs = refsSink.getResultPaths(); } NixStringContext context; for (auto && p : std::move(refs)) { @@ -2313,7 +2385,7 @@ static void prim_hashFile(EvalState & state, const PosIdx pos, Value ** args, Va if (!ha) state.error("unknown hash algorithm '%1%'", algo).atPos(pos).debugThrow(); - auto path = realisePath(state, pos, *args[1]); + auto path = state.realisePath(pos, *args[1]); v.mkString(hashString(*ha, path.readFile()).to_string(HashFormat::Base16, false), state.mem); } @@ -2329,6 +2401,23 @@ static RegisterPrimOp primop_hashFile({ .fun = prim_hashFile, }); +static RegisterPrimOp primop_narHash({ + .name = "__narHash", + .args = {"p"}, + .doc = R"( + Return an SRI representation of the SHA-256 hash of the NAR serialisation of the path *p*. + )", + .fun = + [](EvalState & state, const PosIdx pos, Value ** args, Value & v) { + auto path = state.realisePath(pos, *args[0]); + auto hash = + fetchToStore2(state.fetchSettings, *state.store, path.resolveSymlinks(), FetchMode::DryRun).second; + v.mkString(hash.to_string(HashFormat::SRI, true), state.mem); + }, + // FIXME: may be useful to expose to the user. + .internal = true, +}); + static const Value & fileTypeToString(EvalState & state, SourceAccessor::Type type) { struct Constants @@ -2365,7 +2454,7 @@ static const Value & fileTypeToString(EvalState & state, SourceAccessor::Type ty static void prim_readFileType(EvalState & state, const PosIdx pos, Value ** args, Value & v) { - auto path = realisePath(state, pos, *args[0], std::nullopt); + auto path = state.realisePath(pos, *args[0], std::nullopt); /* Retrieve the directory entry type and stringize it. */ v = fileTypeToString(state, path.lstat().type); } @@ -2383,7 +2472,7 @@ static RegisterPrimOp primop_readFileType({ /* Read a directory (without . or ..) 
*/ static void prim_readDir(EvalState & state, const PosIdx pos, Value ** args, Value & v) { - auto path = realisePath(state, pos, *args[0]); + auto path = state.realisePath(pos, *args[0]); // Retrieve directory entries for all nodes in a directory. // This is similar to `getFileType` but is optimized to reduce system calls @@ -2656,15 +2745,25 @@ static void prim_toFile(EvalState & state, const PosIdx pos, Value ** args, Valu { NixStringContext context; auto name = state.forceStringNoCtx(*args[0], pos, "while evaluating the first argument passed to builtins.toFile"); - auto contents = - state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.toFile"); + std::string contents( + state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.toFile")); StorePathSet refs; + StringMap rewrites; for (auto c : context) { if (auto p = std::get_if(&c.raw)) refs.insert(p->path); - else + else if (auto p = std::get_if(&c.raw)) { + if (contents.find(p->storePath.to_string()) != contents.npos) { + auto devirtualized = state.devirtualize(p->storePath, &rewrites); + warn( + "Using 'builtins.toFile' to create a file named '%s' that references the store path '%s' without a proper context. " + "The resulting file will not have a correct store reference, so this is unreliable and may stop working in the future.", + name, + state.store->printStorePath(devirtualized)); + } + } else state .error( "files created by %1% may not reference derivations, but %2% references %3%", @@ -2675,6 +2774,8 @@ static void prim_toFile(EvalState & state, const PosIdx pos, Value ** args, Valu .debugThrow(); } + contents = rewriteStrings(contents, rewrites); + auto storePath = settings.readOnlyMode ? 
state.store->makeFixedOutputPathFromCA( name, TextInfo{ @@ -2690,7 +2791,8 @@ static void prim_toFile(EvalState & state, const PosIdx pos, Value ** args, Valu ContentAddressMethod::Raw::Text, HashAlgorithm::SHA256, refs, - state.repair); + state.repair, + state.evalContext.provenance); }); /* Note: we don't need to add `context' to the context of the @@ -2834,11 +2936,23 @@ static void addPath( name, ContentAddressWithReferences::fromParts(method, *expectedHash, {refs})); if (!expectedHash || !state.store->isValidPath(*expectedStorePath)) { + // FIXME: make this lazy? // FIXME: support refs in fetchToStore()? + auto path2 = path.resolveSymlinks(); + // Don't use source path provenance if we have a filter applied, since we can't accurately + // record that. Instead, use the current global provenance, since it's better than nothing. + auto path3 = filter + ? SourcePath{ + make_ref( + path2.accessor, state.evalContext.provenance), + path2.path + } + : path2; + auto dstPath = refs.empty() ? fetchToStore( state.fetchSettings, *state.store, - path.resolveSymlinks(), + path3, settings.readOnlyMode ? 
FetchMode::DryRun : FetchMode::Copy, name, method, @@ -2846,7 +2960,7 @@ static void addPath( state.repair) : state.store->addToStore( name, - path.resolveSymlinks(), + path3, method, HashAlgorithm::SHA256, refs, @@ -2876,7 +2990,15 @@ static void prim_filterSource(EvalState & state, const PosIdx pos, Value ** args state.forceFunction(*args[0], pos, "while evaluating the first argument passed to builtins.filterSource"); addPath( - state, pos, path.baseName(), path, args[0], ContentAddressMethod::Raw::NixArchive, std::nullopt, v, context); + state, + pos, + state.computeBaseName(path, pos), + path, + args[0], + ContentAddressMethod::Raw::NixArchive, + std::nullopt, + v, + context); } static RegisterPrimOp primop_filterSource({ @@ -3529,6 +3651,49 @@ static RegisterPrimOp primop_mapAttrs({ .fun = prim_mapAttrs, }); +static void prim_filterAttrs(EvalState & state, const PosIdx pos, Value ** args, Value & v) +{ + state.forceAttrs(*args[1], pos, "while evaluating the second argument passed to builtins.filterAttrs"); + + if (args[1]->attrs()->empty()) { + v = *args[1]; + return; + } + + state.forceFunction(*args[0], pos, "while evaluating the first argument passed to builtins.filterAttrs"); + + auto attrs = state.buildBindings(args[1]->attrs()->size()); + + for (auto & i : *args[1]->attrs()) { + Value * vName = Value::toPtr(state.symbols[i.name]); + Value * callArgs[] = {vName, i.value}; + Value res; + state.callFunction(*args[0], callArgs, res, noPos); + if (state.forceBool( + res, pos, "while evaluating the return value of the filtering function passed to builtins.filterAttrs")) + attrs.insert(i.name, i.value); + } + + v.mkAttrs(attrs.alreadySorted()); +} + +static RegisterPrimOp primop_filterAttrs({ + .name = "__filterAttrs", + .args = {"f", "attrset"}, + .doc = R"( + Return an attribute set consisting of the attributes in *attrset* for which + the function *f* returns `true`. 
The function *f* is called with two arguments: + the name of the attribute and the value of the attribute. For example, + + ```nix + builtins.filterAttrs (name: value: name == "foo") { foo = 1; bar = 2; } + ``` + + evaluates to `{ foo = 1; }`. + )", + .fun = prim_filterAttrs, +}); + static void prim_zipAttrsWith(EvalState & state, const PosIdx pos, Value ** args, Value & v) { // we will first count how many values are present for each given key. @@ -3909,8 +4074,8 @@ static void anyOrAll(bool any, EvalState & state, const PosIdx pos, Value ** arg std::string_view errorCtx = any ? "while evaluating the return value of the function passed to builtins.any" : "while evaluating the return value of the function passed to builtins.all"; - Value vTmp; for (auto elem : args[1]->listView()) { + Value vTmp; state.callFunction(*args[0], *elem, vTmp, pos); bool res = state.forceBool(vTmp, pos, errorCtx); if (res == any) { @@ -4576,8 +4741,9 @@ static void prim_hashString(EvalState & state, const PosIdx pos, Value ** args, state.error("unknown hash algorithm '%1%'", algo).atPos(pos).debugThrow(); NixStringContext context; // discarded - auto s = - state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.hashString"); + auto s = state.devirtualize( + state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.hashString"), + context); v.mkString(hashString(*ha, s).to_string(HashFormat::Base16, false), state.mem); } @@ -5199,9 +5365,7 @@ void EvalState::createBaseEnv(const EvalSettings & evalSettings) )", }); - if (!settings.pureEval) { - v.mkInt(time(0)); - } + v.mkInt(time(0)); addConstant( "__currentTime", v, @@ -5229,8 +5393,7 @@ void EvalState::createBaseEnv(const EvalSettings & evalSettings) .impureOnly = true, }); - if (!settings.pureEval) - v.mkString(settings.getCurrentSystem(), mem); + v.mkString(settings.getCurrentSystem(), mem); addConstant( "__currentSystem", v, @@ -5416,6 +5579,16 @@ 
void EvalState::createBaseEnv(const EvalSettings & evalSettings) .type = nFunction, }); + auto vDerivationWithMeta = allocValue(); + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) { + addConstant( + "derivationWithMeta", + vDerivationWithMeta, + { + .type = nFunction, + }); + } + /* Now that we've added all primops, sort the `builtins' set, because attribute lookups expect it to be sorted. */ const_cast(getBuiltins().attrs())->sort(); @@ -5424,7 +5597,14 @@ void EvalState::createBaseEnv(const EvalSettings & evalSettings) /* Note: we have to initialize the 'derivation' constant *after* building baseEnv/staticBaseEnv because it uses 'builtins'. */ - evalFile(derivationInternal, *vDerivation); + auto vDerivationValue = allocValue(); + evalFile(derivationInternal, *vDerivationValue); + + callFunction(*vDerivationValue, getBuiltin("derivationStrict"), *vDerivation, PosIdx()); + + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) + callFunction( + *vDerivationValue, **get(internalPrimOps, "derivationStrictWithMeta"), *vDerivationWithMeta, PosIdx()); } } // namespace nix diff --git a/src/libexpr/primops/context.cc b/src/libexpr/primops/context.cc index 70c13e2985b9..d4824d9b9e50 100644 --- a/src/libexpr/primops/context.cc +++ b/src/libexpr/primops/context.cc @@ -8,10 +8,16 @@ namespace nix { static void prim_unsafeDiscardStringContext(EvalState & state, const PosIdx pos, Value ** args, Value & v) { - NixStringContext context; + NixStringContext context, filtered; + auto s = state.coerceToString( pos, *args[0], context, "while evaluating the argument passed to builtins.unsafeDiscardStringContext"); - v.mkString(*s, state.mem); + + for (auto & c : context) + if (auto * p = std::get_if(&c.raw)) + filtered.insert(*p); + + v.mkString(*s, filtered, state.mem); } static RegisterPrimOp primop_unsafeDiscardStringContext({ @@ -23,11 +29,19 @@ static RegisterPrimOp primop_unsafeDiscardStringContext({ .fun = prim_unsafeDiscardStringContext, }); +bool 
hasContext(const NixStringContext & context) +{ + for (auto & c : context) + if (!std::get_if(&c.raw)) + return true; + return false; +} + static void prim_hasContext(EvalState & state, const PosIdx pos, Value ** args, Value & v) { NixStringContext context; state.forceString(*args[0], context, pos, "while evaluating the argument passed to builtins.hasContext"); - v.mkBool(!context.empty()); + v.mkBool(hasContext(context)); } static RegisterPrimOp primop_hasContext( @@ -62,6 +76,7 @@ static void prim_unsafeDiscardOutputDependency(EvalState & state, const PosIdx p NixStringContext context2; for (auto && c : context) { if (auto * ptr = std::get_if(&c.raw)) { + state.waitForPath(ptr->drvPath); // FIXME: why? context2.emplace(NixStringContextElem::Opaque{.path = ptr->drvPath}); } else { /* Can reuse original item */ @@ -133,6 +148,11 @@ static void prim_addDrvOutputDependencies(EvalState & state, const PosIdx pos, V above does not make much sense. */ return std::move(c); }, + [&](const NixStringContextElem::Path & p) -> NixStringContextElem::DrvDeep { + state.error("`addDrvOutputDependencies` does not work on a string without context") + .atPos(pos) + .debugThrow(); + }, }, context.begin()->raw)}), }; @@ -201,6 +221,7 @@ static void prim_getContext(EvalState & state, const PosIdx pos, Value ** args, contextInfos[std::move(drvPath)].outputs.emplace_back(std::move(b.output)); }, [&](NixStringContextElem::Opaque && o) { contextInfos[std::move(o.path)].path = true; }, + [&](NixStringContextElem::Path && p) {}, }, ((NixStringContextElem &&) i).raw); } diff --git a/src/libexpr/primops/derivation.nix b/src/libexpr/primops/derivation.nix index dbb8c2186889..d3b341a23713 100644 --- a/src/libexpr/primops/derivation.nix +++ b/src/libexpr/primops/derivation.nix @@ -26,6 +26,7 @@ Note that `derivation` is very bare-bones, and provides almost no commands during the build. Most likely, you'll want to use functions like `stdenv.mkDerivation` in Nixpkgs to set up a basic environment. 
*/ +drvFunc: drvAttrs@{ outputs ? [ "out" ], ... @@ -33,7 +34,7 @@ drvAttrs@{ let - strict = derivationStrict drvAttrs; + strict = drvFunc drvAttrs; commonAttrs = drvAttrs diff --git a/src/libexpr/primops/fetchClosure.cc b/src/libexpr/primops/fetchClosure.cc index 6e1389814fc0..f849d0debb87 100644 --- a/src/libexpr/primops/fetchClosure.cc +++ b/src/libexpr/primops/fetchClosure.cc @@ -136,7 +136,7 @@ static void prim_fetchClosure(EvalState & state, const PosIdx pos, Value ** args std::optional inputAddressedMaybe; for (auto & attr : *args[0]->attrs()) { - const auto & attrName = state.symbols[attr.name]; + std::string_view attrName = state.symbols[attr.name]; auto attrHint = [&]() -> std::string { return fmt("while evaluating the attribute '%s' passed to builtins.fetchClosure", attrName); }; @@ -243,7 +243,7 @@ static RegisterPrimOp primop_fetchClosure({ ```nix builtins.fetchClosure { fromStore = "https://cache.nixos.org"; - fromPath = /nix/store/r2jd6ygnmirm2g803mksqqjm4y39yi6i-git-2.33.1; + fromPath = /nix/store/nph9br6y2dmciy6q3dj3fwk2brdlr4gh-git-2.33.1; toPath = /nix/store/ldbhlwhh39wha58rm61bkiiwm6j7211j-git-2.33.1; } ``` @@ -258,8 +258,8 @@ static RegisterPrimOp primop_fetchClosure({ use [`nix store make-content-addressed`](@docroot@/command-ref/new-cli/nix3-store-make-content-addressed.md): ```console - # nix store make-content-addressed --from https://cache.nixos.org /nix/store/r2jd6ygnmirm2g803mksqqjm4y39yi6i-git-2.33.1 - rewrote '/nix/store/r2jd6ygnmirm2g803mksqqjm4y39yi6i-git-2.33.1' to '/nix/store/ldbhlwhh39wha58rm61bkiiwm6j7211j-git-2.33.1' + # nix store make-content-addressed --from https://cache.nixos.org /nix/store/nph9br6y2dmciy6q3dj3fwk2brdlr4gh-git-2.33.1 + rewrote '/nix/store/nph9br6y2dmciy6q3dj3fwk2brdlr4gh-git-2.33.1' to '/nix/store/ldbhlwhh39wha58rm61bkiiwm6j7211j-git-2.33.1' ``` Alternatively, set `toPath = ""` and find the correct `toPath` in the error message. 
@@ -271,7 +271,7 @@ static RegisterPrimOp primop_fetchClosure({ ```nix builtins.fetchClosure { fromStore = "https://cache.nixos.org"; - fromPath = /nix/store/r2jd6ygnmirm2g803mksqqjm4y39yi6i-git-2.33.1; + fromPath = /nix/store/nph9br6y2dmciy6q3dj3fwk2brdlr4gh-git-2.33.1; inputAddressed = true; } ``` diff --git a/src/libexpr/primops/fetchMercurial.cc b/src/libexpr/primops/fetchMercurial.cc index cc42931a61ee..4ab060f7807f 100644 --- a/src/libexpr/primops/fetchMercurial.cc +++ b/src/libexpr/primops/fetchMercurial.cc @@ -81,7 +81,7 @@ static void prim_fetchMercurial(EvalState & state, const PosIdx pos, Value ** ar attrs.insert_or_assign("rev", rev->gitRev()); auto input = fetchers::Input::fromAttrs(state.fetchSettings, std::move(attrs)); - auto [storePath, input2] = input.fetchToStore(state.fetchSettings, *state.store); + auto [storePath, accessor, input2] = input.fetchToStore(state.fetchSettings, *state.store); auto attrs2 = state.buildBindings(8); state.mkStorePathString(storePath, attrs2.alloc(state.s.outPath)); diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index 1614fcc595d4..691d13404f66 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -30,12 +30,16 @@ void emitTreeAttrs( { auto attrs = state.buildBindings(100); - state.mkStorePathString(storePath, attrs.alloc(state.s.outPath)); + auto & vStorePath = attrs.alloc(state.s.outPath); + state.mkStorePathString(storePath, vStorePath); // FIXME: support arbitrary input attributes. if (auto narHash = input.getNarHash()) attrs.alloc("narHash").mkString(narHash->to_string(HashFormat::SRI, true), state.mem); + else + // Lazily compute the NAR hash for backward compatibility. 
+ attrs.alloc("narHash").mkApp(*get(state.internalPrimOps, "narHash"), &vStorePath); if (input.getType() == "git") attrs.alloc("submodules").mkBool(fetchers::maybeGetBoolAttr(input.attrs, "submodules").value_or(false)); @@ -77,7 +81,6 @@ struct FetchTreeParams bool emptyRevFallback = false; bool allowNameArgument = false; bool isFetchGit = false; - bool isFinal = false; }; static void fetchTree( @@ -151,11 +154,6 @@ static void fetchTree( attrs.emplace("exportIgnore", Explicit{true}); } - // fetchTree should fetch git repos with shallow = true by default - if (type == "git" && !params.isFetchGit && !attrs.contains("shallow")) { - attrs.emplace("shallow", Explicit{true}); - } - if (!params.allowNameArgument) if (auto nameIter = attrs.find("name"); nameIter != attrs.end()) state.error("argument 'name' isn’t supported in call to '%s'", fetcher) @@ -184,17 +182,11 @@ static void fetchTree( } input = fetchers::Input::fromAttrs(state.fetchSettings, std::move(attrs)); } else { - if (!experimentalFeatureSettings.isEnabled(Xp::Flakes)) - state - .error( - "passing a string argument to '%s' requires the 'flakes' experimental feature", fetcher) - .atPos(pos) - .debugThrow(); input = fetchers::Input::fromURL(state.fetchSettings, url); } } - if (!state.settings.pureEval && !input.isDirect() && experimentalFeatureSettings.isEnabled(Xp::Flakes)) + if (!state.settings.pureEval && !input.isDirect()) input = lookupInRegistries(state.fetchSettings, *state.store, input, fetchers::UseRegistries::Limited).first; if (state.settings.pureEval && !input.isLocked(state.fetchSettings)) { @@ -213,17 +205,13 @@ static void fetchTree( state.checkURI(input.toURLString()); - if (params.isFinal) { + if (input.getNarHash()) input.attrs.insert_or_assign("__final", Explicit(true)); - } else { - if (input.isFinal()) - throw Error("input '%s' is not allowed to use the '__final' attribute", input.to_string()); - } auto cachedInput = state.inputCache->getAccessor(state.fetchSettings, *state.store, input, 
fetchers::UseRegistries::No); - auto storePath = state.mountInput(cachedInput.lockedInput, input, cachedInput.accessor); + auto storePath = state.mountInput(cachedInput.lockedInput, input, cachedInput.accessor, true); emitTreeAttrs(state, storePath, cachedInput.lockedInput, v, params.emptyRevFallback, false); } @@ -318,7 +306,6 @@ static RegisterPrimOp primop_fetchTree({ - `"mercurial"` *input* can also be a [URL-like reference](@docroot@/command-ref/new-cli/nix3-flake.md#flake-references). - The additional input types and the URL-like syntax requires the [`flakes` experimental feature](@docroot@/development/experimental-features.md#xp-feature-flakes) to be enabled. > **Example** > @@ -358,19 +345,6 @@ static RegisterPrimOp primop_fetchTree({ return doc; }(), .fun = prim_fetchTree, - .experimentalFeature = Xp::FetchTree, -}); - -void prim_fetchFinalTree(EvalState & state, const PosIdx pos, Value ** args, Value & v) -{ - fetchTree(state, pos, args, v, {.isFinal = true}); -} - -static RegisterPrimOp primop_fetchFinalTree({ - .name = "fetchFinalTree", - .args = {"input"}, - .fun = prim_fetchFinalTree, - .internal = true, }); static void fetch( @@ -719,7 +693,7 @@ static RegisterPrimOp primop_fetchGit({ name in the `ref` attribute. However, if the revision you're looking for is in a future - branch for the non-default branch you will need to specify the + branch for the non-default branch you need to specify the the `ref` attribute as well. 
```nix diff --git a/src/libexpr/primops/meson.build b/src/libexpr/primops/meson.build index b8abc6409af9..d62b6df4ea20 100644 --- a/src/libexpr/primops/meson.build +++ b/src/libexpr/primops/meson.build @@ -10,3 +10,7 @@ sources += files( 'fetchTree.cc', 'fromTOML.cc', ) + +if wasmtime_required.enabled() + sources += files('wasm.cc') +endif diff --git a/src/libexpr/primops/wasm.cc b/src/libexpr/primops/wasm.cc new file mode 100644 index 000000000000..c0dd9f40e08d --- /dev/null +++ b/src/libexpr/primops/wasm.cc @@ -0,0 +1,709 @@ +#include "nix/expr/primops.hh" +#include "nix/expr/eval-inline.hh" + +#include +#include + +using namespace wasmtime; + +namespace nix { + +using ValueId = uint32_t; + +template +T unwrap(Result && res) +{ + if (res) + return res.ok(); + throw Error(res.err().message()); +} + +static Engine & getEngine() +{ + static Engine engine = []() { + wasmtime::Config config; + config.pooling_allocation_strategy(PoolAllocationConfig()); + config.memory_init_cow(true); + return Engine(std::move(config)); + }(); + return engine; +} + +static std::span string2span(std::string_view s) +{ + return std::span((uint8_t *) s.data(), s.size()); +} + +static std::string_view span2string(std::span s) +{ + return std::string_view((char *) s.data(), s.size()); +} + +template +static std::span subspan(std::span s, size_t len) +{ + if (s.size() < len * sizeof(T)) + throw Error("Wasm memory access out of bounds"); + return std::span((T *) s.data(), len); +} + +// FIXME: move to wasmtime C++ wrapper. 
+class InstancePre +{ + WASMTIME_OWN_WRAPPER(InstancePre, wasmtime_instance_pre); + +public: + TrapResult instantiate(wasmtime::Store::Context cx) + { + wasmtime_instance_t instance; + wasm_trap_t * trap = nullptr; + auto * error = wasmtime_instance_pre_instantiate(ptr.get(), cx.capi(), &instance, &trap); + if (error != nullptr) { + return TrapError(wasmtime::Error(error)); + } + if (trap != nullptr) { + return TrapError(Trap(trap)); + } + return Instance(instance); + } +}; + +TrapResult instantiate_pre(Linker & linker, const Module & m) +{ + wasmtime_instance_pre_t * instance_pre; + auto * error = wasmtime_linker_instantiate_pre(linker.capi(), m.capi(), &instance_pre); + if (error != nullptr) { + return TrapError(wasmtime::Error(error)); + } + return InstancePre(instance_pre); +} + +static void regFuns(Linker & linker, bool useWasi); + +struct NixWasmInstancePre +{ + Engine & engine; + SourcePath wasmPath; + bool useWasi; + InstancePre instancePre; + + NixWasmInstancePre(SourcePath _wasmPath) + : engine(getEngine()) + , wasmPath(_wasmPath) + , useWasi(false) + , instancePre(({ + // Compile the module + auto module = unwrap(Module::compile(engine, string2span(wasmPath.readFile()))); + + // Auto-detect WASI by checking for wasi_snapshot_preview1 imports. 
+ for (const auto & ref : module.imports()) + if (const_cast &>(ref).module() == "wasi_snapshot_preview1") { + useWasi = true; + break; + } + + // Create linker with appropriate WASI support + Linker linker(engine); + if (useWasi) + unwrap(linker.define_wasi()); + regFuns(linker, useWasi); + + unwrap(instantiate_pre(linker, module)); + })) + { + } +}; + +struct NixWasmInstance +{ + EvalState & state; + ref pre; + wasmtime::Store wasmStore; + wasmtime::Store::Context wasmCtx; + Instance instance; + Memory memory_; + + ValueVector values; + std::exception_ptr ex; + + std::optional functionName; + + ValueId resultId = 0; + + std::string logPrefix; + + NixWasmInstance(EvalState & _state, ref _pre) + : state(_state) + , pre(_pre) + , wasmStore(pre->engine) + , wasmCtx(wasmStore) + , instance(unwrap(pre->instancePre.instantiate(wasmCtx))) + , memory_(getExport("memory")) + , logPrefix(pre->wasmPath.baseName()) + { + wasmCtx.set_data(this); + + /* Reserve value ID 0 so it can be used in functions like get_attr() to denote a missing attribute. 
*/ + values.push_back(nullptr); + } + + ValueId addValue(Value * v) + { + auto id = values.size(); + values.emplace_back(v); + return id; + } + + std::pair allocValue() + { + auto v = state.allocValue(); + auto id = addValue(v); + return {id, *v}; + } + + Value & getValue(ValueId id) + { + if (id >= values.size() || id == 0) + throw Error("invalid ValueId %d", id); + return *values[id]; + } + + template + T getExport(std::string_view name) + { + auto ext = instance.get(wasmCtx, name); + if (!ext) + throw Error("Wasm module '%s' does not export '%s'", pre->wasmPath, name); + auto res = std::get_if(&*ext); + if (!res) + throw Error("export '%s' of Wasm module '%s' does not have the right type", name, pre->wasmPath); + return *res; + } + + std::vector runFunction(std::string_view name, const std::vector & args) + { + functionName = name; + return unwrap(getExport(name).call(wasmCtx, args)); + } + + auto memory() + { + return memory_.data(wasmCtx); + } + + std::monostate panic(uint32_t ptr, uint32_t len) + { + throw Error("Wasm panic: %s", Uncolored(span2string(memory().subspan(ptr, len)))); + } + + std::monostate warn(uint32_t ptr, uint32_t len) + { + doWarn(span2string(memory().subspan(ptr, len))); + return {}; + } + + void doWarn(std::string_view s) + { + if (functionName) + nix::warn("'%s' function '%s': %s", logPrefix, functionName.value_or(""), s); + else + nix::warn("'%s': %s", logPrefix, s); + } + + uint32_t get_type(ValueId valueId) + { + auto & value = getValue(valueId); + state.forceValue(value, noPos); + auto t = value.type(); + return t == nInt ? 1 + : t == nFloat ? 2 + : t == nBool ? 3 + : t == nString ? 4 + : t == nPath ? 5 + : t == nNull ? 6 + : t == nAttrs ? 7 + : t == nList ? 8 + : t == nFunction ? 
9 + : []() -> int { throw Error("unsupported type"); }(); + } + + ValueId make_int(int64_t n) + { + auto [valueId, value] = allocValue(); + value.mkInt(n); + return valueId; + } + + int64_t get_int(ValueId valueId) + { + return state.forceInt(getValue(valueId), noPos, "while evaluating a value from Wasm").value; + } + + ValueId make_float(double x) + { + auto [valueId, value] = allocValue(); + value.mkFloat(x); + return valueId; + } + + double get_float(ValueId valueId) + { + return state.forceFloat(getValue(valueId), noPos, "while evaluating a value from Wasm"); + } + + ValueId make_string(uint32_t ptr, uint32_t len) + { + auto [valueId, value] = allocValue(); + value.mkString(span2string(memory().subspan(ptr, len)), state.mem); + return valueId; + } + + uint32_t copy_string(ValueId valueId, uint32_t ptr, uint32_t maxLen) + { + auto s = state.forceString(getValue(valueId), noPos, "while evaluating a value from Wasm"); + if (s.size() <= maxLen) { + auto buf = memory().subspan(ptr, maxLen); + memcpy(buf.data(), s.data(), s.size()); + } + return s.size(); + } + + ValueId make_path(ValueId baseId, uint32_t ptr, uint32_t len) + { + auto & baseValue = getValue(baseId); + state.forceValue(baseValue, noPos); + if (baseValue.type() != nPath) + throw Error("make_path expects a path value"); + auto base = baseValue.path(); + + auto [valueId, value] = allocValue(); + value.mkPath({base.accessor, CanonPath(span2string(memory().subspan(ptr, len)), base.path)}, state.mem); + return valueId; + } + + uint32_t copy_path(ValueId valueId, uint32_t ptr, uint32_t maxLen) + { + auto & v = getValue(valueId); + state.forceValue(v, noPos); + if (v.type() != nPath) + throw Error("copy_path expects a path value"); + auto path = v.path().path; + auto s = path.abs(); + if (s.size() <= maxLen) { + auto buf = memory().subspan(ptr, maxLen); + memcpy(buf.data(), s.data(), s.size()); + } + return s.size(); + } + + ValueId make_bool(int32_t b) + { + return addValue(state.getBool(b)); + } + + int32_t 
get_bool(ValueId valueId) + { + return state.forceBool(getValue(valueId), noPos, "while evaluating a value from Wasm"); + } + + ValueId make_null() + { + return addValue(&Value::vNull); + } + + ValueId make_list(uint32_t ptr, uint32_t len) + { + auto vs = subspan(memory().subspan(ptr), len); + + auto [valueId, value] = allocValue(); + + auto list = state.buildList(len); + for (const auto & [n, v] : enumerate(list)) + v = &getValue(vs[n]); // FIXME: endianness + value.mkList(list); + + return valueId; + } + + uint32_t copy_list(ValueId valueId, uint32_t ptr, uint32_t maxLen) + { + auto & value = getValue(valueId); + state.forceList(value, noPos, "while getting a list from Wasm"); + + if (value.listSize() <= maxLen) { + auto out = subspan(memory().subspan(ptr), value.listSize()); + + for (const auto & [n, elem] : enumerate(value.listView())) + out[n] = addValue(elem); + } + + return value.listSize(); + } + + ValueId make_attrset(uint32_t ptr, uint32_t len) + { + auto mem = memory(); + + struct Attr + { + // FIXME: endianness + uint32_t attrNamePtr; + uint32_t attrNameLen; + ValueId value; + }; + + auto attrs = subspan(mem.subspan(ptr), len); + + auto [valueId, value] = allocValue(); + auto builder = state.buildBindings(len); + for (auto & attr : attrs) + builder.insert( + state.symbols.create(span2string(mem.subspan(attr.attrNamePtr, attr.attrNameLen))), + &getValue(attr.value)); + value.mkAttrs(builder); + + return valueId; + } + + uint32_t copy_attrset(ValueId valueId, uint32_t ptr, uint32_t maxLen) + { + auto & value = getValue(valueId); + state.forceAttrs(value, noPos, "while copying an attrset into Wasm"); + + if (value.attrs()->size() <= maxLen) { + // FIXME: endianness. + struct Attr + { + ValueId value; + uint32_t nameLen; + }; + + auto buf = subspan(memory().subspan(ptr), maxLen); + + // FIXME: for determinism, we should return attributes in lexicographically sorted order. 
+ for (const auto & [n, attr] : enumerate(*value.attrs())) { + buf[n].value = addValue(attr.value); + buf[n].nameLen = state.symbols[attr.name].size(); + } + } + + return value.attrs()->size(); + } + + std::monostate copy_attrname(ValueId valueId, uint32_t attrIdx, uint32_t ptr, uint32_t len) + { + auto & value = getValue(valueId); + state.forceAttrs(value, noPos, "while copying an attr name into Wasm"); + + auto & attrs = *value.attrs(); + + if ((size_t) attrIdx >= attrs.size()) + throw Error("copy_attrname: attribute index out of bounds"); + + std::string_view name = state.symbols[attrs[attrIdx].name]; + + if ((size_t) len != name.size()) + throw Error("copy_attrname: buffer length does not match attribute name length"); + + memcpy(memory().subspan(ptr, len).data(), name.data(), name.size()); + + return {}; + } + + ValueId get_attr(ValueId valueId, uint32_t ptr, uint32_t len) + { + auto attrName = span2string(memory().subspan(ptr, len)); + + auto & value = getValue(valueId); + state.forceAttrs(value, noPos, "while getting an attribute from Wasm"); + + auto attr = value.attrs()->get(state.symbols.create(attrName)); + + return attr ? 
addValue(attr->value) : 0; + } + + ValueId call_function(ValueId funId, uint32_t ptr, uint32_t len) + { + auto & fun = getValue(funId); + state.forceFunction(fun, noPos, "while calling a function from Wasm"); + + ValueVector args; + for (auto argId : subspan(memory().subspan(ptr), len)) + args.push_back(&getValue(argId)); + + auto [valueId, value] = allocValue(); + + state.callFunction(fun, args, value, noPos); + + return valueId; + } + + ValueId make_app(ValueId funId, uint32_t ptr, uint32_t len) + { + if (!len) + return funId; + + auto args = subspan(memory().subspan(ptr), len); + + auto res = &getValue(funId); + + while (!args.empty()) { + auto arg = &getValue(args[0]); + auto tmp = state.allocValue(); + tmp->mkApp(res, {arg}); + res = tmp; + args = args.subspan(1); + } + + return addValue(res); + } + + /** + * Read the contents of a file into Wasm memory. This is like calling `builtins.readFile`, except that it can handle + * binary files that cannot be represented as Nix strings. + */ + uint32_t read_file(ValueId pathId, uint32_t ptr, uint32_t len) + { + auto & pathValue = getValue(pathId); + auto path = state.realisePath(noPos, pathValue); + + auto contents = path.readFile(); + + if (contents.size() > std::numeric_limits::max()) + throw Error("file '%s' is too large to process in Wasm (size: %d)", path, contents.size()); + + // FIXME: this is an inefficient interface since it may cause the file to be read twice. + if (contents.size() <= len) { + auto buf = memory().subspan(ptr, len); + memcpy(buf.data(), contents.data(), contents.size()); + } + + return contents.size(); + } +}; + +template +static void regFun(Linker & linker, std::string_view name, R (NixWasmInstance::*f)(Args...)) +{ + unwrap(linker.func_wrap("env", name, [f](Caller caller, Args... args) -> Result { + try { + auto instance = std::any_cast(caller.context().get_data()); + return (*instance.*f)(args...); + } catch (std::exception & e) { + return Trap(e.what()); + } catch (...) 
{ + return Trap("unknown exception"); + } + })); +} + +static void regFuns(Linker & linker, bool useWasi) +{ + regFun(linker, "panic", &NixWasmInstance::panic); + regFun(linker, "warn", &NixWasmInstance::warn); + regFun(linker, "get_type", &NixWasmInstance::get_type); + regFun(linker, "make_int", &NixWasmInstance::make_int); + regFun(linker, "get_int", &NixWasmInstance::get_int); + regFun(linker, "make_float", &NixWasmInstance::make_float); + regFun(linker, "get_float", &NixWasmInstance::get_float); + regFun(linker, "make_string", &NixWasmInstance::make_string); + regFun(linker, "copy_string", &NixWasmInstance::copy_string); + regFun(linker, "make_path", &NixWasmInstance::make_path); + regFun(linker, "copy_path", &NixWasmInstance::copy_path); + regFun(linker, "make_bool", &NixWasmInstance::make_bool); + regFun(linker, "get_bool", &NixWasmInstance::get_bool); + regFun(linker, "make_null", &NixWasmInstance::make_null); + regFun(linker, "make_list", &NixWasmInstance::make_list); + regFun(linker, "copy_list", &NixWasmInstance::copy_list); + regFun(linker, "make_attrset", &NixWasmInstance::make_attrset); + regFun(linker, "copy_attrset", &NixWasmInstance::copy_attrset); + regFun(linker, "copy_attrname", &NixWasmInstance::copy_attrname); + regFun(linker, "get_attr", &NixWasmInstance::get_attr); + regFun(linker, "call_function", &NixWasmInstance::call_function); + regFun(linker, "make_app", &NixWasmInstance::make_app); + regFun(linker, "read_file", &NixWasmInstance::read_file); + + if (useWasi) { + unwrap(linker.func_wrap( + "env", "return_to_nix", [](Caller caller, ValueId resultId) -> Result { + auto instance = std::any_cast(caller.context().get_data()); + instance->resultId = resultId; + return Trap("return_to_nix"); + })); + } +} + +template +struct LazyMakeRef +{ + ref p; + + template + LazyMakeRef(Args &&... 
args) + : p(make_ref(std::move(args...))) + { + } +}; + +static NixWasmInstance instantiateWasm(EvalState & state, const SourcePath & wasmPath) +{ + // FIXME: make this a weak Boehm GC pointer so that it can be freed during GC. + // FIXME: move to EvalState? + // Note: InstancePre in Rust is Send+Sync so it should be safe to share between threads. + static boost::concurrent_flat_map> instancesPre; + + std::shared_ptr instancePre; + + instancesPre.try_emplace_and_cvisit( + wasmPath, wasmPath, [&](auto & i) { instancePre = i.second.p; }, [&](auto & i) { instancePre = i.second.p; }); + + return NixWasmInstance{state, ref(instancePre)}; +} + +/** + * Callback for WASI stdout/stderr writes. It splits the output into lines and logs each line separately. + */ +struct WasiLogger +{ + NixWasmInstance & instance; + + std::string data; + + ~WasiLogger() + { + if (!data.empty()) + instance.doWarn(data); + } + + void operator()(std::string_view s) + { + data.append(s); + + while (true) { + auto pos = data.find('\n'); + if (pos == std::string_view::npos) + break; + instance.doWarn(data.substr(0, pos)); + data.erase(0, pos + 1); + } + } +}; + +static void prim_wasm(EvalState & state, const PosIdx pos, Value ** args, Value & v) +{ + state.forceAttrs(*args[0], pos, "while evaluating the first argument to `builtins.wasm`"); + + // Extract 'path' attribute + auto pathAttr = args[0]->attrs()->get(state.symbols.create("path")); + if (!pathAttr) + throw Error("missing required 'path' attribute in first argument to `builtins.wasm`"); + auto wasmPath = state.realisePath(pos, *pathAttr->value); + + // Check for unknown attributes + for (auto & attr : *args[0]->attrs()) { + auto name = state.symbols[attr.name]; + if (name != "path" && name != "function") + throw Error("unknown attribute '%s' in first argument to `builtins.wasm`", name); + } + + // Second argument is the value to pass to the function + auto argValue = args[1]; + + try { + auto instance = instantiateWasm(state, wasmPath); + + 
// Extract 'function' attribute (optional for wasi, required for non-wasi) + std::string functionName; + auto functionAttr = args[0]->attrs()->get(state.symbols.create("function")); + if (instance.pre->useWasi) { + functionName = "_start"; + if (functionAttr) + throw Error("'function' attribute is not allowed for WASI modules"); + } else { + if (!functionAttr) + throw Error( + "missing required 'function' attribute in first argument to `builtins.wasm` for non-WASI modules"); + functionName = std::string( + state.forceStringNoCtx(*functionAttr->value, pos, "while evaluating the 'function' attribute")); + } + + debug("calling wasm module"); + + auto argId = instance.addValue(argValue); + + if (instance.pre->useWasi) { + WasiLogger logger{instance}; + + auto loggerTrampoline = [](void * data, const unsigned char * buf, size_t len) -> ptrdiff_t { + auto logger = static_cast(data); + (*logger)(std::string_view((const char *) buf, len)); + return len; + }; + + WasiConfig wasiConfig; + wasi_config_set_stdout_custom(wasiConfig.capi(), loggerTrampoline, &logger, nullptr); + wasi_config_set_stderr_custom(wasiConfig.capi(), loggerTrampoline, &logger, nullptr); + wasiConfig.argv({"wasi", std::to_string(argId)}); + unwrap(instance.wasmStore.context().set_wasi(std::move(wasiConfig))); + + auto res = instance.getExport(functionName).call(instance.wasmCtx, {}); + if (!instance.resultId) { + unwrap(std::move(res)); + throw Error("Wasm function '%s' from '%s' finished without returning a value", functionName, wasmPath); + } + + auto & vRes = instance.getValue(instance.resultId); + state.forceValue(vRes, pos); + v = vRes; + } else { + // FIXME: use the "start" function if present. 
+ instance.runFunction("nix_wasm_init_v1", {}); + + auto res = instance.runFunction(functionName, {(int32_t) argId}); + if (res.size() != 1) + throw Error("Wasm function '%s' from '%s' did not return exactly one value", functionName, wasmPath); + if (res[0].kind() != ValKind::I32) + throw Error("Wasm function '%s' from '%s' did not return an i32 value", functionName, wasmPath); + auto & vRes = instance.getValue(res[0].i32()); + state.forceValue(vRes, pos); + v = vRes; + } + } catch (Error & e) { + e.addTrace(state.positions[pos], "while executing the Wasm function from '%s'", wasmPath); + throw; + } +} + +static RegisterPrimOp primop_wasm( + {.name = "__wasm", + .args = {"config", "arg"}, + .doc = R"( + Call a Wasm function with the specified argument. + + The first argument must be an attribute set with the following attributes: + - `path`: Path to the Wasm module (required) + - `function`: Function name to call (required for non-WASI modules, not allowed for WASI modules) + + The second argument is the value to pass to the function. + + WASI mode is automatically enabled if the module imports from `wasi_snapshot_preview1`. 
+ + Example (non-WASI): + ```nix + builtins.wasm { + path = ./foo.wasm; + function = "fib"; + } 33 + ``` + + Example (WASI): + ```nix + builtins.wasm { + path = ./bar.wasm; + } { x = 42; } + ``` + )", + .fun = prim_wasm, + .experimentalFeature = Xp::WasmBuiltin}); + +} // namespace nix diff --git a/src/libexpr/print-ambiguous.cc b/src/libexpr/print-ambiguous.cc index 8b80e2a66345..f80ef2b044bf 100644 --- a/src/libexpr/print-ambiguous.cc +++ b/src/libexpr/print-ambiguous.cc @@ -6,8 +6,7 @@ namespace nix { // See: https://github.com/NixOS/nix/issues/9730 -void printAmbiguous( - Value & v, const SymbolTable & symbols, std::ostream & str, std::set * seen, int depth) +void printAmbiguous(EvalState & state, Value & v, std::ostream & str, std::set * seen, int depth) { checkInterrupt(); @@ -22,9 +21,13 @@ void printAmbiguous( case nBool: printLiteralBool(str, v.boolean()); break; - case nString: - printLiteralString(str, v.string_view()); + case nString: { + NixStringContext context; + copyContext(v, context); + // FIXME: make devirtualization configurable? + printLiteralString(str, state.devirtualize(v.string_view(), context)); break; + } case nPath: str << v.path().to_string(); // !!! escaping? 
break; @@ -36,9 +39,9 @@ void printAmbiguous( str << "«repeated»"; else { str << "{ "; - for (auto & i : v.attrs()->lexicographicOrder(symbols)) { - str << symbols[i->name] << " = "; - printAmbiguous(*i->value, symbols, str, seen, depth - 1); + for (auto & i : v.attrs()->lexicographicOrder(state.symbols)) { + str << state.symbols[i->name] << " = "; + printAmbiguous(state, *i->value, str, seen, depth - 1); str << "; "; } str << "}"; @@ -54,7 +57,7 @@ void printAmbiguous( str << "[ "; for (auto v2 : v.listView()) { if (v2) - printAmbiguous(*v2, symbols, str, seen, depth - 1); + printAmbiguous(state, *v2, str, seen, depth - 1); else str << "(nullptr)"; str << " "; @@ -75,6 +78,9 @@ void printAmbiguous( str << "«potential infinite recursion»"; } break; + case nFailed: + str << "«failed»"; + break; case nFunction: if (v.isLambda()) { str << ""; diff --git a/src/libexpr/print.cc b/src/libexpr/print.cc index 4776be033851..bf856db45d52 100644 --- a/src/libexpr/print.cc +++ b/src/libexpr/print.cc @@ -249,7 +249,11 @@ class Printer void printString(Value & v) { - printLiteralString(output, v.string_view(), options.maxStringLength, options.ansiColors); + NixStringContext context; + copyContext(v, context); + std::ostringstream s; + printLiteralString(s, v.string_view(), options.maxStringLength, options.ansiColors); + output << state.devirtualize(s.str(), context); } void printPath(Value & v) @@ -498,7 +502,7 @@ class Printer output << "«potential infinite recursion»"; if (options.ansiColors) output << ANSI_NORMAL; - } else if (v.isThunk() || v.isApp()) { + } else if (!v.isFinished()) { if (options.ansiColors) output << ANSI_MAGENTA; output << "«thunk»"; @@ -509,6 +513,11 @@ class Printer } } + void printFailed(Value & v) + { + output << "«failed»"; + } + void printExternal(Value & v) { v.external()->print(output); @@ -584,6 +593,10 @@ class Printer printThunk(v); break; + case nFailed: + printFailed(v); + break; + case nExternal: printExternal(v); break; diff --git 
a/src/libexpr/provenance.cc b/src/libexpr/provenance.cc new file mode 100644 index 000000000000..8bce4f120763 --- /dev/null +++ b/src/libexpr/provenance.cc @@ -0,0 +1,25 @@ +#include "nix/expr/provenance.hh" +#include "nix/util/json-utils.hh" + +#include + +namespace nix { + +nlohmann::json DerivationProvenance::to_json() const +{ + return nlohmann::json{ + {"type", "derivation"}, + {"meta", *meta}, + {"next", next ? next->to_json() : nlohmann::json(nullptr)}, + }; +} + +Provenance::Register registerDerivationProvenance("derivation", [](nlohmann::json json) { + auto & obj = getObject(json); + std::shared_ptr next; + if (auto p = optionalValueAt(obj, "next"); p && !p->is_null()) + next = Provenance::from_json(*p); + return make_ref(next, make_ref(valueAt(obj, "meta"))); +}); + +} // namespace nix diff --git a/src/libexpr/symbol-table.cc b/src/libexpr/symbol-table.cc new file mode 100644 index 000000000000..052c72570371 --- /dev/null +++ b/src/libexpr/symbol-table.cc @@ -0,0 +1,63 @@ +#include "nix/expr/symbol-table.hh" +#include "nix/util/logging.hh" + +#include + +namespace nix { + +#ifndef MAP_NORESERVE +# define MAP_NORESERVE 0 +#endif + +static void * allocateLazyMemory(size_t maxSize) +{ + auto p = mmap(nullptr, maxSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0); + if (p == MAP_FAILED) + throw SysError("allocating arena using mmap"); + return p; +} + +ContiguousArena::ContiguousArena(size_t maxSize) + : data((char *) allocateLazyMemory(maxSize)) + , maxSize(maxSize) +{ +} + +size_t ContiguousArena::allocate(size_t bytes) +{ + auto offset = size.fetch_add(bytes); + if (offset + bytes > maxSize) + throw Error("arena ran out of space"); + return offset; +} + +Symbol SymbolTable::create(std::string_view s) +{ + uint32_t idx; + + auto visit = [&](const SymbolStr & sym) { idx = ((const char *) sym.s) - arena.data; }; + + symbols.insert_and_visit(SymbolStr::Key{s, arena}, visit, visit); + + return Symbol(idx); +} + 
+SymbolStr::SymbolStr(const SymbolStr::Key & key) +{ + auto size = SymbolStr::computeSize(key.s); + + auto id = key.arena.allocate(size); + + auto v = (SymbolValue *) (const_cast(key.arena.data) + id); + + auto s = (StringData *) (v + 1); + s->size_ = key.s.size(); + std::memcpy(s->data_, key.s.data(), key.s.size()); + s->data_[key.s.size()] = '\0'; + + v->mkStringNoCopy(*s); + + this->s = v; +} + +} // namespace nix diff --git a/src/libexpr/value-to-json.cc b/src/libexpr/value-to-json.cc index b2cc482c6e36..58e3b3e53993 100644 --- a/src/libexpr/value-to-json.cc +++ b/src/libexpr/value-to-json.cc @@ -2,106 +2,148 @@ #include "nix/expr/eval-inline.hh" #include "nix/store/store-api.hh" #include "nix/util/signals.hh" +#include "nix/expr/parallel-eval.hh" #include #include #include namespace nix { + using json = nlohmann::json; +#pragma GCC diagnostic ignored "-Wswitch-enum" + +static void parallelForceDeep(EvalState & state, Value & v, PosIdx pos) +{ + state.forceValue(v, pos); + + Executor::WorkItems work; + + switch (v.type()) { + + case nAttrs: { + NixStringContext context; + if (state.tryAttrsToString(pos, v, context, false, false)) + return; + if (v.attrs()->get(state.s.outPath)) + return; + for (auto & a : *v.attrs()) + state.addWork(work, 0, [value(allocRootValue(a.value)), pos(a.pos), &state]() { + parallelForceDeep(state, **value, pos); + }); + break; + } + + default: + break; + } + + state.executor->spawn(std::move(work)); +} + // TODO: rename. It doesn't print. 
json printValueAsJSON( EvalState & state, bool strict, Value & v, const PosIdx pos, NixStringContext & context, bool copyToStore) { - checkInterrupt(); + if (strict && state.executor->enabled && !Executor::amWorkerThread) + parallelForceDeep(state, v, pos); - auto _level = state.addCallDepth(pos); + auto recurse = [&](this const auto & recurse, json & res, Value & v, PosIdx pos) -> void { + checkInterrupt(); - if (strict) - state.forceValue(v, pos); + auto _level = state.addCallDepth(pos); - json out; + if (strict) + state.forceValue(v, pos); - switch (v.type()) { + switch (v.type()) { - case nInt: - out = v.integer().value; - break; + case nInt: + res = v.integer().value; + break; - case nBool: - out = v.boolean(); - break; + case nBool: + res = v.boolean(); + break; - case nString: - copyContext(v, context); - out = v.string_view(); - break; + case nString: { + copyContext(v, context); + res = v.string_view(); + break; + } - case nPath: - if (copyToStore) - out = state.store->printStorePath(state.copyPathToStore(context, v.path())); - else - out = v.path().path.abs(); - break; + case nPath: + if (copyToStore) + res = state.store->printStorePath(state.copyPathToStore(context, v.path(), v.determinePos(pos))); + else + res = v.path().path.abs(); + break; - case nNull: - // already initialized as null - break; + case nNull: + // already initialized as null + break; - case nAttrs: { - auto maybeString = state.tryAttrsToString(pos, v, context, false, false); - if (maybeString) { - out = *maybeString; + case nAttrs: { + auto maybeString = state.tryAttrsToString(pos, v, context, false, false); + if (maybeString) { + res = *maybeString; + break; + } + if (auto i = v.attrs()->get(state.s.outPath)) + return recurse(res, *i->value, i->pos); + else { + res = json::object(); + for (auto & a : v.attrs()->lexicographicOrder(state.symbols)) { + json & j = res.emplace(state.symbols[a->name], json()).first.value(); + try { + recurse(j, *a->value, a->pos); + } catch (Error & e) { + 
e.addTrace( + state.positions[a->pos], + HintFmt("while evaluating attribute '%1%'", state.symbols[a->name])); + throw; + } + } + } break; } - if (auto i = v.attrs()->get(state.s.outPath)) - return printValueAsJSON(state, strict, *i->value, i->pos, context, copyToStore); - else { - out = json::object(); - for (auto & a : v.attrs()->lexicographicOrder(state.symbols)) { + + case nList: { + res = json::array(); + for (const auto & [i, elem] : enumerate(v.listView())) { try { - out.emplace( - state.symbols[a->name], - printValueAsJSON(state, strict, *a->value, a->pos, context, copyToStore)); + res.push_back(json()); + recurse(res.back(), *elem, pos); } catch (Error & e) { - e.addTrace( - state.positions[a->pos], HintFmt("while evaluating attribute '%1%'", state.symbols[a->name])); + e.addTrace(state.positions[pos], HintFmt("while evaluating list element at index %1%", i)); throw; } } + break; } - break; - } - case nList: { - out = json::array(); - int i = 0; - for (auto elem : v.listView()) { - try { - out.push_back(printValueAsJSON(state, strict, *elem, pos, context, copyToStore)); - } catch (Error & e) { - e.addTrace(state.positions[pos], HintFmt("while evaluating list element at index %1%", i)); - throw; - } - i++; + case nExternal: { + res = v.external()->printValueAsJSON(state, strict, context, copyToStore); + break; } - break; - } - case nExternal: - return v.external()->printValueAsJSON(state, strict, context, copyToStore); - break; + case nFloat: + res = v.fpoint(); + break; - case nFloat: - out = v.fpoint(); - break; + case nThunk: + case nFailed: + case nFunction: + state.error("cannot convert %1% to JSON", showType(v)).atPos(v.determinePos(pos)).debugThrow(); + } + }; - case nThunk: - case nFunction: - state.error("cannot convert %1% to JSON", showType(v)).atPos(v.determinePos(pos)).debugThrow(); - } - return out; + json res; + + recurse(res, v, pos); + + return res; } void printValueAsJSON( diff --git a/src/libexpr/value-to-xml.cc 
b/src/libexpr/value-to-xml.cc index 0a7a334f41b8..21de85a17173 100644 --- a/src/libexpr/value-to-xml.cc +++ b/src/libexpr/value-to-xml.cc @@ -170,6 +170,11 @@ static void printValueAsXML( case nThunk: doc.writeEmptyElement("unevaluated"); + break; + + case nFailed: + doc.writeEmptyElement("failed"); + break; } } diff --git a/src/libexpr/value/context.cc b/src/libexpr/value/context.cc index dcc577f056ce..a06d79ddebf6 100644 --- a/src/libexpr/value/context.cc +++ b/src/libexpr/value/context.cc @@ -50,6 +50,11 @@ NixStringContextElem NixStringContextElem::parse(std::string_view s0, const Expe .drvPath = StorePath{s.substr(1)}, }; } + case '@': { + return NixStringContextElem::Path{ + .storePath = StorePath{s.substr(1)}, + }; + } default: { // Ensure no '!' if (s.find("!") != std::string_view::npos) { @@ -90,6 +95,10 @@ std::string NixStringContextElem::to_string() const res += '='; res += d.drvPath.to_string(); }, + [&](const NixStringContextElem::Path & p) { + res += '@'; + res += p.storePath.to_string(); + }, }, raw); diff --git a/src/libfetchers-c/meson.build b/src/libfetchers-c/meson.build index db415d9173e7..c029eb0d1e59 100644 --- a/src/libfetchers-c/meson.build +++ b/src/libfetchers-c/meson.build @@ -57,7 +57,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libfetchers-c/package.nix b/src/libfetchers-c/package.nix index 9a601d70417c..13ec30d566eb 100644 --- a/src/libfetchers-c/package.nix +++ b/src/libfetchers-c/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-fetchers-c"; + pname = "determinate-nix-fetchers-c"; inherit version; workDir = ./.; diff --git a/src/libfetchers-tests/access-tokens.cc b/src/libfetchers-tests/access-tokens.cc index 7127434db9df..26cdcfb83fc9 100644 --- 
a/src/libfetchers-tests/access-tokens.cc +++ b/src/libfetchers-tests/access-tokens.cc @@ -15,10 +15,7 @@ class AccessKeysTest : public ::testing::Test protected: public: - void SetUp() override - { - experimentalFeatureSettings.experimentalFeatures.get().insert(Xp::Flakes); - } + void SetUp() override {} void TearDown() override {} }; diff --git a/src/libfetchers-tests/git-utils.cc b/src/libfetchers-tests/git-utils.cc index 762e39ad6eae..0b21fd0c67d5 100644 --- a/src/libfetchers-tests/git-utils.cc +++ b/src/libfetchers-tests/git-utils.cc @@ -48,7 +48,7 @@ class GitUtilsTest : public ::testing::Test ref openRepo() { - return GitRepo::openRepo(tmpDir, true, false); + return GitRepo::openRepo(tmpDir, {.create = true}); } std::string getRepoName() const @@ -115,9 +115,10 @@ TEST_F(GitUtilsTest, sink_hardlink) try { sink->createHardlink(CanonPath("foo-1.1/link"), CanonPath("hello")); + sink->flush(); FAIL() << "Expected an exception"; } catch (const nix::Error & e) { - ASSERT_THAT(e.msg(), testing::HasSubstr("cannot find hard link target")); + ASSERT_THAT(e.msg(), testing::HasSubstr("does not exist")); ASSERT_THAT(e.msg(), testing::HasSubstr("/hello")); ASSERT_THAT(e.msg(), testing::HasSubstr("foo-1.1/link")); } diff --git a/src/libfetchers/attrs.cc b/src/libfetchers/attrs.cc index 841808bd16a9..648d48545431 100644 --- a/src/libfetchers/attrs.cc +++ b/src/libfetchers/attrs.cc @@ -27,6 +27,9 @@ nlohmann::json attrsToJSON(const Attrs & attrs) { nlohmann::json json; for (auto & attr : attrs) { + /* The __final attribute is purely internal, so never serialize it. 
*/ + if (attr.first == "__final") + continue; if (auto v = std::get_if(&attr.second)) { json[attr.first] = *v; } else if (auto v = std::get_if(&attr.second)) { diff --git a/src/libfetchers/builtin-flake-registry.json b/src/libfetchers/builtin-flake-registry.json new file mode 100644 index 000000000000..65e973290a05 --- /dev/null +++ b/src/libfetchers/builtin-flake-registry.json @@ -0,0 +1,425 @@ +{ + "flakes": [ + { + "from": { + "id": "agda", + "type": "indirect" + }, + "to": { + "owner": "agda", + "repo": "agda", + "type": "github" + } + }, + { + "from": { + "id": "agenix", + "type": "indirect" + }, + "to": { + "owner": "ryantm", + "repo": "agenix", + "type": "github" + } + }, + { + "from": { + "id": "arion", + "type": "indirect" + }, + "to": { + "owner": "hercules-ci", + "repo": "arion", + "type": "github" + } + }, + { + "from": { + "id": "blender-bin", + "type": "indirect" + }, + "to": { + "dir": "blender", + "owner": "edolstra", + "repo": "nix-warez", + "type": "github" + } + }, + { + "from": { + "id": "bundlers", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "bundlers", + "type": "github" + } + }, + { + "from": { + "id": "cachix", + "type": "indirect" + }, + "to": { + "owner": "cachix", + "repo": "cachix", + "type": "github" + } + }, + { + "from": { + "id": "composable", + "type": "indirect" + }, + "to": { + "owner": "ComposableFi", + "repo": "composable", + "type": "github" + } + }, + { + "from": { + "id": "disko", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "disko", + "type": "github" + } + }, + { + "from": { + "id": "dreampkgs", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "dreampkgs", + "type": "github" + } + }, + { + "from": { + "id": "dwarffs", + "type": "indirect" + }, + "to": { + "owner": "edolstra", + "repo": "dwarffs", + "type": "github" + } + }, + { + "from": { + "id": "emacs-overlay", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": 
"emacs-overlay", + "type": "github" + } + }, + { + "from": { + "id": "fenix", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "fenix", + "type": "github" + } + }, + { + "from": { + "id": "flake-parts", + "type": "indirect" + }, + "to": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + { + "from": { + "id": "flake-utils", + "type": "indirect" + }, + "to": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + { + "from": { + "id": "helix", + "type": "indirect" + }, + "to": { + "owner": "helix-editor", + "repo": "helix", + "type": "github" + } + }, + { + "from": { + "id": "hercules-ci-agent", + "type": "indirect" + }, + "to": { + "owner": "hercules-ci", + "repo": "hercules-ci-agent", + "type": "github" + } + }, + { + "from": { + "id": "hercules-ci-effects", + "type": "indirect" + }, + "to": { + "owner": "hercules-ci", + "repo": "hercules-ci-effects", + "type": "github" + } + }, + { + "from": { + "id": "home-manager", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "home-manager", + "type": "github" + } + }, + { + "from": { + "id": "hydra", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "hydra", + "type": "github" + } + }, + { + "from": { + "id": "mach-nix", + "type": "indirect" + }, + "to": { + "owner": "DavHau", + "repo": "mach-nix", + "type": "github" + } + }, + { + "from": { + "id": "ngipkgs", + "type": "indirect" + }, + "to": { + "owner": "ngi-nix", + "repo": "ngipkgs", + "type": "github" + } + }, + { + "from": { + "id": "nickel", + "type": "indirect" + }, + "to": { + "owner": "tweag", + "repo": "nickel", + "type": "github" + } + }, + { + "from": { + "id": "nix", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nix", + "type": "github" + } + }, + { + "from": { + "id": "nix-darwin", + "type": "indirect" + }, + "to": { + "owner": "nix-darwin", + "repo": "nix-darwin", + "type": "github" + } + }, + { + "from": { + "id": 
"nix-serve", + "type": "indirect" + }, + "to": { + "owner": "edolstra", + "repo": "nix-serve", + "type": "github" + } + }, + { + "from": { + "id": "nixops", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nixops", + "type": "github" + } + }, + { + "from": { + "id": "nixos-anywhere", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "nixos-anywhere", + "type": "github" + } + }, + { + "from": { + "id": "nixos-hardware", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nixos-hardware", + "type": "github" + } + }, + { + "from": { + "id": "nixos-homepage", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nixos-homepage", + "type": "github" + } + }, + { + "from": { + "id": "nixos-search", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "nixos-search", + "type": "github" + } + }, + { + "from": { + "id": "nixpkgs", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + { + "from": { + "id": "nur", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "NUR", + "type": "github" + } + }, + { + "from": { + "id": "patchelf", + "type": "indirect" + }, + "to": { + "owner": "NixOS", + "repo": "patchelf", + "type": "github" + } + }, + { + "from": { + "id": "poetry2nix", + "type": "indirect" + }, + "to": { + "owner": "nix-community", + "repo": "poetry2nix", + "type": "github" + } + }, + { + "from": { + "id": "pridefetch", + "type": "indirect" + }, + "to": { + "owner": "SpyHoodle", + "repo": "pridefetch", + "type": "github" + } + }, + { + "from": { + "id": "sops-nix", + "type": "indirect" + }, + "to": { + "owner": "Mic92", + "repo": "sops-nix", + "type": "github" + } + }, + { + "from": { + "id": "systems", + "type": "indirect" + }, + "to": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + { + "from": { + "id": "templates", + "type": "indirect" + }, + "to": { + 
"owner": "NixOS", + "repo": "templates", + "type": "github" + } + } + ], + "version": 2 +} diff --git a/src/libfetchers/builtin.cc b/src/libfetchers/builtin.cc new file mode 100644 index 000000000000..44b3baf0b1aa --- /dev/null +++ b/src/libfetchers/builtin.cc @@ -0,0 +1,60 @@ +#include "nix/store/builtins.hh" +#include "nix/store/parsed-derivations.hh" +#include "nix/fetchers/fetchers.hh" +#include "nix/fetchers/fetch-settings.hh" +#include "nix/util/archive.hh" +#include "nix/store/filetransfer.hh" +#include "nix/store/store-open.hh" + +#include + +namespace nix { + +static void builtinFetchTree(const BuiltinBuilderContext & ctx) +{ + experimentalFeatureSettings.require(Xp::BuildTimeFetchTree); + + auto out = get(ctx.drv.outputs, "out"); + if (!out) + throw Error("'builtin:fetch-tree' requires an 'out' output"); + + if (!(ctx.drv.type().isFixed() || ctx.drv.type().isImpure())) + throw Error("'builtin:fetch-tree' must be a fixed-output or impure derivation"); + + if (!ctx.drv.structuredAttrs) + throw Error("'builtin:fetch-tree' must have '__structuredAttrs = true'"); + + setenv("NIX_CACHE_HOME", ctx.tmpDirInSandbox.c_str(), 1); + + using namespace fetchers; + + fetchers::Settings myFetchSettings; + myFetchSettings.accessTokens = fetchSettings.accessTokens.get(); + + // Make sure we don't use the FileTransfer object of the parent + // since it's in a broken state after the fork. We also must not + // delete it, so hang on to the shared_ptr. + // FIXME: move FileTransfer into fetchers::Settings. + static auto prevFileTransfer = resetFileTransfer(); + + // FIXME: disable use of the git/tarball cache + + auto input = Input::fromAttrs(myFetchSettings, jsonToAttrs(ctx.drv.structuredAttrs->structuredAttrs.at("input"))); + + std::cerr << fmt("fetching '%s'...\n", input.to_string()); + + /* Functions like downloadFile() expect a store. We can't use the + real one since we're in a forked process. FIXME: use recursive + Nix's daemon so we can use the real store? 
*/ + auto tmpStore = openStore(ctx.tmpDirInSandbox + "/nix"); + + auto [accessor, lockedInput] = input.getAccessor(myFetchSettings, *tmpStore); + + auto source = sinkToSource([&](Sink & sink) { accessor->dumpPath(CanonPath::root, sink); }); + + restorePath(ctx.outputs.at("out"), *source); +} + +static RegisterBuiltinBuilder registerUnpackChannel("fetch-tree", builtinFetchTree); + +} // namespace nix diff --git a/src/libfetchers/cache.cc b/src/libfetchers/cache.cc index 183f106a5d3b..1db3ed8dc896 100644 --- a/src/libfetchers/cache.cc +++ b/src/libfetchers/cache.cc @@ -109,7 +109,7 @@ struct CacheImpl : Cache upsert(key, value); } - std::optional lookupStorePath(Key key, Store & store) override + std::optional lookupStorePath(Key key, Store & store, bool allowInvalid) override { key.second.insert_or_assign("store", store.storeDir); @@ -123,7 +123,7 @@ struct CacheImpl : Cache ResultWithStorePath res2(*res, StorePath(storePathS)); store.addTempRoot(res2.storePath); - if (!store.isValidPath(res2.storePath)) { + if (!allowInvalid && !store.isValidPath(res2.storePath)) { // FIXME: we could try to substitute 'storePath'. debug( "ignoring disappeared cache entry '%s:%s' -> '%s'", @@ -145,7 +145,7 @@ struct CacheImpl : Cache std::optional lookupStorePathWithTTL(Key key, Store & store) override { - auto res = lookupStorePath(std::move(key), store); + auto res = lookupStorePath(std::move(key), store, false); return res && !res->expired ? 
res : std::nullopt; } }; diff --git a/src/libfetchers/fetch-settings.cc b/src/libfetchers/fetch-settings.cc index f92b94a0b3bd..f50177f094e3 100644 --- a/src/libfetchers/fetch-settings.cc +++ b/src/libfetchers/fetch-settings.cc @@ -1,7 +1,16 @@ #include "nix/fetchers/fetch-settings.hh" +#include "nix/util/config-global.hh" namespace nix::fetchers { Settings::Settings() {} } // namespace nix::fetchers + +namespace nix { + +fetchers::Settings fetchSettings; + +static GlobalConfig::Register rFetchSettings(&fetchSettings); + +} // namespace nix diff --git a/src/libfetchers/fetch-to-store.cc b/src/libfetchers/fetch-to-store.cc index b1e8b9d72bbc..8dfb74a9c5af 100644 --- a/src/libfetchers/fetch-to-store.cc +++ b/src/libfetchers/fetch-to-store.cc @@ -5,12 +5,11 @@ namespace nix { -fetchers::Cache::Key makeFetchToStoreCacheKey( - const std::string & name, const std::string & fingerprint, ContentAddressMethod method, const std::string & path) +fetchers::Cache::Key +makeSourcePathToHashCacheKey(const std::string & fingerprint, ContentAddressMethod method, const std::string & path) { return fetchers::Cache::Key{ - "fetchToStore", - {{"name", name}, {"fingerprint", fingerprint}, {"method", std::string{method.render()}}, {"path", path}}}; + "sourcePathToHash", {{"fingerprint", fingerprint}, {"method", std::string{method.render()}}, {"path", path}}}; } StorePath fetchToStore( @@ -23,23 +22,49 @@ StorePath fetchToStore( PathFilter * filter, RepairFlag repair) { - // FIXME: add an optimisation for the case where the accessor is - // a `PosixSourceAccessor` pointing to a store path. + return fetchToStore2(settings, store, path, mode, name, method, filter, repair).first; +} +std::pair fetchToStore2( + const fetchers::Settings & settings, + Store & store, + const SourcePath & path, + FetchMode mode, + std::string_view name, + ContentAddressMethod method, + PathFilter * filter, + RepairFlag repair) +{ std::optional cacheKey; auto [subpath, fingerprint] = filter ? 
std::pair>{path.path, std::nullopt} : path.accessor->getFingerprint(path.path); if (fingerprint) { - cacheKey = makeFetchToStoreCacheKey(std::string{name}, *fingerprint, method, subpath.abs()); - if (auto res = settings.getCache()->lookupStorePath(*cacheKey, store)) { - debug("store path cache hit for '%s'", path); - return res->storePath; + cacheKey = makeSourcePathToHashCacheKey(*fingerprint, method, subpath.abs()); + if (auto res = settings.getCache()->lookup(*cacheKey)) { + auto hash = Hash::parseSRI(fetchers::getStrAttr(*res, "hash")); + auto storePath = + store.makeFixedOutputPathFromCA(name, ContentAddressWithReferences::fromParts(method, hash, {})); + + /* Add a temproot before the call to isValidPath to prevent accidental GC in case the + input is cached. Note that this must be done before to avoid races. */ + if (mode != FetchMode::DryRun) + store.addTempRoot(storePath); + + if (mode == FetchMode::DryRun || store.maybeQueryPathInfo(storePath)) { + debug( + "source path '%s' cache hit in '%s' (hash '%s')", + path, + store.printStorePath(storePath), + hash.to_string(HashFormat::SRI, true)); + return {storePath, hash}; + } + debug("source path '%s' not in store", path); } } else { static auto barf = getEnv("_NIX_TEST_BARF_ON_UNCACHEABLE").value_or("") == "1"; - if (barf && !filter) + if (barf && !filter && !(path.to_string().starts_with("/") || path.to_string().starts_with("«path:/"))) throw Error("source path '%s' is uncacheable (filter=%d)", path, (bool) filter); // FIXME: could still provide in-memory caching keyed on `SourcePath`. debug("source path '%s' is uncacheable", path); @@ -53,16 +78,41 @@ StorePath fetchToStore( auto filter2 = filter ? *filter : defaultPathFilter; - auto storePath = mode == FetchMode::DryRun - ? store.computeStorePath(name, path, method, HashAlgorithm::SHA256, {}, filter2).first - : store.addToStore(name, path, method, HashAlgorithm::SHA256, {}, filter2, repair); - - debug(mode == FetchMode::DryRun ? 
"hashed '%s'" : "copied '%s' to '%s'", path, store.printStorePath(storePath)); + auto [storePath, hash] = + mode == FetchMode::DryRun + ? ({ + auto [storePath, hash] = + store.computeStorePath(name, path, method, HashAlgorithm::SHA256, {}, filter2); + debug( + "hashed '%s' to '%s' (hash '%s')", + path, + store.printStorePath(storePath), + hash.to_string(HashFormat::SRI, true)); + std::make_pair(storePath, hash); + }) + : ({ + // FIXME: ideally addToStore() would return the hash + // right away (like computeStorePath()). + auto storePath = store.addToStore(name, path, method, HashAlgorithm::SHA256, {}, filter2, repair); + auto info = store.queryPathInfo(storePath); + assert(info->references.empty()); + auto hash = method == ContentAddressMethod::Raw::NixArchive ? info->narHash : ({ + if (!info->ca || info->ca->method != method) + throw Error("path '%s' lacks a CA field", store.printStorePath(storePath)); + info->ca->hash; + }); + debug( + "copied '%s' to '%s' (hash '%s')", + path, + store.printStorePath(storePath), + hash.to_string(HashFormat::SRI, true)); + std::make_pair(storePath, hash); + }); - if (cacheKey && mode == FetchMode::Copy) - settings.getCache()->upsert(*cacheKey, store, {}, storePath); + if (cacheKey) + settings.getCache()->upsert(*cacheKey, {{"hash", hash.to_string(HashFormat::SRI, true)}}); - return storePath; + return {storePath, hash}; } } // namespace nix diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index 7e091ef1071e..4534cf54c3ee 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ -4,8 +4,9 @@ #include "nix/fetchers/fetch-to-store.hh" #include "nix/util/json-utils.hh" #include "nix/fetchers/fetch-settings.hh" -#include "nix/fetchers/fetch-to-store.hh" +#include "nix/fetchers/provenance.hh" #include "nix/util/url.hh" +#include "nix/util/forwarding-source-accessor.hh" #include "nix/util/archive.hh" #include @@ -126,24 +127,30 @@ std::optional Input::getFingerprint(Store & store) const return 
fingerprint; } -ParsedURL Input::toURL() const +ParsedURL Input::toURL(bool abbreviate) const { if (!scheme) throw Error("cannot show unsupported input '%s'", attrsToJSON(attrs)); - return scheme->toURL(*this); + + auto url = scheme->toURL(*this, abbreviate); + + if (abbreviate) + url.query.erase("narHash"); + + return url; } -std::string Input::toURLString(const StringMap & extraQuery) const +std::string Input::toURLString(const StringMap & extraQuery, bool abbreviate) const { - auto url = toURL(); + auto url = toURL(abbreviate); for (auto & attr : extraQuery) url.query.insert(attr); return url.to_string(); } -std::string Input::to_string() const +std::string Input::to_string(bool abbreviate) const { - return toURL().to_string(); + return toURL(abbreviate).to_string(); } bool Input::isDirect() const @@ -189,36 +196,30 @@ bool Input::contains(const Input & other) const return false; } -// FIXME: remove -std::pair Input::fetchToStore(const Settings & settings, Store & store) const +std::tuple, Input> Input::fetchToStore(const Settings & settings, Store & store) const { if (!scheme) throw Error("cannot fetch unsupported input '%s'", attrsToJSON(toAttrs())); - auto [storePath, input] = [&]() -> std::pair { - try { - auto [accessor, result] = getAccessorUnchecked(settings, store); - - auto storePath = - nix::fetchToStore(settings, store, SourcePath(accessor), FetchMode::Copy, result.getName()); + try { + auto [accessor, result] = getAccessorUnchecked(settings, store); - auto narHash = store.queryPathInfo(storePath)->narHash; - result.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); + auto storePath = nix::fetchToStore(settings, store, SourcePath(accessor), FetchMode::Copy, result.getName()); - result.attrs.insert_or_assign("__final", Explicit(true)); + auto narHash = store.queryPathInfo(storePath)->narHash; + result.attrs.insert_or_assign("narHash", narHash.to_string(HashFormat::SRI, true)); - assert(result.isFinal()); + 
result.attrs.insert_or_assign("__final", Explicit(true)); - checkLocks(*this, result); + assert(result.isFinal()); - return {storePath, result}; - } catch (Error & e) { - e.addTrace({}, "while fetching the input '%s'", to_string()); - throw; - } - }(); + checkLocks(*this, result); - return {std::move(storePath), input}; + return {std::move(storePath), accessor, result}; + } catch (Error & e) { + e.addTrace({}, "while fetching the input '%s'", to_string()); + throw; + } } void Input::checkLocks(Input specified, Input & result) @@ -236,6 +237,9 @@ void Input::checkLocks(Input specified, Input & result) if (auto prevNarHash = specified.getNarHash()) specified.attrs.insert_or_assign("narHash", prevNarHash->to_string(HashFormat::SRI, true)); + if (auto narHash = result.getNarHash()) + result.attrs.insert_or_assign("narHash", narHash->to_string(HashFormat::SRI, true)); + for (auto & field : specified.attrs) { auto field2 = result.attrs.find(field.first); if (field2 != result.attrs.end() && field.second != field2->second) @@ -269,24 +273,10 @@ void Input::checkLocks(Input specified, Input & result) } } - if (auto prevLastModified = specified.getLastModified()) { - if (result.getLastModified() != prevLastModified) - throw Error( - "'lastModified' attribute mismatch in input '%s', expected %d, got %d", - result.to_string(), - *prevLastModified, - result.getLastModified().value_or(-1)); - } - if (auto prevRev = specified.getRev()) { if (result.getRev() != prevRev) throw Error("'rev' attribute mismatch in input '%s', expected %s", result.to_string(), prevRev->gitRev()); } - - if (auto prevRevCount = specified.getRevCount()) { - if (result.getRevCount() != prevRevCount) - throw Error("'revCount' attribute mismatch in input '%s', expected %d", result.to_string(), *prevRevCount); - } } std::pair, Input> Input::getAccessor(const Settings & settings, Store & store) const @@ -305,6 +295,21 @@ std::pair, Input> Input::getAccessor(const Settings & settin } } +/** + * Helper class 
that ensures that paths in substituted source trees + * are rendered as `«input»/path` rather than + * `«input»/nix/store/-source/path`. + */ +struct SubstitutedSourceAccessor : ForwardingSourceAccessor +{ + using ForwardingSourceAccessor::ForwardingSourceAccessor; + + std::string showPath(const CanonPath & path) override + { + return displayPrefix + path.abs() + displaySuffix; + } +}; + std::pair, Input> Input::getAccessorUnchecked(const Settings & settings, Store & store) const { // FIXME: cache the accessor @@ -312,54 +317,79 @@ std::pair, Input> Input::getAccessorUnchecked(const Settings if (!scheme) throw Error("cannot fetch unsupported input '%s'", attrsToJSON(toAttrs())); - /* The tree may already be in the Nix store, or it could be - substituted (which is often faster than fetching from the - original source). So check that. We only do this for final - inputs, otherwise there is a risk that we don't return the - same attributes (like `lastModified`) that the "real" fetcher - would return. - - FIXME: add a setting to disable this. - FIXME: substituting may be slower than fetching normally, - e.g. for fetchers like Git that are incremental! - */ - if (isFinal() && getNarHash()) { - try { - auto storePath = computeStorePath(store); + std::optional storePath; + if (isFinal() && getNarHash()) + storePath = computeStorePath(store); + + auto makeStoreAccessor = [&]() -> std::pair, Input> { + auto accessor = make_ref(store.requireStoreObjectAccessor(*storePath)); + + // FIXME: use the NAR hash for fingerprinting Git trees since it may have a .gitattributes file and we don't + // know if we used `git archive` or libgit2 to fetch it. + accessor->fingerprint = getType() == "git" ? std::optional(storePath->hashPart()) : getFingerprint(store); + cachedFingerprint = accessor->fingerprint; + + // Store a cache entry for the substituted tree so later fetches + // can reuse the existing nar instead of copying the unpacked + // input back into the store on every evaluation. 
+ if (accessor->fingerprint) { + settings.getCache()->upsert( + makeSourcePathToHashCacheKey(*accessor->fingerprint, ContentAddressMethod::Raw::NixArchive, "/"), + {{"hash", store.queryPathInfo(*storePath)->narHash.to_string(HashFormat::SRI, true)}}); + } + + accessor->provenance = std::make_shared(*this); + + // FIXME: ideally we would use the `showPath()` of the + // "real" accessor for this fetcher type. + accessor->setPathDisplay("«" + to_string(true) + "»"); - store.ensurePath(storePath); + return {accessor, *this}; + }; - debug("using substituted/cached input '%s' in '%s'", to_string(), store.printStorePath(storePath)); + /* If a tree with the expected hash is already in the Nix store, + reuse it. We only do this for final inputs, since otherwise + there is a risk that we don't return the same attributes (like + `lastModified`) that the "real" fetcher would return. */ + if (storePath && store.isValidPath(*storePath)) { + debug("using input '%s' in '%s'", to_string(), store.printStorePath(*storePath)); + return makeStoreAccessor(); + } - auto accessor = store.requireStoreObjectAccessor(storePath); + auto fixupAccessor = [&](ref accessor, Input result) -> std::pair, Input> { + if (auto fp = accessor->getFingerprint(CanonPath::root).second) + result.cachedFingerprint = *fp; + else + accessor->fingerprint = result.getFingerprint(store); - accessor->fingerprint = getFingerprint(store); + accessor->provenance = std::make_shared(result); - // Store a cache entry for the substituted tree so later fetches - // can reuse the existing nar instead of copying the unpacked - // input back into the store on every evaluation. 
- if (accessor->fingerprint) { - ContentAddressMethod method = ContentAddressMethod::Raw::NixArchive; - auto cacheKey = makeFetchToStoreCacheKey(getName(), *accessor->fingerprint, method, "/"); - settings.getCache()->upsert(cacheKey, store, {}, storePath); - } + return {accessor, result}; + }; - accessor->setPathDisplay("«" + to_string() + "»"); + /* See if the input is in the cache of the fetcher. */ + try { + if (auto res = scheme->getAccessor(settings, store, *this, true)) + return fixupAccessor(res->first, std::move(res->second)); + } catch (...) { + } - return {accessor, *this}; - } catch (Error & e) { - debug("substitution of input '%s' failed: %s", to_string(), e.what()); + /* If not, try to substitute the input. */ + if (storePath) { + try { + store.ensurePath(*storePath); + return makeStoreAccessor(); + } + // Ignore any substitution error. + catch (Error & e2) { + debug("substitution of input '%s' failed: %s", to_string(), e2.info().msg); + } catch (...) { } } + /* If we can't substitute, then fetch normally. 
*/ auto [accessor, result] = scheme->getAccessor(settings, store, *this); - - if (!accessor->fingerprint) - accessor->fingerprint = result.getFingerprint(store); - else - result.cachedFingerprint = accessor->fingerprint; - - return {accessor, std::move(result)}; + return fixupAccessor(accessor, result); } Input Input::applyOverrides(std::optional ref, std::optional rev) const @@ -460,7 +490,7 @@ std::optional Input::getLastModified() const return {}; } -ParsedURL InputScheme::toURL(const Input & input) const +ParsedURL InputScheme::toURL(const Input & input, bool abbreviate) const { throw Error("don't know how to convert input '%s' to a URL", attrsToJSON(input.attrs)); } diff --git a/src/libfetchers/filtering-source-accessor.cc b/src/libfetchers/filtering-source-accessor.cc index 8f1b50eb9371..68eb21566939 100644 --- a/src/libfetchers/filtering-source-accessor.cc +++ b/src/libfetchers/filtering-source-accessor.cc @@ -1,4 +1,5 @@ #include "nix/fetchers/filtering-source-accessor.hh" +#include "nix/util/sync.hh" #include @@ -67,6 +68,18 @@ std::pair> FilteringSourceAccessor::getFin return next->getFingerprint(prefix / path); } +std::shared_ptr FilteringSourceAccessor::getProvenance(const CanonPath & path) +{ + if (provenance) + return SourceAccessor::getProvenance(path); + return next->getProvenance(prefix / path); +} + +void FilteringSourceAccessor::invalidateCache(const CanonPath & path) +{ + next->invalidateCache(prefix / path); +} + void FilteringSourceAccessor::checkAccess(const CanonPath & path) { if (!isAllowed(path)) @@ -76,8 +89,8 @@ void FilteringSourceAccessor::checkAccess(const CanonPath & path) struct AllowListSourceAccessorImpl : AllowListSourceAccessor { - std::set allowedPrefixes; - boost::unordered_flat_set allowedPaths; + SharedSync> allowedPrefixes; + SharedSync> allowedPaths; AllowListSourceAccessorImpl( ref next, @@ -92,12 +105,12 @@ struct AllowListSourceAccessorImpl : AllowListSourceAccessor bool isAllowed(const CanonPath & path) override { - 
return allowedPaths.contains(path) || path.isAllowed(allowedPrefixes); + return allowedPaths.readLock()->contains(path) || path.isAllowed(*allowedPrefixes.readLock()); } void allowPrefix(CanonPath prefix) override { - allowedPrefixes.insert(std::move(prefix)); + allowedPrefixes.lock()->insert(std::move(prefix)); } }; diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index 115539e6b6a1..f21313a10404 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -12,6 +12,7 @@ #include "nix/util/util.hh" #include "nix/util/thread-pool.hh" #include "nix/util/pool.hh" +#include "nix/util/executable-path.hh" #include #include @@ -203,16 +204,19 @@ static git_packbuilder_progress PACKBUILDER_PROGRESS_CHECK_INTERRUPT = &packBuil } // extern "C" -static void initRepoAtomically(std::filesystem::path & path, bool bare) +static void initRepoAtomically(std::filesystem::path & path, GitRepo::Options options) { if (pathExists(path.string())) return; + if (!options.create) + throw Error("Git repository %s does not exist.", path); + std::filesystem::path tmpDir = createTempDir(path.parent_path()); AutoDelete delTmpDir(tmpDir, true); Repository tmpRepo; - if (git_repository_init(Setter(tmpRepo), tmpDir.string().c_str(), bare)) + if (git_repository_init(Setter(tmpRepo), tmpDir.string().c_str(), options.bare)) throw Error("creating Git repository %s: %s", path, git_error_last()->message); try { std::filesystem::rename(tmpDir, path); @@ -234,7 +238,7 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this /** Location of the repository on disk. */ std::filesystem::path path; - bool bare; + Options options; /** * libgit2 repository. 
Note that new objects are not written to disk, @@ -255,18 +259,18 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this */ git_odb_backend * packBackend = nullptr; - GitRepoImpl(std::filesystem::path _path, bool create, bool bare, bool packfilesOnly = false) + GitRepoImpl(std::filesystem::path _path, Options _options) : path(std::move(_path)) - , bare(bare) + , options(_options) { initLibGit2(); - initRepoAtomically(path, bare); + initRepoAtomically(path, options); if (git_repository_open(Setter(repo), path.string().c_str())) throw Error("opening Git repository %s: %s", path, git_error_last()->message); ObjectDb odb; - if (packfilesOnly) { + if (options.packfilesOnly) { /* Create a fresh object database because by default the repo also loose object backends. We are not using any of those for the tarball cache, but libgit2 still does a bunch of unnecessary @@ -295,7 +299,7 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this if (git_odb_add_backend(odb.get(), mempackBackend, 999)) throw Error("adding mempack backend to Git object database: %s", git_error_last()->message); - if (packfilesOnly) { + if (options.packfilesOnly) { if (git_repository_set_odb(repo.get(), odb.get())) throw Error("setting Git object database: %s", git_error_last()->message); } @@ -366,7 +370,26 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this { // TODO: as an optimization, it would be nice to include `this` in the pool. return Pool(std::numeric_limits::max(), [this]() -> ref { - return make_ref(path, false, bare); + auto repo = make_ref(path, options); + + /* Monkey-patching the pack backend to only read the pack directory + once. Otherwise it will do a readdir for each added oid when it's + not found and that translates to ~6 syscalls. Since we are never + writing pack files until flushing we can force the odb backend to + read the directory just once. It's very convenient that the vtable is + semi-public interface and is up for grabs. 
+ + This is purely an optimization for our use-case with a tarball cache. + libgit2 calls refresh() if the backend provides it when an oid isn't found. + We are only writing objects to a mempack (it has higher priority) and there isn't + a realistic use-case where a previously missing object would appear from thin air + on the disk (unless another process happens to be unpacking a similar tarball to + the cache at the same time, but that's a very unrealistic scenario). + */ + if (auto * backend = repo->packBackend) + backend->refresh = nullptr; + + return repo; }); } @@ -382,7 +405,7 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this ThreadPool pool; - auto process = [&done, &pool, &repoPool](this const auto & process, const git_oid & oid) -> void { + auto process = [&done, &pool, &repoPool](this auto const & process, const git_oid & oid) -> void { auto repo(repoPool.get()); auto _commit = lookupObject(*repo, oid, GIT_OBJECT_COMMIT); @@ -593,16 +616,37 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this // that) // then use code that was removed in this commit (see blame) - auto dir = this->path; - Strings gitArgs{"-C", dir.string(), "--git-dir", ".", "fetch", "--progress", "--force"}; - if (shallow) - append(gitArgs, {"--depth", "1"}); - append(gitArgs, {std::string("--"), url, refspec}); + if (ExecutablePath::load().findName("git")) { + auto dir = this->path; + Strings gitArgs{"-C", dir.string(), "--git-dir", ".", "fetch", "--progress", "--force"}; + if (shallow) + append(gitArgs, {"--depth", "1"}); + append(gitArgs, {std::string("--"), url, refspec}); + + auto status = runProgram(RunOptions{.program = "git", .args = gitArgs, .isInteractive = true}).first; + + if (status > 0) + throw Error("Failed to fetch git repository '%s'", url); + } else { + // Fall back to using libgit2 for fetching. This does not + // support SSH very well. 
+ Remote remote; + + if (git_remote_create_anonymous(Setter(remote), *this, url.c_str())) + throw Error("cannot create Git remote '%s': %s", url, git_error_last()->message); + + char * refspecs[] = {(char *) refspec.c_str()}; + git_strarray refspecs2{.strings = refspecs, .count = 1}; - auto status = runProgram(RunOptions{.program = "git", .args = gitArgs, .isInteractive = true}).first; + git_fetch_options opts = GIT_FETCH_OPTIONS_INIT; + // FIXME: for some reason, shallow fetching over ssh barfs + // with "could not read from remote repository". + opts.depth = shallow && parseURL(url).scheme != "ssh" ? 1 : GIT_FETCH_DEPTH_FULL; + opts.callbacks.payload = &act; - if (status > 0) - throw Error("Failed to fetch git repository '%s'", url); + if (git_remote_fetch(remote.get(), &refspecs2, &opts, nullptr)) + throw Error("fetching '%s' from '%s': %s", refspec, url, git_error_last()->message); + } } void verifyCommit(const Hash & rev, const std::vector & publicKeys) override @@ -665,6 +709,7 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this keyDecoded = base64::decode(k.key); } catch (Error & e) { e.addTrace({}, "while decoding public key '%s' used for git signature", k.key); + throw; } auto fingerprint = trim(hashString(HashAlgorithm::SHA256, keyDecoded).to_string(nix::HashFormat::Base64, false), "="); @@ -712,15 +757,19 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this } }; -ref GitRepo::openRepo(const std::filesystem::path & path, bool create, bool bare, bool packfilesOnly) +ref GitRepo::openRepo(const std::filesystem::path & path, GitRepo::Options options) +{ + return make_ref(path, options); +} + +std::string GitAccessorOptions::makeFingerprint(const Hash & rev) const { - return make_ref(path, create, bare, packfilesOnly); + return "git:" + rev.gitRev() + (exportIgnore ? ";e" : "") + (smudgeLfs ? ";l" : ""); } /** * Raw git tree input accessor. 
*/ - struct GitSourceAccessor : SourceAccessor { struct State @@ -741,6 +790,7 @@ struct GitSourceAccessor : SourceAccessor .options = options, }} { + fingerprint = options.makeFingerprint(rev); } std::string readBlob(const CanonPath & path, bool symlink) @@ -1055,185 +1105,155 @@ struct GitFileSystemObjectSinkImpl : GitFileSystemObjectSink { ref repo; - struct PendingDir - { - std::string name; - TreeBuilder builder; - }; - - std::vector pendingDirs; + Pool repoPool; - /** - * Temporary buffer used by createRegularFile for storing small file contents. - */ - std::string regularFileContentsBuffer; + unsigned int concurrency = std::min(std::thread::hardware_concurrency(), 10U); - /** - * If repo has a non-null packBackend, this has a copy of the refresh function - * from the backend virtual table. This is needed to restore it after we've flushed - * the sink. We modify it to avoid unnecessary I/O on non-existent oids. - */ - decltype(::git_odb_backend::refresh) packfileOdbRefresh = nullptr; + ThreadPool workers{concurrency}; - void pushBuilder(std::string name) - { - const git_tree_entry * entry; - Tree prevTree = nullptr; - - if (!pendingDirs.empty() && (entry = git_treebuilder_get(pendingDirs.back().builder.get(), name.c_str()))) { - /* Clone a tree that we've already finished. This happens - if a tarball has directory entries that are not - contiguous. */ - if (git_tree_entry_type(entry) != GIT_OBJECT_TREE) - throw Error("parent of '%s' is not a directory", name); - - if (git_tree_entry_to_object((git_object **) (git_tree **) Setter(prevTree), *repo, entry)) - throw Error("looking up parent of '%s': %s", name, git_error_last()->message); - } + /** Total file contents in flight. 
*/ + std::atomic totalBufSize{0}; - git_treebuilder * b; - if (git_treebuilder_new(&b, *repo, prevTree.get())) - throw Error("creating a tree builder: %s", git_error_last()->message); - pendingDirs.push_back({.name = std::move(name), .builder = TreeBuilder(b)}); - }; + static constexpr std::size_t maxBufSize = 16 * 1024 * 1024; GitFileSystemObjectSinkImpl(ref repo) : repo(repo) + , repoPool(repo->getPool()) { - /* Monkey-patching the pack backend to only read the pack directory - once. Otherwise it will do a readdir for each added oid when it's - not found and that translates to ~6 syscalls. Since we are never - writing pack files until flushing we can force the odb backend to - read the directory just once. It's very convenient that the vtable is - semi-public interface and is up for grabs. - - This is purely an optimization for our use-case with a tarball cache. - libgit2 calls refresh() if the backend provides it when an oid isn't found. - We are only writing objects to a mempack (it has higher priority) and there isn't - a realistic use-case where a previously missing object would appear from thin air - on the disk (unless another process happens to be unpacking a similar tarball to - the cache at the same time, but that's a very unrealistic scenario). - */ - if (auto * backend = repo->packBackend) { - if (backend->refresh(backend)) /* Refresh just once manually. */ - throw Error("refreshing packfiles: %s", git_error_last()->message); - /* Save the function pointer to restore it later in flush() and - unset it in the vtable. 
libgit2 does nothing if it's a nullptr: - https://github.com/libgit2/libgit2/blob/58d9363f02f1fa39e46d49b604f27008e75b72f2/src/libgit2/odb.c#L1922 - */ - packfileOdbRefresh = std::exchange(backend->refresh, nullptr); - } - pushBuilder(""); } - std::pair popBuilder() + ~GitFileSystemObjectSinkImpl() { - assert(!pendingDirs.empty()); - auto pending = std::move(pendingDirs.back()); - git_oid oid; - if (git_treebuilder_write(&oid, pending.builder.get())) - throw Error("creating a tree object: %s", git_error_last()->message); - pendingDirs.pop_back(); - return {oid, pending.name}; - }; + // Make sure the worker threads are destroyed before any state + // they're referring to. + workers.shutdown(); + } - void addToTree(const std::string & name, const git_oid & oid, git_filemode_t mode) + struct Child; + + /// A directory to be written as a Git tree. + struct Directory { - assert(!pendingDirs.empty()); - auto & pending = pendingDirs.back(); - if (git_treebuilder_insert(nullptr, pending.builder.get(), name.c_str(), &oid, mode)) - throw Error("adding a file to a tree builder: %s", git_error_last()->message); + std::map children; + std::optional oid; + + Child & lookup(const CanonPath & path) + { + assert(!path.isRoot()); + auto parent = path.parent(); + auto cur = this; + for (auto & name : *parent) { + auto i = cur->children.find(std::string(name)); + if (i == cur->children.end()) + throw Error("path '%s' does not exist", path); + auto dir = std::get_if(&i->second.file); + if (!dir) + throw Error("path '%s' has a non-directory parent", path); + cur = dir; + } + + auto i = cur->children.find(std::string(*path.baseName())); + if (i == cur->children.end()) + throw Error("path '%s' does not exist", path); + return i->second; + } }; - void updateBuilders(std::span names) + size_t nextId = 0; // for Child.id + + struct Child { - // Find the common prefix of pendingDirs and names. 
- size_t prefixLen = 0; - for (; prefixLen < names.size() && prefixLen + 1 < pendingDirs.size(); ++prefixLen) - if (names[prefixLen] != pendingDirs[prefixLen + 1].name) - break; - - // Finish the builders that are not part of the common prefix. - for (auto n = pendingDirs.size(); n > prefixLen + 1; --n) { - auto [oid, name] = popBuilder(); - addToTree(name, oid, GIT_FILEMODE_TREE); - } + git_filemode_t mode; + std::variant file; - // Create builders for the new directories. - for (auto n = prefixLen; n < names.size(); ++n) - pushBuilder(names[n]); + /// Sequential numbering of the file in the tarball. This is + /// used to make sure we only import the latest version of a + /// path. + size_t id{0}; }; - bool prepareDirs(const std::vector & pathComponents, bool isDir) + struct State { - std::span pathComponents2{pathComponents}; + Directory root; + }; - updateBuilders(isDir ? pathComponents2 : pathComponents2.first(pathComponents2.size() - 1)); + Sync _state; - return true; + void addNode(State & state, const CanonPath & path, Child && child) + { + assert(!path.isRoot()); + auto parent = path.parent(); + + Directory * cur = &state.root; + + for (auto & i : *parent) { + auto child = std::get_if( + &cur->children.emplace(std::string(i), Child{GIT_FILEMODE_TREE, {Directory()}}).first->second.file); + assert(child); + cur = child; + } + + std::string name(*path.baseName()); + + if (auto prev = cur->children.find(name); prev == cur->children.end() || prev->second.id < child.id) + cur->children.insert_or_assign(name, std::move(child)); } void createRegularFile(const CanonPath & path, std::function func) override { - auto pathComponents = tokenizeString>(path.rel(), "/"); - if (!prepareDirs(pathComponents, false)) - return; + checkInterrupt(); + + /* Multithreaded blob writing. We read the incoming file data into memory and asynchronously write it to a Git + blob object. 
However, to avoid unbounded memory usage, if the amount of data in flight exceeds a threshold, + we switch to writing directly to a Git write stream. */ using WriteStream = std::unique_ptr<::git_writestream, decltype([](::git_writestream * stream) { if (stream) stream->free(stream); })>; - /* Maximum file size that gets buffered in memory before flushing to a WriteStream, - that's backed by a temporary objects/streamed_git2_* file. We should avoid that - for common cases, since creating (and deleting) a temporary file for each blob - is insanely expensive. */ - static constexpr std::size_t maxBufferSize = 1024 * 1024; /* 1 MiB */ - struct CRF : CreateRegularFileSink { - const CanonPath & path; - GitFileSystemObjectSinkImpl & back; + CanonPath path; + GitFileSystemObjectSinkImpl & parent; WriteStream stream; - std::string & contents; + std::optional repo; + + std::string contents; bool executable = false; - CRF(const CanonPath & path, GitFileSystemObjectSinkImpl & back, std::string & regularFileContentsBuffer) - : path(path) - , back(back) - , stream(nullptr) - , contents(regularFileContentsBuffer) + CRF(CanonPath path, GitFileSystemObjectSinkImpl & parent) + : path(std::move(path)) + , parent(parent) { - contents.clear(); } - void writeToStream(std::string_view data) + ~CRF() { - /* Lazily create the stream. */ - if (!stream) { - ::git_writestream * stream2 = nullptr; - if (git_blob_create_from_stream(&stream2, *back.repo, nullptr)) - throw Error("creating a blob stream object: %s", git_error_last()->message); - stream = WriteStream{stream2}; - assert(stream); - } - - if (stream->write(stream.get(), data.data(), data.size())) - throw Error("writing a blob for tarball member '%s': %s", path, git_error_last()->message); + parent.totalBufSize -= contents.size(); } void operator()(std::string_view data) override { - /* Already in slow path. Just write to the slow stream. 
*/ - if (stream) { - writeToStream(data); - return; - } + if (!stream) { + contents.append(data); + parent.totalBufSize += data.size(); - contents += data; - if (contents.size() > maxBufferSize) { - writeToStream(contents); /* Will initialize stream. */ - contents.clear(); + if (parent.totalBufSize > parent.maxBufSize) { + repo.emplace(parent.repoPool.get()); + + if (git_blob_create_from_stream(Setter(stream), **repo, nullptr)) + throw Error("creating a blob stream object: %s", git_error_last()->message); + + if (stream->write(stream.get(), contents.data(), contents.size())) + throw Error("writing a blob for tarball member '%s': %s", path, git_error_last()->message); + + parent.totalBufSize -= contents.size(); + contents.clear(); + } + } else { + if (stream->write(stream.get(), data.data(), data.size())) + throw Error("writing a blob for tarball member '%s': %s", path, git_error_last()->message); } } @@ -1241,112 +1261,140 @@ struct GitFileSystemObjectSinkImpl : GitFileSystemObjectSink { executable = true; } - } crf{path, *this, regularFileContentsBuffer}; + }; + + auto crf = std::make_shared(path, *this); - func(crf); + func(*crf); - git_oid oid; - if (crf.stream) { - /* Call .release(), since git_blob_create_from_stream_commit + auto id = nextId++; + + if (crf->stream) { + /* Finish the slow path by creating the blob object synchronously. + Call .release(), since git_blob_create_from_stream_commit acquires ownership and frees the stream. */ - if (git_blob_create_from_stream_commit(&oid, crf.stream.release())) + git_oid oid; + if (git_blob_create_from_stream_commit(&oid, crf->stream.release())) throw Error("creating a blob object for '%s': %s", path, git_error_last()->message); - } else { - if (git_blob_create_from_buffer(&oid, *repo, crf.contents.data(), crf.contents.size())) - throw Error( - "creating a blob object for '%s' from in-memory buffer: %s", path, git_error_last()->message); + addNode( + *_state.lock(), + crf->path, + Child{crf->executable ? 
GIT_FILEMODE_BLOB_EXECUTABLE : GIT_FILEMODE_BLOB, oid, id}); + return; } - addToTree(*pathComponents.rbegin(), oid, crf.executable ? GIT_FILEMODE_BLOB_EXECUTABLE : GIT_FILEMODE_BLOB); + /* Fast path: create the blob object in a separate thread. */ + workers.enqueue([this, crf{std::move(crf)}, id]() { + auto repo(repoPool.get()); + + git_oid oid; + if (git_blob_create_from_buffer(&oid, *repo, crf->contents.data(), crf->contents.size())) + throw Error( + "creating a blob object for '%s' from in-memory buffer: %s", crf->path, git_error_last()->message); + + addNode( + *_state.lock(), + crf->path, + Child{crf->executable ? GIT_FILEMODE_BLOB_EXECUTABLE : GIT_FILEMODE_BLOB, oid, id}); + }); } void createDirectory(const CanonPath & path) override { - auto pathComponents = tokenizeString>(path.rel(), "/"); - (void) prepareDirs(pathComponents, true); + if (path.isRoot()) + return; + auto state(_state.lock()); + addNode(*state, path, {GIT_FILEMODE_TREE, Directory()}); } void createSymlink(const CanonPath & path, const std::string & target) override { - auto pathComponents = tokenizeString>(path.rel(), "/"); - if (!prepareDirs(pathComponents, false)) - return; + workers.enqueue([this, path, target]() { + auto repo(repoPool.get()); - git_oid oid; - if (git_blob_create_from_buffer(&oid, *repo, target.c_str(), target.size())) - throw Error("creating a blob object for tarball symlink member '%s': %s", path, git_error_last()->message); + git_oid oid; + if (git_blob_create_from_buffer(&oid, *repo, target.c_str(), target.size())) + throw Error( + "creating a blob object for tarball symlink member '%s': %s", path, git_error_last()->message); - addToTree(*pathComponents.rbegin(), oid, GIT_FILEMODE_LINK); + auto state(_state.lock()); + addNode(*state, path, Child{GIT_FILEMODE_LINK, oid}); + }); } + std::map hardLinks; + void createHardlink(const CanonPath & path, const CanonPath & target) override { - std::vector pathComponents; - for (auto & c : path) - pathComponents.emplace_back(c); 
+ hardLinks.insert_or_assign(path, target); + } - if (!prepareDirs(pathComponents, false)) - return; + Hash flush() override + { + workers.process(); - // We can't just look up the path from the start of the root, since - // some parent directories may not have finished yet, so we compute - // a relative path that helps us find the right git_tree_builder or object. - auto relTarget = CanonPath(path).parent()->makeRelative(target); - - auto dir = pendingDirs.rbegin(); - - // For each ../ component at the start, go up one directory. - // CanonPath::makeRelative() always puts all .. elements at the start, - // so they're all handled by this loop: - std::string_view relTargetLeft(relTarget); - while (hasPrefix(relTargetLeft, "../")) { - if (dir == pendingDirs.rend()) - throw Error("invalid hard link target '%s' for path '%s'", target, path); - ++dir; - relTargetLeft = relTargetLeft.substr(3); - } - if (dir == pendingDirs.rend()) - throw Error("invalid hard link target '%s' for path '%s'", target, path); - - // Look up the remainder of the target, starting at the - // top-most `git_treebuilder`. - std::variant curDir{dir->builder.get()}; - Object tree; // needed to keep `entry` alive - const git_tree_entry * entry = nullptr; - - for (auto & c : CanonPath(relTargetLeft)) { - if (auto builder = std::get_if(&curDir)) { - assert(*builder); - if (!(entry = git_treebuilder_get(*builder, std::string(c).c_str()))) - throw Error("cannot find hard link target '%s' for path '%s'", target, path); - curDir = *git_tree_entry_id(entry); - } else if (auto oid = std::get_if(&curDir)) { - tree = lookupObject(*repo, *oid, GIT_OBJECT_TREE); - if (!(entry = git_tree_entry_byname((const git_tree *) &*tree, std::string(c).c_str()))) - throw Error("cannot find hard link target '%s' for path '%s'", target, path); - curDir = *git_tree_entry_id(entry); + /* Create hard links. 
*/ + { + auto state(_state.lock()); + for (auto & [path, target] : hardLinks) { + if (target.isRoot()) + continue; + try { + auto child = state->root.lookup(target); + auto oid = std::get_if(&child.file); + if (!oid) + throw Error("cannot create a hard link to a directory"); + addNode(*state, path, {child.mode, *oid}); + } catch (Error & e) { + e.addTrace(nullptr, "while creating a hard link from '%s' to '%s'", path, target); + throw; + } } } - assert(entry); + // Flush all repo objects to disk. + { + auto repos = repoPool.clear(); + ThreadPool workers{repos.size()}; + for (auto & repo : repos) + workers.enqueue([repo]() { repo->flush(); }); + workers.process(); + } - addToTree(*pathComponents.rbegin(), *git_tree_entry_id(entry), git_tree_entry_filemode(entry)); - } + // Write the Git trees to disk. Would be nice to have this multithreaded too, but that's hard because a tree + // can't refer to an object that hasn't been written yet. Also it doesn't make a big difference for performance. + auto repo(repoPool.get()); - Hash flush() override - { - updateBuilders({}); + [&](this const auto & visit, Directory & node) -> void { + checkInterrupt(); - auto [oid, _name] = popBuilder(); + // Write the child directories. + for (auto & child : node.children) + if (auto dir = std::get_if(&child.second.file)) + visit(*dir); + + // Write this directory. + git_treebuilder * b; + if (git_treebuilder_new(&b, *repo, nullptr)) + throw Error("creating a tree builder: %s", git_error_last()->message); + TreeBuilder builder(b); + + for (auto & [name, child] : node.children) { + auto oid_p = std::get_if(&child.file); + auto oid = oid_p ? *oid_p : std::get(child.file).oid.value(); + if (git_treebuilder_insert(nullptr, builder.get(), name.c_str(), &oid, child.mode)) + throw Error("adding a file to a tree builder: %s", git_error_last()->message); + } - if (auto * backend = repo->packBackend) { - /* We are done writing blobs, can restore refresh functionality. 
*/ - backend->refresh = packfileOdbRefresh; - } + git_oid oid; + if (git_treebuilder_write(&oid, builder.get())) + throw Error("creating a tree object: %s", git_error_last()->message); + node.oid = oid; + }(_state.lock()->root); repo->flush(); - return toHash(oid); + return toHash(_state.lock()->root.oid.value()); } }; @@ -1432,7 +1480,10 @@ ref Settings::getTarballCache() const * for optimal packfiles. */ static auto repoDir = std::filesystem::path(getCacheDir()) / "tarball-cache-v2"; - return GitRepo::openRepo(repoDir, /*create=*/true, /*bare=*/true, /*packfilesOnly=*/true); + auto tarballCache(_tarballCache.lock()); + if (!*tarballCache) + *tarballCache = GitRepo::openRepo(repoDir, {.create = true, .bare = true, .packfilesOnly = true}); + return ref(*tarballCache); } } // namespace fetchers @@ -1446,7 +1497,7 @@ GitRepo::WorkdirInfo GitRepo::getCachedWorkdirInfo(const std::filesystem::path & if (i != cache->end()) return i->second; } - auto workdirInfo = GitRepo::openRepo(path)->getWorkdirInfo(); + auto workdirInfo = GitRepo::openRepo(path, {})->getWorkdirInfo(); _cache.lock()->emplace(path, workdirInfo); return workdirInfo; } diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index 4f5247861d88..7b1447b2ea99 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -16,6 +16,7 @@ #include "nix/util/json-utils.hh" #include "nix/util/archive.hh" #include "nix/util/mounted-source-accessor.hh" +#include "nix/fetchers/fetch-to-store.hh" #include #include @@ -393,15 +394,17 @@ struct GitInputScheme : InputScheme return input; } - ParsedURL toURL(const Input & input) const override + ParsedURL toURL(const Input & input, bool abbreviate) const override { auto url = parseURL(getStrAttr(input.attrs, "url")); if (url.scheme != "git") url.scheme = "git+" + url.scheme; if (auto rev = input.getRev()) url.query.insert_or_assign("rev", rev->gitRev()); - if (auto ref = input.getRef()) - url.query.insert_or_assign("ref", *ref); + if (auto ref = 
input.getRef()) { + if (!abbreviate || (*ref != "master" && *ref != "main")) + url.query.insert_or_assign("ref", *ref); + } if (getShallowAttr(input)) url.query.insert_or_assign("shallow", "1"); if (getLfsAttr(input)) @@ -558,10 +561,10 @@ struct GitInputScheme : InputScheme { if (workdirInfo.isDirty) { if (!settings.allowDirty) - throw Error("Git tree '%s' is dirty", locationToArg()); + throw Error("Git tree '%s' has uncommitted changes", locationToArg()); if (settings.warnDirty) - warn("Git tree '%s' is dirty", locationToArg()); + warn("Git tree '%s' has uncommitted changes", locationToArg()); } } @@ -637,11 +640,6 @@ struct GitInputScheme : InputScheme url); } - // If we don't check here for the path existence, then we can give libgit2 any directory - // and it will initialize them as git directories. - if (!pathExists(path)) { - throw Error("The path '%s' does not exist.", path); - } repoInfo.location = std::filesystem::absolute(path); } else { if (url.scheme == "file") @@ -703,7 +701,7 @@ struct GitInputScheme : InputScheme if (auto res = cache->lookup(key)) return getIntAttr(*res, "lastModified"); - auto lastModified = GitRepo::openRepo(repoDir)->getLastModified(rev); + auto lastModified = GitRepo::openRepo(repoDir, {})->getLastModified(rev); cache->upsert(key, {{"lastModified", lastModified}}); @@ -726,7 +724,7 @@ struct GitInputScheme : InputScheme Activity act( *logger, lvlChatty, actUnknown, fmt("getting Git revision count of '%s'", repoInfo.locationToArg())); - auto revCount = GitRepo::openRepo(repoDir)->getRevCount(rev); + auto revCount = GitRepo::openRepo(repoDir, {})->getRevCount(rev); cache->upsert(key, Attrs{{"revCount", revCount}}); @@ -737,7 +735,7 @@ struct GitInputScheme : InputScheme { auto head = std::visit( overloaded{ - [&](const std::filesystem::path & path) { return GitRepo::openRepo(path)->getWorkdirRef(); }, + [&](const std::filesystem::path & path) { return GitRepo::openRepo(path, {})->getWorkdirRef(); }, [&](const ParsedURL & url) { 
return readHeadCached(url.to_string(), shallow); }}, repoInfo.location); if (!head) { @@ -778,15 +776,88 @@ struct GitInputScheme : InputScheme } } - std::pair, Input> - getAccessorFromCommit(const Settings & settings, Store & store, RepoInfo & repoInfo, Input && input) const + /** + * Decide whether we can do a shallow clone, which is faster. This is possible if the user explicitly specified + * `shallow = true`, or if we already have a `revCount`. + */ + bool canDoShallow(const Input & input) const + { + bool shallow = getShallowAttr(input); + return shallow || input.getRevCount().has_value(); + } + + GitAccessorOptions getGitAccessorOptions(const Input & input) const + { + return GitAccessorOptions{ + .exportIgnore = getExportIgnoreAttr(input), + .smudgeLfs = getLfsAttr(input), + .submodules = getSubmodulesAttr(input), + }; + } + + /** + * Get a `SourceAccessor` for the given Git revision using Nix < 2.20 semantics, i.e. using `git archive` or `git + * checkout`. + */ + ref getLegacyGitAccessor( + Store & store, + RepoInfo & repoInfo, + const std::filesystem::path & repoDir, + const Hash & rev, + GitAccessorOptions & options) const + { + auto tmpDir = createTempDir(); + AutoDelete delTmpDir(tmpDir, true); + + auto storePath = + options.submodules + ? [&]() { + // Nix < 2.20 used `git checkout` for repos with submodules. 
+ runProgram({.program = "git", .args = {"init", tmpDir}}); + runProgram({.program = "git", .args = {"-C", tmpDir, "remote", "add", "origin", repoDir}}); + runProgram({.program = "git", .args = {"-C", tmpDir, "fetch", "origin", rev.gitRev()}}); + runProgram({.program = "git", .args = {"-C", tmpDir, "checkout", rev.gitRev()}}); + PathFilter filter = [&](const Path & path) { return baseNameOf(path) != ".git"; }; + return store.addToStore( + "source", + {getFSSourceAccessor(), CanonPath(tmpDir.string())}, + ContentAddressMethod::Raw::NixArchive, + HashAlgorithm::SHA256, + {}, + filter); + }() + : [&]() { + // Nix < 2.20 used `git archive` for repos without submodules. + options.exportIgnore = true; + + auto source = sinkToSource([&](Sink & sink) { + runProgram2( + {.program = "git", + .args = {"-C", repoDir, "--git-dir", repoInfo.gitDir, "archive", rev.gitRev()}, + .standardOut = &sink}); + }); + + unpackTarfile(*source, tmpDir); + + return store.addToStore("source", {getFSSourceAccessor(), CanonPath(tmpDir.string())}); + }(); + + auto accessor = store.getFSAccessor(storePath); + + accessor->fingerprint = options.makeFingerprint(rev) + ";legacy"; + + return ref{accessor}; + } + + std::optional, Input>> getAccessorFromCommit( + const Settings & settings, Store & store, RepoInfo & repoInfo, Input && input, bool fastOnly) const { assert(!repoInfo.workdirInfo.isDirty); auto origRev = input.getRev(); auto originalRef = input.getRef(); - bool shallow = getShallowAttr(input); + bool shallow = canDoShallow(input); auto ref = originalRef ? 
*originalRef : getDefaultRef(repoInfo, shallow); input.attrs.insert_or_assign("ref", ref); @@ -795,17 +866,33 @@ struct GitInputScheme : InputScheme if (auto repoPath = repoInfo.getPath()) { repoDir = *repoPath; if (!input.getRev()) - input.attrs.insert_or_assign("rev", GitRepo::openRepo(repoDir)->resolveRef(ref).gitRev()); + input.attrs.insert_or_assign("rev", GitRepo::openRepo(repoDir, {})->resolveRef(ref).gitRev()); } else { + auto rev = input.getRev(); auto repoUrl = std::get(repoInfo.location); std::filesystem::path cacheDir = getCachePath(repoUrl.to_string(), shallow); repoDir = cacheDir; repoInfo.gitDir = "."; + /* If shallow = false, but we have a non-shallow repo that already contains the desired rev, then use that + * repo instead. */ + std::filesystem::path cacheDirNonShallow = getCachePath(repoUrl.to_string(), false); + if (rev && shallow && pathExists(cacheDirNonShallow)) { + auto nonShallowRepo = GitRepo::openRepo(cacheDirNonShallow, {.create = true, .bare = true}); + if (nonShallowRepo->hasObject(*rev)) { + debug( + "using non-shallow cached repo for '%s' since it contains rev '%s'", + repoUrl.to_string(), + rev->gitRev()); + repoDir = cacheDirNonShallow; + goto have_rev; + } + } + std::filesystem::create_directories(cacheDir.parent_path()); PathLocks cacheDirLock({cacheDir.string()}); - auto repo = GitRepo::openRepo(cacheDir, true, true); + auto repo = GitRepo::openRepo(cacheDir, {.create = true, .bare = true}); // We need to set the origin so resolving submodule URLs works repo->setRemote("origin", repoUrl.to_string()); @@ -817,7 +904,7 @@ struct GitInputScheme : InputScheme /* If a rev was specified, we need to fetch if it's not in the repo. 
*/ - if (auto rev = input.getRev()) { + if (rev) { doFetch = !repo->hasObject(*rev); } else { if (getAllRefsAttr(input)) { @@ -831,7 +918,9 @@ struct GitInputScheme : InputScheme } if (doFetch) { - bool shallow = getShallowAttr(input); + if (fastOnly) + return std::nullopt; + try { auto fetchRef = getAllRefsAttr(input) ? "refs/*:refs/*" : input.getRev() ? input.getRev()->gitRev() @@ -859,7 +948,7 @@ struct GitInputScheme : InputScheme warn("could not update cached head '%s' for '%s'", ref, repoInfo.locationToArg()); } - if (auto rev = input.getRev()) { + if (rev) { if (!repo->hasObject(*rev)) throw Error( "Cannot find Git revision '%s' in ref '%s' of repository '%s'! " @@ -876,40 +965,88 @@ struct GitInputScheme : InputScheme // the remainder } - auto repo = GitRepo::openRepo(repoDir); - - auto isShallow = repo->isShallow(); - - if (isShallow && !getShallowAttr(input)) - throw Error( - "'%s' is a shallow Git repository, but shallow repositories are only allowed when `shallow = true;` is specified", - repoInfo.locationToArg()); + have_rev: + auto repo = GitRepo::openRepo(repoDir, {}); // FIXME: check whether rev is an ancestor of ref? auto rev = *input.getRev(); - input.attrs.insert_or_assign("lastModified", getLastModified(settings, repoInfo, repoDir, rev)); + /* Skip lastModified computation if it's already supplied by the caller. + We don't care if they specify an incorrect value; it doesn't + matter for security, unlike narHash. */ + if (!input.attrs.contains("lastModified")) + input.attrs.insert_or_assign("lastModified", getLastModified(settings, repoInfo, repoDir, rev)); + + /* Like lastModified, skip revCount if supplied by the caller. 
*/ + if (!shallow && !input.attrs.contains("revCount")) { + auto isShallow = repo->isShallow(); + + if (isShallow && !shallow) + throw Error( + "'%s' is a shallow Git repository, but shallow repositories are only allowed when `shallow = true;` is specified", + repoInfo.locationToArg()); - if (!getShallowAttr(input)) input.attrs.insert_or_assign("revCount", getRevCount(settings, repoInfo, repoDir, rev)); + } printTalkative("using revision %s of repo '%s'", rev.gitRev(), repoInfo.locationToArg()); verifyCommit(input, repo); - bool exportIgnore = getExportIgnoreAttr(input); - bool smudgeLfs = getLfsAttr(input); - auto accessor = repo->getAccessor( - rev, {.exportIgnore = exportIgnore, .smudgeLfs = smudgeLfs}, "«" + input.to_string() + "»"); + auto options = getGitAccessorOptions(input); + + auto expectedNarHash = input.getNarHash(); + + auto accessor = repo->getAccessor(rev, options, "«" + input.to_string(true) + "»"); + + if (settings.nix219Compat && !options.smudgeLfs) { + /* Use Nix 2.19 semantics to generate locks, but if a NAR hash is specified, support Nix >= 2.20 semantics + * as well. */ + warn("Using Nix 2.19 semantics to export Git repository '%s'.", input.to_string()); + auto accessorModern = accessor; + accessor = getLegacyGitAccessor(store, repoInfo, repoDir, rev, options); + if (expectedNarHash) { + auto narHashLegacy = + fetchToStore2(settings, store, {accessor}, FetchMode::DryRun, input.getName()).second; + if (expectedNarHash != narHashLegacy) { + auto narHashModern = + fetchToStore2(settings, store, {accessorModern}, FetchMode::DryRun, input.getName()).second; + if (expectedNarHash == narHashModern) + accessor = accessorModern; + } + } + } else { + /* Backward compatibility hack for locks produced by Nix < 2.20 that depend on Nix applying Git filters, + * `export-ignore` or `export-subst`. Nix >= 2.20 doesn't do those, so we may get a NAR hash mismatch. If + * that happens, try again using `git archive`. 
*/ + if (expectedNarHash) { + auto narHashNew = fetchToStore2(settings, store, {accessor}, FetchMode::DryRun, input.getName()).second; + if (expectedNarHash != narHashNew) { + auto accessorLegacy = getLegacyGitAccessor(store, repoInfo, repoDir, rev, options); + auto narHashLegacy = + fetchToStore2(settings, store, {accessorLegacy}, FetchMode::DryRun, input.getName()).second; + if (expectedNarHash == narHashLegacy) { + warn( + "Git input '%s' specifies a NAR hash '%s' that was created by Nix < 2.20.\n" + "Nix >= 2.20 does not apply Git filters, `export-ignore` and `export-subst` by default, which changes the NAR hash.\n" + "Please update the NAR hash to '%s'.", + input.to_string(), + expectedNarHash->to_string(HashFormat::SRI, true), + narHashNew.to_string(HashFormat::SRI, true)); + accessor = accessorLegacy; + } + } + } + } /* If the repo has submodules, fetch them and return a mounted input accessor consisting of the accessor for the top-level repo and the accessors for the submodules. 
*/ - if (getSubmodulesAttr(input)) { + if (options.submodules) { std::map> mounts; - for (auto & [submodule, submoduleRev] : repo->getSubmodules(rev, exportIgnore)) { + for (auto & [submodule, submoduleRev] : repo->getSubmodules(rev, options.exportIgnore)) { auto resolved = repo->resolveSubmoduleUrl(submodule.url); debug( "Git submodule %s: %s %s %s -> %s", @@ -932,25 +1069,27 @@ struct GitInputScheme : InputScheme } } attrs.insert_or_assign("rev", submoduleRev.gitRev()); - attrs.insert_or_assign("exportIgnore", Explicit{exportIgnore}); + attrs.insert_or_assign("exportIgnore", Explicit{options.exportIgnore}); attrs.insert_or_assign("submodules", Explicit{true}); - attrs.insert_or_assign("lfs", Explicit{smudgeLfs}); + attrs.insert_or_assign("lfs", Explicit{options.smudgeLfs}); attrs.insert_or_assign("allRefs", Explicit{true}); auto submoduleInput = fetchers::Input::fromAttrs(settings, std::move(attrs)); auto [submoduleAccessor, submoduleInput2] = submoduleInput.getAccessor(settings, store); - submoduleAccessor->setPathDisplay("«" + submoduleInput.to_string() + "»"); + submoduleAccessor->setPathDisplay("«" + submoduleInput.to_string(true) + "»"); mounts.insert_or_assign(submodule.path, submoduleAccessor); } if (!mounts.empty()) { + auto newFingerprint = accessor->getFingerprint(CanonPath::root).second->append(";s"); mounts.insert_or_assign(CanonPath::root, accessor); accessor = makeMountedSourceAccessor(std::move(mounts)); + accessor->fingerprint = newFingerprint; } } assert(!origRev || origRev == rev); - return {accessor, std::move(input)}; + return {{accessor, std::move(input)}}; } std::pair, Input> @@ -963,7 +1102,7 @@ struct GitInputScheme : InputScheme for (auto & submodule : repoInfo.workdirInfo.submodules) repoInfo.workdirInfo.files.insert(submodule.path); - auto repo = GitRepo::openRepo(repoPath, false, false); + auto repo = GitRepo::openRepo(repoPath, {}); auto exportIgnore = getExportIgnoreAttr(input); @@ -988,7 +1127,7 @@ struct GitInputScheme : 
InputScheme auto submoduleInput = fetchers::Input::fromAttrs(settings, std::move(attrs)); auto [submoduleAccessor, submoduleInput2] = submoduleInput.getAccessor(settings, store); - submoduleAccessor->setPathDisplay("«" + submoduleInput.to_string() + "»"); + submoduleAccessor->setPathDisplay("«" + submoduleInput.to_string(true) + "»"); /* If the submodule is dirty, mark this repo dirty as well. */ @@ -1003,7 +1142,7 @@ struct GitInputScheme : InputScheme } if (!repoInfo.workdirInfo.isDirty) { - auto repo = GitRepo::openRepo(repoPath); + auto repo = GitRepo::openRepo(repoPath, {}); if (auto ref = repo->getWorkdirRef()) input.attrs.insert_or_assign("ref", *ref); @@ -1037,8 +1176,8 @@ struct GitInputScheme : InputScheme return {accessor, std::move(input)}; } - std::pair, Input> - getAccessor(const Settings & settings, Store & store, const Input & _input) const override + std::optional, Input>> + getAccessor(const Settings & settings, Store & store, const Input & _input, bool fastOnly) const override { Input input(_input); @@ -1053,22 +1192,19 @@ struct GitInputScheme : InputScheme throw UnimplementedError("exportIgnore and submodules are not supported together yet"); } - auto [accessor, final] = input.getRef() || input.getRev() || !repoInfo.getPath() - ? getAccessorFromCommit(settings, store, repoInfo, std::move(input)) - : getAccessorFromWorkdir(settings, store, repoInfo, std::move(input)); - - return {accessor, std::move(final)}; + return input.getRef() || input.getRev() || !repoInfo.getPath() + ? getAccessorFromCommit(settings, store, repoInfo, std::move(input), fastOnly) + : std::optional{getAccessorFromWorkdir(settings, store, repoInfo, std::move(input))}; } std::optional getFingerprint(Store & store, const Input & input) const override { - auto makeFingerprint = [&](const Hash & rev) { - return rev.gitRev() + (getSubmodulesAttr(input) ? ";s" : "") + (getExportIgnoreAttr(input) ? ";e" : "") - + (getLfsAttr(input) ? 
";l" : ""); - }; + auto options = getGitAccessorOptions(input); if (auto rev = input.getRev()) - return makeFingerprint(*rev); + // FIXME: this can return a wrong fingerprint for the legacy (`git archive`) case, since we don't know here + // whether to append the `;legacy` suffix or not. + return options.makeFingerprint(*rev); else { auto repoInfo = getRepoInfo(input); if (auto repoPath = repoInfo.getPath(); repoPath && repoInfo.workdirInfo.submodules.empty()) { @@ -1084,7 +1220,7 @@ struct GitInputScheme : InputScheme writeString("deleted:", hashSink); writeString(file.abs(), hashSink); } - return makeFingerprint(repoInfo.workdirInfo.headRev.value_or(nullRev)) + return options.makeFingerprint(repoInfo.workdirInfo.headRev.value_or(nullRev)) + ";d=" + hashSink.finish().hash.to_string(HashFormat::Base16, false); } return std::nullopt; diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc index b86fa926a668..284620985f8a 100644 --- a/src/libfetchers/github.cc +++ b/src/libfetchers/github.cc @@ -162,7 +162,7 @@ struct GitArchiveInputScheme : InputScheme return input; } - ParsedURL toURL(const Input & input) const override + ParsedURL toURL(const Input & input, bool abbreviate) const override { auto owner = getStrAttr(input.attrs, "owner"); auto repo = getStrAttr(input.attrs, "repo"); @@ -173,7 +173,7 @@ struct GitArchiveInputScheme : InputScheme if (ref) path.push_back(*ref); if (rev) - path.push_back(rev->to_string(HashFormat::Base16, false)); + path.push_back(abbreviate ? 
rev->gitShortRev() : rev->gitRev()); auto url = ParsedURL{ .scheme = std::string{schemeName()}, .path = path, @@ -271,7 +271,8 @@ struct GitArchiveInputScheme : InputScheme time_t lastModified; }; - std::pair downloadArchive(const Settings & settings, Store & store, Input input) const + std::optional> + downloadArchive(const Settings & settings, Store & store, Input input, bool fastOnly) const { if (!maybeGetStrAttr(input.attrs, "ref")) input.attrs.insert_or_assign("ref", "HEAD"); @@ -299,12 +300,16 @@ struct GitArchiveInputScheme : InputScheme auto treeHash = getRevAttr(*treeHashAttrs, "treeHash"); auto lastModified = getIntAttr(*lastModifiedAttrs, "lastModified"); if (settings.getTarballCache()->hasObject(treeHash)) - return {std::move(input), TarballInfo{.treeHash = treeHash, .lastModified = (time_t) lastModified}}; + return { + {std::move(input), TarballInfo{.treeHash = treeHash, .lastModified = (time_t) lastModified}}}; else debug("Git tree with hash '%s' has disappeared from the cache, refetching...", treeHash.gitRev()); } } + if (fastOnly) + return std::nullopt; + /* Stream the tarball into the tarball cache. 
*/ auto url = getDownloadUrl(settings, input); @@ -340,13 +345,17 @@ struct GitArchiveInputScheme : InputScheme rev->gitRev(), input.to_string(), upstreamTreeHash->gitRev(), tarballInfo.treeHash.gitRev()); #endif - return {std::move(input), tarballInfo}; + return {{std::move(input), tarballInfo}}; } - std::pair, Input> - getAccessor(const Settings & settings, Store & store, const Input & _input) const override + std::optional, Input>> + getAccessor(const Settings & settings, Store & store, const Input & _input, bool fastOnly) const override { - auto [input, tarballInfo] = downloadArchive(settings, store, _input); + auto res = downloadArchive(settings, store, _input, fastOnly); + if (fastOnly && !res) + return std::nullopt; + assert(res); + auto [input, tarballInfo] = *res; #if 0 input.attrs.insert_or_assign("treeHash", tarballInfo.treeHash.gitRev()); @@ -354,9 +363,16 @@ struct GitArchiveInputScheme : InputScheme input.attrs.insert_or_assign("lastModified", uint64_t(tarballInfo.lastModified)); auto accessor = - settings.getTarballCache()->getAccessor(tarballInfo.treeHash, {}, "«" + input.to_string() + "»"); + settings.getTarballCache()->getAccessor(tarballInfo.treeHash, {}, "«" + input.to_string(true) + "»"); + + if (!settings.trustTarballsFromGitForges) + // FIXME: computing the NAR hash here is wasteful if + // copyInputToStore() is just going to hash/copy it as + // well. 
+ input.attrs.insert_or_assign( + "narHash", accessor->hashPath(CanonPath::root).to_string(HashFormat::SRI, true)); - return {accessor, input}; + return {{accessor, input}}; } bool isLocked(const Settings & settings, const Input & input) const override @@ -368,15 +384,10 @@ struct GitArchiveInputScheme : InputScheme return input.getRev().has_value() && (settings.trustTarballsFromGitForges || input.getNarHash().has_value()); } - std::optional experimentalFeature() const override - { - return Xp::Flakes; - } - std::optional getFingerprint(Store & store, const Input & input) const override { if (auto rev = input.getRev()) - return rev->gitRev(); + return "github:" + rev->gitRev(); else return std::nullopt; } @@ -454,8 +465,7 @@ struct GitHubInputScheme : GitArchiveInputScheme : headers.empty() ? "https://%s/%s/%s/archive/%s.tar.gz" : "https://api.%s/repos/%s/%s/tarball/%s"; - const auto url = - fmt(urlFmt, host, getOwner(input), getRepo(input), input.getRev()->to_string(HashFormat::Base16, false)); + const auto url = fmt(urlFmt, host, getOwner(input), getRepo(input), input.getRev()->gitRev()); return DownloadUrl{parseURL(url), headers}; } @@ -542,7 +552,7 @@ struct GitLabInputScheme : GitArchiveInputScheme host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), - input.getRev()->to_string(HashFormat::Base16, false)); + input.getRev()->gitRev()); Headers headers = makeHeadersWithAuthTokens(settings, host, input); return DownloadUrl{parseURL(url), headers}; @@ -638,7 +648,7 @@ struct SourceHutInputScheme : GitArchiveInputScheme host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), - input.getRev()->to_string(HashFormat::Base16, false)); + input.getRev()->gitRev()); Headers headers = makeHeadersWithAuthTokens(settings, host, input); return DownloadUrl{parseURL(url), headers}; diff --git a/src/libfetchers/include/nix/fetchers/cache.hh b/src/libfetchers/include/nix/fetchers/cache.hh index 7219635ec07d..8cac076f1f20 100644 --- 
a/src/libfetchers/include/nix/fetchers/cache.hh +++ b/src/libfetchers/include/nix/fetchers/cache.hh @@ -67,9 +67,9 @@ struct Cache /** * Look up a store path in the cache. The returned store path will - * be valid, but it may be expired. + * be valid (unless `allowInvalid` is true), but it may be expired. */ - virtual std::optional lookupStorePath(Key key, Store & store) = 0; + virtual std::optional lookupStorePath(Key key, Store & store, bool allowInvalid = false) = 0; /** * Look up a store path in the cache. Return nothing if its TTL diff --git a/src/libfetchers/include/nix/fetchers/fetch-settings.hh b/src/libfetchers/include/nix/fetchers/fetch-settings.hh index 8cfa7f6091ec..e2268203b56b 100644 --- a/src/libfetchers/include/nix/fetchers/fetch-settings.hh +++ b/src/libfetchers/include/nix/fetchers/fetch-settings.hh @@ -94,10 +94,7 @@ struct Settings : public Config are subsequently modified. Therefore lock files with dirty locks should generally only be used for local testing, and should not be pushed to other users. - )", - {}, - true, - Xp::Flakes}; + )"}; Setting trustTarballsFromGitForges{ this, @@ -118,16 +115,23 @@ struct Settings : public Config Setting flakeRegistry{ this, - "https://channels.nixos.org/flake-registry.json", + "https://install.determinate.systems/flake-registry/stable/flake-registry.json", "flake-registry", R"( Path or URI of the global flake registry. When empty, disables the global flake registry. - )", - {}, - true, - Xp::Flakes}; + )"}; + + Setting nix219Compat{ + this, + false, + "nix-219-compat", + R"( + If enabled, Nix will generate lock files that are compatible with Nix 2.19. + In particular, Nix will use `git archive` rather than `libgit2` to copy Git inputs. + The resulting locks may not be compatible with Nix >= 2.20. 
+ )"}; ref getCache() const; @@ -135,6 +139,17 @@ struct Settings : public Config private: mutable Sync> _cache; + + mutable Sync> _tarballCache; }; } // namespace nix::fetchers + +namespace nix { + +/** + * @todo Get rid of global setttings variables + */ +extern fetchers::Settings fetchSettings; + +} // namespace nix diff --git a/src/libfetchers/include/nix/fetchers/fetch-to-store.hh b/src/libfetchers/include/nix/fetchers/fetch-to-store.hh index 3a2232302359..e7f880724911 100644 --- a/src/libfetchers/include/nix/fetchers/fetch-to-store.hh +++ b/src/libfetchers/include/nix/fetchers/fetch-to-store.hh @@ -24,7 +24,17 @@ StorePath fetchToStore( PathFilter * filter = nullptr, RepairFlag repair = NoRepair); -fetchers::Cache::Key makeFetchToStoreCacheKey( - const std::string & name, const std::string & fingerprint, ContentAddressMethod method, const std::string & path); +std::pair fetchToStore2( + const fetchers::Settings & settings, + Store & store, + const SourcePath & path, + FetchMode mode, + std::string_view name = "source", + ContentAddressMethod method = ContentAddressMethod::Raw::NixArchive, + PathFilter * filter = nullptr, + RepairFlag repair = NoRepair); + +fetchers::Cache::Key +makeSourcePathToHashCacheKey(const std::string & fingerprint, ContentAddressMethod method, const std::string & path); } // namespace nix diff --git a/src/libfetchers/include/nix/fetchers/fetchers.hh b/src/libfetchers/include/nix/fetchers/fetchers.hh index 32a3d7d9bf5a..0f7d933131e9 100644 --- a/src/libfetchers/include/nix/fetchers/fetchers.hh +++ b/src/libfetchers/include/nix/fetchers/fetchers.hh @@ -61,11 +61,11 @@ public: */ static Input fromAttrs(const Settings & settings, Attrs && attrs); - ParsedURL toURL() const; + ParsedURL toURL(bool abbreviate = false) const; - std::string toURLString(const StringMap & extraQuery = {}) const; + std::string toURLString(const StringMap & extraQuery = {}, bool abbreviate = false) const; - std::string to_string() const; + std::string 
to_string(bool abbreviate = false) const; Attrs toAttrs() const; @@ -113,7 +113,7 @@ public: * Fetch the entire input into the Nix store, returning the * location in the Nix store and the locked input. */ - std::pair fetchToStore(const Settings & settings, Store & store) const; + std::tuple, Input> fetchToStore(const Settings & settings, Store & store) const; /** * Check the locking attributes in `result` against @@ -225,7 +225,7 @@ struct InputScheme */ virtual const std::map & allowedAttrs() const = 0; - virtual ParsedURL toURL(const Input & input) const; + virtual ParsedURL toURL(const Input & input, bool abbreviate = false) const; virtual Input applyOverrides(const Input & input, std::optional ref, std::optional rev) const; @@ -240,8 +240,19 @@ struct InputScheme std::string_view contents, std::optional commitMsg) const; + virtual std::optional, Input>> + getAccessor(const Settings & settings, Store & store, const Input & input, bool fastOnly) const + { + if (fastOnly) + return std::nullopt; + return getAccessor(settings, store, input); + } + virtual std::pair, Input> - getAccessor(const Settings & settings, Store & store, const Input & input) const = 0; + getAccessor(const Settings & settings, Store & store, const Input & input) const + { + return getAccessor(settings, store, input, false).value(); + } /** * Is this `InputScheme` part of an experimental feature? 
diff --git a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh index 5e98caa58165..b53c8db5bd74 100644 --- a/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh +++ b/src/libfetchers/include/nix/fetchers/filtering-source-accessor.hh @@ -52,6 +52,10 @@ struct FilteringSourceAccessor : SourceAccessor std::pair> getFingerprint(const CanonPath & path) override; + std::shared_ptr getProvenance(const CanonPath & path) override; + + void invalidateCache(const CanonPath & path) override; + /** * Call `makeNotAllowedError` to throw a `RestrictedPathError` * exception if `isAllowed()` returns `false` for `path`. diff --git a/src/libfetchers/include/nix/fetchers/git-utils.hh b/src/libfetchers/include/nix/fetchers/git-utils.hh index 5c79f256e864..eada8745c3eb 100644 --- a/src/libfetchers/include/nix/fetchers/git-utils.hh +++ b/src/libfetchers/include/nix/fetchers/git-utils.hh @@ -26,14 +26,23 @@ struct GitAccessorOptions { bool exportIgnore = false; bool smudgeLfs = false; + bool submodules = false; // Currently implemented in GitInputScheme rather than GitAccessor + + std::string makeFingerprint(const Hash & rev) const; }; struct GitRepo { virtual ~GitRepo() {} - static ref - openRepo(const std::filesystem::path & path, bool create = false, bool bare = false, bool packfilesOnly = false); + struct Options + { + bool create = false; + bool bare = false; + bool packfilesOnly = false; + }; + + static ref openRepo(const std::filesystem::path & path, Options options); virtual uint64_t getRevCount(const Hash & rev) = 0; diff --git a/src/libfetchers/include/nix/fetchers/input-cache.hh b/src/libfetchers/include/nix/fetchers/input-cache.hh index 463927ceaee3..4a6a1dff83c6 100644 --- a/src/libfetchers/include/nix/fetchers/input-cache.hh +++ b/src/libfetchers/include/nix/fetchers/input-cache.hh @@ -1,3 +1,5 @@ +#pragma once + #include "nix/fetchers/fetchers.hh" namespace nix::fetchers { diff 
--git a/src/libfetchers/include/nix/fetchers/meson.build b/src/libfetchers/include/nix/fetchers/meson.build index a313b1e0bc0c..f3bb80942a28 100644 --- a/src/libfetchers/include/nix/fetchers/meson.build +++ b/src/libfetchers/include/nix/fetchers/meson.build @@ -10,6 +10,7 @@ headers = files( 'git-lfs-fetch.hh', 'git-utils.hh', 'input-cache.hh', + 'provenance.hh', 'registry.hh', 'tarball.hh', ) diff --git a/src/libfetchers/include/nix/fetchers/provenance.hh b/src/libfetchers/include/nix/fetchers/provenance.hh new file mode 100644 index 000000000000..82dc1b3b8109 --- /dev/null +++ b/src/libfetchers/include/nix/fetchers/provenance.hh @@ -0,0 +1,31 @@ +#pragma once + +#include "nix/util/provenance.hh" +#include "nix/fetchers/fetchers.hh" + +namespace nix { + +struct TreeProvenance : Provenance +{ + ref attrs; + + TreeProvenance(const fetchers::Input & input); + + TreeProvenance(ref attrs) + : attrs(std::move(attrs)) + { + } + + nlohmann::json to_json() const override; +}; + +struct FetchurlProvenance : Provenance +{ + std::string url; + + FetchurlProvenance(std::string url, bool sanitize = true); + + nlohmann::json to_json() const override; +}; + +} // namespace nix diff --git a/src/libfetchers/include/nix/fetchers/registry.hh b/src/libfetchers/include/nix/fetchers/registry.hh index dc7e3edb590e..ca38dd805d6d 100644 --- a/src/libfetchers/include/nix/fetchers/registry.hh +++ b/src/libfetchers/include/nix/fetchers/registry.hh @@ -39,6 +39,9 @@ struct Registry static std::shared_ptr read(const Settings & settings, const SourcePath & path, RegistryType type); + static std::shared_ptr + read(const Settings & settings, std::string_view whence, std::string_view jsonStr, RegistryType type); + void write(const std::filesystem::path & path); void add(const Input & from, const Input & to, const Attrs & extraAttrs); diff --git a/src/libfetchers/indirect.cc b/src/libfetchers/indirect.cc index b2a41a7421fd..e629dcbac6b0 100644 --- a/src/libfetchers/indirect.cc +++ 
b/src/libfetchers/indirect.cc @@ -100,7 +100,7 @@ struct IndirectInputScheme : InputScheme return input; } - ParsedURL toURL(const Input & input) const override + ParsedURL toURL(const Input & input, bool abbreviate) const override { ParsedURL url{ .scheme = "flake", @@ -131,11 +131,6 @@ struct IndirectInputScheme : InputScheme throw Error("indirect input '%s' cannot be fetched directly", input.to_string()); } - std::optional experimentalFeature() const override - { - return Xp::Flakes; - } - bool isDirect(const Input & input) const override { return false; diff --git a/src/libfetchers/mercurial.cc b/src/libfetchers/mercurial.cc index dd31e224f6cc..f9297ce8c2f1 100644 --- a/src/libfetchers/mercurial.cc +++ b/src/libfetchers/mercurial.cc @@ -119,7 +119,7 @@ struct MercurialInputScheme : InputScheme return input; } - ParsedURL toURL(const Input & input) const override + ParsedURL toURL(const Input & input, bool abbreviate) const override { auto url = parseURL(getStrAttr(input.attrs, "url")); url.scheme = "hg+" + url.scheme; @@ -247,9 +247,7 @@ struct MercurialInputScheme : InputScheme auto revInfoKey = [&](const Hash & rev) { if (rev.algo != HashAlgorithm::SHA1) - throw Error( - "Hash '%s' is not supported by Mercurial. Only sha1 is supported.", - rev.to_string(HashFormat::Base16, true)); + throw Error("Hash '%s' is not supported by Mercurial. 
Only sha1 is supported.", rev.gitRev()); return Cache::Key{"hgRev", {{"store", store.storeDir}, {"name", name}, {"rev", input.getRev()->gitRev()}}}; }; @@ -356,7 +354,7 @@ struct MercurialInputScheme : InputScheme auto storePath = fetchToStore(settings, store, input); auto accessor = store.requireStoreObjectAccessor(storePath); - accessor->setPathDisplay("«" + input.to_string() + "»"); + accessor->setPathDisplay("«" + input.to_string(true) + "»"); return {accessor, input}; } @@ -369,7 +367,7 @@ struct MercurialInputScheme : InputScheme std::optional getFingerprint(Store & store, const Input & input) const override { if (auto rev = input.getRev()) - return rev->gitRev(); + return "hg:" + rev->gitRev(); else return std::nullopt; } diff --git a/src/libfetchers/meson.build b/src/libfetchers/meson.build index d34dd4f434d1..2a3d356fe1c7 100644 --- a/src/libfetchers/meson.build +++ b/src/libfetchers/meson.build @@ -7,6 +7,8 @@ project( # TODO(Qyriad): increase the warning level 'warning_level=1', 'errorlogs=true', # Please print logs for tests that fail + 'unity=on', + 'unity_size=1024', ], meson_version : '>= 1.1', license : 'LGPL-2.1-or-later', @@ -35,6 +37,7 @@ subdir('nix-meson-build-support/common') sources = files( 'attrs.cc', + 'builtin.cc', 'cache.cc', 'fetch-settings.cc', 'fetch-to-store.cc', @@ -48,12 +51,20 @@ sources = files( 'input-cache.cc', 'mercurial.cc', 'path.cc', + 'provenance.cc', 'registry.cc', 'tarball.cc', ) subdir('include/nix/fetchers') +# Generate builtin-flake-registry.json.gen.hh +subdir('nix-meson-build-support/generate-header') + +sources += gen_header.process( + 'builtin-flake-registry.json', +) + subdir('nix-meson-build-support/export-all-symbols') subdir('nix-meson-build-support/windows-version') @@ -64,7 +75,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ 
static initializers install : true, cpp_pch : do_pch ? [ 'pch/precompiled-headers.hh' ] : [], ) diff --git a/src/libfetchers/package.nix b/src/libfetchers/package.nix index 14592087999c..1a30ac293018 100644 --- a/src/libfetchers/package.nix +++ b/src/libfetchers/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-fetchers"; + pname = "determinate-nix-fetchers"; inherit version; workDir = ./.; @@ -28,6 +28,7 @@ mkMesonLibrary (finalAttrs: { ./.version ./meson.build ./include/nix/fetchers/meson.build + ./builtin-flake-registry.json (fileset.fileFilter (file: file.hasExt "cc") ./.) (fileset.fileFilter (file: file.hasExt "hh") ./.) ]; diff --git a/src/libfetchers/path.cc b/src/libfetchers/path.cc index 7f48ce07bb9f..dccc75a89875 100644 --- a/src/libfetchers/path.cc +++ b/src/libfetchers/path.cc @@ -87,7 +87,7 @@ struct PathInputScheme : InputScheme return input; } - ParsedURL toURL(const Input & input) const override + ParsedURL toURL(const Input & input, bool abbreviate) const override { auto query = attrsToQuery(input.attrs); query.erase("path"); @@ -138,54 +138,36 @@ struct PathInputScheme : InputScheme throw Error("cannot fetch input '%s' because it uses a relative path", input.to_string()); } - std::pair, Input> - getAccessor(const Settings & settings, Store & store, const Input & _input) const override + std::optional, Input>> + getAccessor(const Settings & settings, Store & store, const Input & _input, bool fastOnly) const override { + // Note: fastOnly is ignored because the path fetcher is always fast. + Input input(_input); - auto path = getStrAttr(input.attrs, "path"); auto absPath = getAbsPath(input); // FIXME: check whether access to 'path' is allowed. 
+ + auto accessor = makeFSSourceAccessor(absPath); + auto storePath = store.maybeParseStorePath(absPath.string()); - if (storePath) + if (storePath) { store.addTempRoot(*storePath); - time_t mtime = 0; - if (!storePath || storePath->name() != "source" || !store.isValidPath(*storePath)) { - Activity act(*logger, lvlTalkative, actUnknown, fmt("copying %s to the store", absPath)); - // FIXME: try to substitute storePath. - auto src = sinkToSource( - [&](Sink & sink) { mtime = dumpPathAndGetMtime(absPath.string(), sink, defaultPathFilter); }); - storePath = store.addToStoreFromDump(*src, "source"); + // To prevent `fetchToStore()` copying the path again to Nix + // store, pre-create an entry in the fetcher cache. + auto info = store.maybeQueryPathInfo(*storePath); + if (info) { + accessor->fingerprint = fmt("path:%s", info->narHash.to_string(HashFormat::SRI, true)); + settings.getCache()->upsert( + makeSourcePathToHashCacheKey(*accessor->fingerprint, ContentAddressMethod::Raw::NixArchive, "/"), + {{"hash", info->narHash.to_string(HashFormat::SRI, true)}}); + } } - auto accessor = store.requireStoreObjectAccessor(*storePath); - - // To prevent `fetchToStore()` copying the path again to Nix - // store, pre-create an entry in the fetcher cache. - auto info = store.queryPathInfo(*storePath); - accessor->fingerprint = - fmt("path:%s", store.queryPathInfo(*storePath)->narHash.to_string(HashFormat::SRI, true)); - settings.getCache()->upsert( - makeFetchToStoreCacheKey( - input.getName(), *accessor->fingerprint, ContentAddressMethod::Raw::NixArchive, "/"), - store, - {}, - *storePath); - - /* Trust the lastModified value supplied by the user, if - any. It's not a "secure" attribute so we don't care. 
*/ - if (!input.getLastModified()) - input.attrs.insert_or_assign("lastModified", uint64_t(mtime)); - - return {accessor, std::move(input)}; - } - - std::optional experimentalFeature() const override - { - return Xp::Flakes; + return {{accessor, std::move(input)}}; } }; diff --git a/src/libfetchers/provenance.cc b/src/libfetchers/provenance.cc new file mode 100644 index 000000000000..e984b2271014 --- /dev/null +++ b/src/libfetchers/provenance.cc @@ -0,0 +1,58 @@ +#include "nix/fetchers/provenance.hh" +#include "nix/fetchers/attrs.hh" +#include "nix/util/json-utils.hh" + +#include + +namespace nix { + +TreeProvenance::TreeProvenance(const fetchers::Input & input) + : attrs(make_ref([&]() { + // Remove the narHash attribute from the provenance info, as it's redundant (it's already recorded in the store + // path info). + auto attrs2 = input.attrs; + attrs2.erase("narHash"); + return fetchers::attrsToJSON(attrs2); + }())) +{ +} + +nlohmann::json TreeProvenance::to_json() const +{ + return nlohmann::json{ + {"type", "tree"}, + {"attrs", *attrs}, + }; +} + +Provenance::Register registerTreeProvenance("tree", [](nlohmann::json json) { + auto & obj = getObject(json); + auto & attrsJson = valueAt(obj, "attrs"); + return make_ref(make_ref(attrsJson)); +}); + +FetchurlProvenance::FetchurlProvenance(std::string _url, bool sanitize) + : url(std::move(_url)) +{ + if (sanitize) { + try { + url = parseURL(url, true).renderSanitized(); + } catch (BadURL &) { + } + } +} + +nlohmann::json FetchurlProvenance::to_json() const +{ + return nlohmann::json{ + {"type", "fetchurl"}, + {"url", url}, + }; +} + +Provenance::Register registerFetchurlProvenance("fetchurl", [](nlohmann::json json) { + auto & obj = getObject(json); + return make_ref(getString(valueAt(obj, "url")), false); +}); + +} // namespace nix diff --git a/src/libfetchers/registry.cc b/src/libfetchers/registry.cc index f96ef89b3ee0..83de80bbccfd 100644 --- a/src/libfetchers/registry.cc +++ b/src/libfetchers/registry.cc @@ 
-14,14 +14,24 @@ std::shared_ptr Registry::read(const Settings & settings, const Source { debug("reading registry '%s'", path); - auto registry = std::make_shared(type); - if (!path.pathExists()) return std::make_shared(type); try { + return read(settings, path.to_string(), path.readFile(), type); + } catch (Error & e) { + warn("cannot read flake registry '%s': %s", path, e.what()); + return std::make_shared(type); + } +} - auto json = nlohmann::json::parse(path.readFile()); +std::shared_ptr +Registry::read(const Settings & settings, std::string_view whence, std::string_view jsonStr, RegistryType type) +{ + auto registry = std::make_shared(type); + + try { + auto json = nlohmann::json::parse(jsonStr); auto version = json.value("version", 0); @@ -45,12 +55,10 @@ std::shared_ptr Registry::read(const Settings & settings, const Source } else - throw Error("flake registry '%s' has unsupported version %d", path, version); + warn("flake registry '%s' has unsupported version %d", whence, version); } catch (nlohmann::json::exception & e) { - warn("cannot parse flake registry '%s': %s", path, e.what()); - } catch (Error & e) { - warn("cannot read flake registry '%s': %s", path, e.what()); + warn("cannot parse flake registry '%s': %s", whence, e.what()); } return registry; @@ -139,24 +147,38 @@ void overrideRegistry(const Input & from, const Input & to, const Attrs & extraA static std::shared_ptr getGlobalRegistry(const Settings & settings, Store & store) { static auto reg = [&]() { - auto path = settings.flakeRegistry.get(); - if (path == "") { - return std::make_shared(Registry::Global); // empty registry - } + try { + auto path = settings.flakeRegistry.get(); + if (path == "") { + return std::make_shared(Registry::Global); // empty registry + } - return Registry::read( - settings, - [&] -> SourcePath { - if (!isAbsolute(path)) { - auto storePath = downloadFile(store, settings, path, "flake-registry.json").storePath; - if (auto store2 = dynamic_cast(&store)) - 
store2->addPermRoot(storePath, (getCacheDir() / "flake-registry.json").string()); - return {store.requireStoreObjectAccessor(storePath)}; - } else { - return SourcePath{getFSSourceAccessor(), CanonPath{path}}.resolveSymlinks(); - } - }(), - Registry::Global); + return Registry::read( + settings, + [&] -> SourcePath { + if (!isAbsolute(path)) { + auto storePath = downloadFile(store, settings, path, "flake-registry.json").storePath; + if (auto store2 = dynamic_cast(&store)) + store2->addPermRoot(storePath, (getCacheDir() / "flake-registry.json").string()); + return {store.requireStoreObjectAccessor(storePath)}; + } else { + return SourcePath{getFSSourceAccessor(), CanonPath{path}}.resolveSymlinks(); + } + }(), + Registry::Global); + } catch (Error & e) { + warn( + "cannot fetch global flake registry '%s', will use builtin fallback registry: %s", + settings.flakeRegistry.get(), + e.info().msg); + // Use builtin registry as fallback + return Registry::read( + settings, + "builtin flake registry", +#include "builtin-flake-registry.json.gen.hh" + , + Registry::Global); + } }(); return reg; diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc index b1ebd749df62..4ecf7ba9e194 100644 --- a/src/libfetchers/tarball.cc +++ b/src/libfetchers/tarball.cc @@ -9,6 +9,9 @@ #include "nix/store/store-api.hh" #include "nix/fetchers/git-utils.hh" #include "nix/fetchers/fetch-settings.hh" +#include "nix/fetchers/provenance.hh" + +#include namespace nix::fetchers { @@ -83,6 +86,8 @@ DownloadFileResult downloadFile( }, hashString(HashAlgorithm::SHA256, sink.s)); info.narSize = sink.s.size(); + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) + info.provenance = std::make_shared(request.uri.to_string()); auto source = StringSource{sink.s}; store.addToStore(info, source, NoRepair, NoCheckSigs); storePath = std::move(info.path); @@ -104,8 +109,12 @@ DownloadFileResult downloadFile( }; } -static DownloadTarballResult downloadTarball_( - const Settings & settings, 
const std::string & urlS, const Headers & headers, const std::string & displayPrefix) +static std::optional downloadTarball_( + const Settings & settings, + const std::string & urlS, + const Headers & headers, + const std::string & displayPrefix, + bool fastOnly) { ParsedURL url = parseURL(urlS); @@ -148,6 +157,9 @@ static DownloadTarballResult downloadTarball_( `tarballTtl`, so no need to check the server. */ return attrsToResult(cached->value); + if (fastOnly) + return std::nullopt; + auto _res = std::make_shared>(); auto source = sinkToSource([&](Sink & sink) { @@ -374,7 +386,7 @@ struct CurlInputScheme : InputScheme return input; } - ParsedURL toURL(const Input & input) const override + ParsedURL toURL(const Input & input, bool abbreviate) const override { auto url = parseURL(getStrAttr(input.attrs, "url")); // NAR hashes are preferred over file hashes since tar/zip @@ -429,7 +441,7 @@ struct FileInputScheme : CurlInputScheme auto accessor = ref{store.getFSAccessor(file.storePath)}; - accessor->setPathDisplay("«" + input.to_string() + "»"); + accessor->setPathDisplay("«" + input.to_string(true) + "»"); return {accessor, input}; } @@ -479,12 +491,16 @@ struct TarballInputScheme : CurlInputScheme : (requireTree || hasTarballExtension(url))); } - std::pair, Input> - getAccessor(const Settings & settings, Store & store, const Input & _input) const override + std::optional, Input>> + getAccessor(const Settings & settings, Store & store, const Input & _input, bool fastOnly) const override { auto input(_input); - auto result = downloadTarball_(settings, getStrAttr(input.attrs, "url"), {}, "«" + input.to_string() + "»"); + auto res = + downloadTarball_(settings, getStrAttr(input.attrs, "url"), {}, "«" + input.to_string(true) + "»", fastOnly); + if (!res) + return std::nullopt; + auto & result = *res; if (result.immutableUrl) { auto immutableInput = Input::fromURL(settings, *result.immutableUrl); @@ -502,15 +518,15 @@ struct TarballInputScheme : CurlInputScheme 
"narHash", settings.getTarballCache()->treeHashToNarHash(settings, result.treeHash).to_string(HashFormat::SRI, true)); - return {result.accessor, input}; + return {{result.accessor, input}}; } std::optional getFingerprint(Store & store, const Input & input) const override { if (auto narHash = input.getNarHash()) - return narHash->to_string(HashFormat::SRI, true); + return "tarball:" + narHash->to_string(HashFormat::SRI, true); else if (auto rev = input.getRev()) - return rev->gitRev(); + return "tarball:" + rev->gitRev(); else return std::nullopt; } diff --git a/src/libflake-c/meson.build b/src/libflake-c/meson.build index fddb39bdf96b..d0055e5d98fc 100644 --- a/src/libflake-c/meson.build +++ b/src/libflake-c/meson.build @@ -57,7 +57,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libflake-c/nix_api_flake.cc b/src/libflake-c/nix_api_flake.cc index 32329585a667..793db44b438c 100644 --- a/src/libflake-c/nix_api_flake.cc +++ b/src/libflake-c/nix_api_flake.cc @@ -206,4 +206,20 @@ nix_value * nix_locked_flake_get_output_attrs( NIXC_CATCH_ERRS_NULL } +nix_err nix_locked_flake_read_path( + nix_c_context * context, + nix_locked_flake * lockedFlake, + const char * path, + nix_get_string_callback callback, + void * user_data) +{ + nix_clear_err(context); + try { + auto source_path = lockedFlake->lockedFlake->flake.path.parent() / nix::CanonPath(path); + auto v = source_path.readFile(); + return call_nix_get_string_callback(v, callback, user_data); + } + NIXC_CATCH_ERRS +} + } // extern "C" diff --git a/src/libflake-c/nix_api_flake.h b/src/libflake-c/nix_api_flake.h index a1a7060a6144..925463483119 100644 --- a/src/libflake-c/nix_api_flake.h +++ b/src/libflake-c/nix_api_flake.h @@ -238,6 +238,23 @@ void 
nix_flake_reference_free(nix_flake_reference * store); nix_value * nix_locked_flake_get_output_attrs( nix_c_context * context, nix_flake_settings * settings, EvalState * evalState, nix_locked_flake * lockedFlake); +/** + * @brief Reads a file within the flake. + * @note The callback borrows the string only for the duration of the call. + * + * @param[out] context Optional, stores error information + * @param[in] locked_flake the flake to get the path for + * @param[in] path The path within the flake. + * @param[in] callback The callback to call with the string + * @param[in] user_data Additional data to pass for the callback + */ +nix_err nix_locked_flake_read_path( + nix_c_context * context, + nix_locked_flake * lockedFlake, + const char * path, + nix_get_string_callback callback, + void * user_data); + #ifdef __cplusplus } // extern "C" #endif diff --git a/src/libflake-c/package.nix b/src/libflake-c/package.nix index 8c6883d9cf95..9ae3ec695154 100644 --- a/src/libflake-c/package.nix +++ b/src/libflake-c/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-flake-c"; + pname = "determinate-nix-flake-c"; inherit version; workDir = ./.; diff --git a/src/libflake-tests/flakeref.cc b/src/libflake-tests/flakeref.cc index eb8b56ea29de..3cc655907b3f 100644 --- a/src/libflake-tests/flakeref.cc +++ b/src/libflake-tests/flakeref.cc @@ -17,8 +17,6 @@ namespace nix { TEST(parseFlakeRef, path) { - experimentalFeatureSettings.experimentalFeatures.get().insert(Xp::Flakes); - fetchers::Settings fetchSettings; { @@ -67,8 +65,6 @@ TEST(parseFlakeRef, path) TEST(parseFlakeRef, GitArchiveInput) { - experimentalFeatureSettings.experimentalFeatures.get().insert(Xp::Flakes); - fetchers::Settings fetchSettings; { @@ -111,7 +107,6 @@ class InputFromURLTest : public ::testing::WithParamInterfacetype() == nPath) { + auto path = state.realisePath(pos, *args[0]); + callFlake(state, lockFlake(settings, state, path, lockFlags), v); + } else { + NixStringContext 
context; + std::string flakeRefS( + state.forceString(*args[0], context, pos, "while evaluating the argument passed to builtins.getFlake")); + auto rewrites = state.realiseContext(context); + flakeRefS = state.devirtualize(rewriteStrings(flakeRefS, rewrites), context); + if (hasContext(context)) + // FIXME: this should really be an error. + warn( + "In 'builtins.getFlake', the flakeref '%s' has string context, but that's not allowed. This may become a fatal error in the future.", + flakeRefS); + + auto flakeRef = nix::parseFlakeRef(state.fetchSettings, flakeRefS, {}, true); + if (state.settings.pureEval && !flakeRef.input.isLocked(state.fetchSettings)) + throw Error( + "cannot call 'getFlake' on unlocked flake reference '%s', at %s (use --impure to override)", + flakeRefS, + state.positions[pos]); + + callFlake(state, lockFlake(settings, state, flakeRef, lockFlags), v); + } }; return PrimOp{ @@ -78,7 +89,6 @@ PrimOp getFlake(const Settings & settings) ``` )", .fun = prim_getFlake, - .experimentalFeature = Xp::Flakes, }; } @@ -120,14 +130,15 @@ nix::PrimOp parseFlakeRef({ ``` )", .fun = prim_parseFlakeRef, - .experimentalFeature = Xp::Flakes, }); static void prim_flakeRefToString(EvalState & state, const PosIdx pos, Value ** args, Value & v) { state.forceAttrs(*args[0], noPos, "while evaluating the argument passed to builtins.flakeRefToString"); fetchers::Attrs attrs; + NixStringContext context; for (const auto & attr : *args[0]->attrs()) { + state.forceValue(*attr.value, attr.pos); auto t = attr.value->type(); if (t == nInt) { auto intValue = attr.value->integer().value; @@ -144,7 +155,9 @@ static void prim_flakeRefToString(EvalState & state, const PosIdx pos, Value ** } else if (t == nBool) { attrs.emplace(state.symbols[attr.name], Explicit{attr.value->boolean()}); } else if (t == nString) { - attrs.emplace(state.symbols[attr.name], std::string(attr.value->string_view())); + auto s = state.forceString( + *attr.value, context, attr.pos, "while evaluating an 
attribute in 'builtins.flakeRefToString'"); + attrs.emplace(state.symbols[attr.name], std::string(s)); } else { state .error( @@ -156,7 +169,7 @@ static void prim_flakeRefToString(EvalState & state, const PosIdx pos, Value ** } } auto flakeRef = FlakeRef::fromAttrs(state.fetchSettings, attrs); - v.mkString(flakeRef.to_string(), state.mem); + v.mkString(flakeRef.to_string(), context, state.mem); } nix::PrimOp flakeRefToString({ @@ -180,7 +193,6 @@ nix::PrimOp flakeRefToString({ ``` )", .fun = prim_flakeRefToString, - .experimentalFeature = Xp::Flakes, }); } // namespace nix::flake::primops diff --git a/src/libflake/flake.cc b/src/libflake/flake.cc index 9f7476bd0e27..5cea4e567f62 100644 --- a/src/libflake/flake.cc +++ b/src/libflake/flake.cc @@ -36,6 +36,7 @@ #include "nix/expr/value-to-json.hh" #include "nix/fetchers/fetch-to-store.hh" #include "nix/util/memory-source-accessor.hh" +#include "nix/util/mounted-source-accessor.hh" #include "nix/fetchers/input-cache.hh" #include "nix/expr/attr-set.hh" #include "nix/expr/eval-error.hh" @@ -66,20 +67,22 @@ namespace nix { struct SourceAccessor; using namespace flake; +using namespace fetchers; namespace flake { static void forceTrivialValue(EvalState & state, Value & value, const PosIdx pos) { - if (value.isThunk() && value.isTrivial()) + if (value.isTrivial()) state.forceValue(value, pos); } static void expectType(EvalState & state, ValueType type, Value & value, const PosIdx pos) { forceTrivialValue(state, value, pos); - if (value.type() != type) - throw Error("expected %s but got %s at %s", showType(type), showType(value.type()), state.positions[pos]); + auto t = value.type(); + if (t != type) + throw Error("expected %s but got %s at %s", showType(type), showType(t), state.positions[pos]); } static std::pair, fetchers::Attrs> parseFlakeInputs( @@ -90,7 +93,7 @@ static std::pair, fetchers::Attrs> parseFlakeInput const SourcePath & flakeDir, bool allowSelf); -static void parseFlakeInputAttr(EvalState & state, const Attr 
& attr, fetchers::Attrs & attrs) +static void parseFlakeInputAttr(EvalState & state, const nix::Attr & attr, fetchers::Attrs & attrs) { // Allow selecting a subset of enum values #pragma GCC diagnostic push @@ -144,6 +147,7 @@ static FlakeInput parseFlakeInput( auto sUrl = state.symbols.create("url"); auto sFlake = state.symbols.create("flake"); auto sFollows = state.symbols.create("follows"); + auto sBuildTime = state.symbols.create("buildTime"); fetchers::Attrs attrs; std::optional url; @@ -172,6 +176,11 @@ static FlakeInput parseFlakeInput( } else if (attr.name == sFlake) { expectType(state, nBool, *attr.value, attr.pos); input.isFlake = attr.value->boolean(); + } else if (attr.name == sBuildTime) { + expectType(state, nBool, *attr.value, attr.pos); + input.buildTime = attr.value->boolean(); + if (input.buildTime) + experimentalFeatureSettings.require(Xp::BuildTimeFetchTree); } else if (attr.name == sInputs) { input.overrides = parseFlakeInputs(state, attr.value, attr.pos, lockRootAttrPath, flakeDir, false).first; @@ -240,7 +249,7 @@ static std::pair, fetchers::Attrs> parseFlakeInput return {inputs, selfAttrs}; } -static Flake readFlake( +Flake readFlake( EvalState & state, const FlakeRef & originalRef, const FlakeRef & resolvedRef, @@ -260,6 +269,7 @@ static Flake readFlake( .resolvedRef = resolvedRef, .lockedRef = lockedRef, .path = flakePath, + .provenance = flakePath.getProvenance(), }; if (auto description = vInfo.attrs()->get(state.s.description)) { @@ -369,7 +379,8 @@ static Flake getFlake( EvalState & state, const FlakeRef & originalRef, fetchers::UseRegistries useRegistries, - const InputAttrPath & lockRootAttrPath) + const InputAttrPath & lockRootAttrPath, + bool requireLockable) { // Fetch a lazy tree first. 
auto cachedInput = @@ -401,13 +412,14 @@ static Flake getFlake( originalRef, resolvedRef, lockedRef, - state.storePath(state.mountInput(lockedRef.input, originalRef.input, cachedInput.accessor)), + state.storePath(state.mountInput(lockedRef.input, originalRef.input, cachedInput.accessor, requireLockable)), lockRootAttrPath); } -Flake getFlake(EvalState & state, const FlakeRef & originalRef, fetchers::UseRegistries useRegistries) +Flake getFlake( + EvalState & state, const FlakeRef & originalRef, fetchers::UseRegistries useRegistries, bool requireLockable) { - return getFlake(state, originalRef, useRegistries, {}); + return getFlake(state, originalRef, useRegistries, {}, requireLockable); } static LockFile readLockFile(const fetchers::Settings & fetchSettings, const SourcePath & lockFilePath) @@ -416,19 +428,13 @@ static LockFile readLockFile(const fetchers::Settings & fetchSettings, const Sou : LockFile(); } -/* Compute an in-memory lock file for the specified top-level flake, - and optionally write it to file, if the flake is writable. */ -LockedFlake -lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, const LockFlags & lockFlags) +LockedFlake lockFlake( + const Settings & settings, EvalState & state, const FlakeRef & topRef, const LockFlags & lockFlags, Flake flake) { - experimentalFeatureSettings.require(Xp::Flakes); - auto useRegistries = lockFlags.useRegistries.value_or(settings.useRegistries); auto useRegistriesTop = useRegistries ? fetchers::UseRegistries::All : fetchers::UseRegistries::No; auto useRegistriesInputs = useRegistries ? 
fetchers::UseRegistries::Limited : fetchers::UseRegistries::No; - auto flake = getFlake(state, topRef, useRegistriesTop, {}); - if (lockFlags.applyNixConfig) { flake.config.apply(settings); state.store->setOptions(); @@ -608,7 +614,7 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, if (auto resolvedPath = resolveRelativePath()) { return readFlake(state, ref, ref, ref, *resolvedPath, inputAttrPath); } else { - return getFlake(state, ref, useRegistries, inputAttrPath); + return getFlake(state, ref, useRegistriesInputs, inputAttrPath, true); } }; @@ -631,7 +637,11 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, didn't change and there is no override from a higher level flake. */ auto childNode = make_ref( - oldLock->lockedRef, oldLock->originalRef, oldLock->isFlake, oldLock->parentInputAttrPath); + oldLock->lockedRef, + oldLock->originalRef, + oldLock->isFlake, + oldLock->buildTime, + oldLock->parentInputAttrPath); node->inputs.insert_or_assign(id, childNode); @@ -720,12 +730,34 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, auto inputIsOverride = explicitCliOverrides.contains(inputAttrPath); auto ref = (input2.ref && inputIsOverride) ? *input2.ref : *input.ref; + /* Warn against the use of indirect flakerefs + (but only at top-level since we don't want + to annoy users about flakes that are not + under their control). */ + auto warnRegistry = [&](const FlakeRef & resolvedRef) { + if (inputAttrPath.size() == 1 && !input.ref->input.isDirect()) { + std::ostringstream s; + printLiteralString(s, resolvedRef.to_string()); + warn( + "Flake input '%1%' uses the flake registry. " + "Using the registry in flake inputs is deprecated in Determinate Nix. 
" + "To make your flake future-proof, add the following to '%2%':\n" + "\n" + " inputs.%1%.url = %3%;\n" + "\n" + "For more information, see: https://github.com/DeterminateSystems/nix-src/issues/37", + inputAttrPathS, + flake.path, + s.str()); + } + }; + if (input.isFlake) { auto inputFlake = getInputFlake( *input.ref, inputIsOverride ? fetchers::UseRegistries::All : useRegistriesInputs); - auto childNode = - make_ref(inputFlake.lockedRef, ref, true, overriddenParentPath); + auto childNode = make_ref( + inputFlake.lockedRef, ref, true, input.buildTime, overriddenParentPath); node->inputs.insert_or_assign(id, childNode); @@ -747,6 +779,8 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, inputAttrPath, inputFlake.path, false); + + warnRegistry(inputFlake.resolvedRef); } else { @@ -758,16 +792,21 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, auto cachedInput = state.inputCache->getAccessor( state.fetchSettings, *state.store, input.ref->input, useRegistriesInputs); + auto resolvedRef = + FlakeRef(std::move(cachedInput.resolvedInput), input.ref->subdir); auto lockedRef = FlakeRef(std::move(cachedInput.lockedInput), input.ref->subdir); + warnRegistry(resolvedRef); + return { - state.storePath( - state.mountInput(lockedRef.input, input.ref->input, cachedInput.accessor)), + state.storePath(state.mountInput( + lockedRef.input, input.ref->input, cachedInput.accessor, true, true)), lockedRef}; } }(); - auto childNode = make_ref(lockedRef, ref, false, overriddenParentPath); + auto childNode = + make_ref(lockedRef, ref, false, input.buildTime, overriddenParentPath); nodePaths.emplace(childNode, path); @@ -877,13 +916,15 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, CanonPath((topRef.subdir == "" ? 
"" : topRef.subdir + "/") + "flake.lock"), newLockFileS, commitMessage); + + flake.lockFilePath().invalidateCache(); } /* Rewriting the lockfile changed the top-level repo, so we should re-read it. FIXME: we could also just clear the 'rev' field... */ auto prevLockedRef = flake.lockedRef; - flake = getFlake(state, topRef, useRegistriesTop); + flake = getFlake(state, topRef, useRegistriesTop, lockFlags.requireLockable); if (lockFlags.commitLockFile && flake.lockedRef.input.getRev() && prevLockedRef.input.getRev() != flake.lockedRef.input.getRev()) @@ -907,6 +948,23 @@ lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, } } +LockedFlake +lockFlake(const Settings & settings, EvalState & state, const FlakeRef & topRef, const LockFlags & lockFlags) +{ + auto useRegistries = lockFlags.useRegistries.value_or(settings.useRegistries); + auto useRegistriesTop = useRegistries ? fetchers::UseRegistries::All : fetchers::UseRegistries::No; + + return lockFlake(settings, state, topRef, lockFlags, getFlake(state, topRef, useRegistriesTop, {}, false)); +} + +LockedFlake +lockFlake(const Settings & settings, EvalState & state, const SourcePath & flakeDir, const LockFlags & lockFlags) +{ + /* We need a fake flakeref to put in the `Flake` struct, but it's not used for anything. 
*/ + auto fakeRef = parseFlakeRef(state.fetchSettings, "flake:get-flake"); + return lockFlake(settings, state, fakeRef, lockFlags, readFlake(state, fakeRef, fakeRef, fakeRef, flakeDir, {})); +} + static ref makeInternalFS() { auto internalFS = make_ref(MemorySourceAccessor{}); @@ -930,8 +988,6 @@ static Value * requireInternalFile(EvalState & state, CanonPath path) void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & vRes) { - experimentalFeatureSettings.require(Xp::Flakes); - auto [lockFileStr, keyMap] = lockedFlake.lockFile.to_string(); auto overrides = state.buildBindings(lockedFlake.nodePaths.size()); @@ -968,10 +1024,7 @@ void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & vRes) auto vLocks = state.allocValue(); vLocks->mkString(lockFileStr, state.mem); - auto vFetchFinalTree = get(state.internalPrimOps, "fetchFinalTree"); - assert(vFetchFinalTree); - - Value * args[] = {vLocks, &vOverrides, *vFetchFinalTree}; + Value * args[] = {vLocks, &vOverrides}; state.callFunction(*vCallFlake, args, vRes, noPos); } @@ -1002,41 +1055,6 @@ std::optional LockedFlake::getFingerprint(Store & store, const fetc Flake::~Flake() {} -ref openEvalCache(EvalState & state, ref lockedFlake) -{ - auto fingerprint = state.settings.useEvalCache && state.settings.pureEval - ? lockedFlake->getFingerprint(*state.store, state.fetchSettings) - : std::nullopt; - auto rootLoader = [&state, lockedFlake]() { - /* For testing whether the evaluation cache is - complete. 
*/ - if (getEnv("NIX_ALLOW_EVAL").value_or("1") == "0") - throw Error("not everything is cached, but evaluation is not allowed"); - - auto vFlake = state.allocValue(); - callFlake(state, *lockedFlake, *vFlake); - - state.forceAttrs(*vFlake, noPos, "while parsing cached flake data"); - - auto aOutputs = vFlake->attrs()->get(state.symbols.create("outputs")); - assert(aOutputs); - - return aOutputs->value; - }; - - if (fingerprint) { - auto search = state.evalCaches.find(fingerprint.value()); - if (search == state.evalCaches.end()) { - search = state.evalCaches - .emplace(fingerprint.value(), make_ref(fingerprint, state, rootLoader)) - .first; - } - return search->second; - } else { - return make_ref(std::nullopt, state, rootLoader); - } -} - } // namespace flake } // namespace nix diff --git a/src/libflake/flakeref.cc b/src/libflake/flakeref.cc index 0a55ac35cf3a..d186db8ac858 100644 --- a/src/libflake/flakeref.cc +++ b/src/libflake/flakeref.cc @@ -26,6 +26,7 @@ #include "nix/store/outputs-spec.hh" #include "nix/util/ref.hh" #include "nix/util/types.hh" +#include "nix/fetchers/fetch-settings.hh" namespace nix { class Store; @@ -42,12 +43,12 @@ const static std::string subDirElemRegex = "(?:[a-zA-Z0-9_-]+[a-zA-Z0-9._-]*)"; const static std::string subDirRegex = subDirElemRegex + "(?:/" + subDirElemRegex + ")*"; #endif -std::string FlakeRef::to_string() const +std::string FlakeRef::to_string(bool abbreviate) const { StringMap extraQuery; if (subdir != "") extraQuery.insert_or_assign("dir", subdir); - return input.toURLString(extraQuery); + return input.toURLString(extraQuery, abbreviate); } fetchers::Attrs FlakeRef::toAttrs() const @@ -90,7 +91,8 @@ static std::pair fromParsedURL(const fetchers::Settings & fetchSettings, ParsedURL && parsedURL, bool isFlake) { auto dir = getOr(parsedURL.query, "dir", ""); - parsedURL.query.erase("dir"); + if (!fetchSettings.nix219Compat) + parsedURL.query.erase("dir"); std::string fragment; std::swap(fragment, parsedURL.fragment); diff 
--git a/src/libflake/include/nix/flake/flake.hh b/src/libflake/include/nix/flake/flake.hh index c2d597ac15ec..301db4f60137 100644 --- a/src/libflake/include/nix/flake/flake.hh +++ b/src/libflake/include/nix/flake/flake.hh @@ -10,6 +10,7 @@ namespace nix { class EvalState; +struct Provenance; namespace flake { @@ -44,12 +45,18 @@ typedef std::map FlakeInputs; struct FlakeInput { std::optional ref; + /** - * true = process flake to get outputs - * - * false = (fetched) static source path + * Whether to call the `flake.nix` file in this input to get its outputs. */ bool isFlake = true; + + /** + * Whether to fetch this input at evaluation time or at build + * time. + */ + bool buildTime = false; + std::optional follows; FlakeInputs overrides; }; @@ -88,6 +95,11 @@ struct Flake */ SourcePath path; + /** + * Cached provenance of `flake.nix` (equivalent to `path.getProvenance()`). + */ + std::shared_ptr provenance; + /** * Pretend that `lockedRef` is dirty. */ @@ -116,7 +128,8 @@ struct Flake } }; -Flake getFlake(EvalState & state, const FlakeRef & flakeRef, fetchers::UseRegistries useRegistries); +Flake getFlake( + EvalState & state, const FlakeRef & flakeRef, fetchers::UseRegistries useRegistries, bool requireLockable = true); /** * Fingerprint of a locked flake; used as a cache key. @@ -212,17 +225,39 @@ struct LockFlags * for those inputs will be ignored. */ std::set inputUpdates; + + /** + * Whether to require a locked input. + */ + bool requireLockable = true; }; +/** + * Return a `Flake` object representing the flake read from the + * `flake.nix` file in `rootDir`. + */ +Flake readFlake( + EvalState & state, + const FlakeRef & originalRef, + const FlakeRef & resolvedRef, + const FlakeRef & lockedRef, + const SourcePath & rootDir, + const InputAttrPath & lockRootPath); + +/* + * Compute an in-memory lock file for the specified top-level flake, and optionally write it to file, if the flake is + * writable. 
+ */ LockedFlake lockFlake(const Settings & settings, EvalState & state, const FlakeRef & flakeRef, const LockFlags & lockFlags); -void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & v); +LockedFlake lockFlake( + const Settings & settings, EvalState & state, const FlakeRef & topRef, const LockFlags & lockFlags, Flake flake); -/** - * Open an evaluation cache for a flake. - */ -ref openEvalCache(EvalState & state, ref lockedFlake); +LockedFlake +lockFlake(const Settings & settings, EvalState & state, const SourcePath & flakeDir, const LockFlags & lockFlags); + +void callFlake(EvalState & state, const LockedFlake & lockedFlake, Value & v); } // namespace flake @@ -234,11 +269,4 @@ void emitTreeAttrs( bool emptyRevFallback = false, bool forceDirty = false); -/** - * An internal builtin similar to `fetchTree`, except that it - * always treats the input as final (i.e. no attributes can be - * added/removed/changed). - */ -void prim_fetchFinalTree(EvalState & state, const PosIdx pos, Value ** args, Value & v); - } // namespace nix diff --git a/src/libflake/include/nix/flake/flakeref.hh b/src/libflake/include/nix/flake/flakeref.hh index 05c21bd1c6d2..629afab03b5a 100644 --- a/src/libflake/include/nix/flake/flakeref.hh +++ b/src/libflake/include/nix/flake/flakeref.hh @@ -66,8 +66,7 @@ struct FlakeRef { } - // FIXME: change to operator <<. - std::string to_string() const; + std::string to_string(bool abbreviate = false) const; fetchers::Attrs toAttrs() const; diff --git a/src/libflake/include/nix/flake/lockfile.hh b/src/libflake/include/nix/flake/lockfile.hh index c5740a2f114b..1ca7cc3dd305 100644 --- a/src/libflake/include/nix/flake/lockfile.hh +++ b/src/libflake/include/nix/flake/lockfile.hh @@ -37,6 +37,7 @@ struct LockedNode : Node { FlakeRef lockedRef, originalRef; bool isFlake = true; + bool buildTime = false; /* The node relative to which relative source paths (e.g. 'path:../foo') are interpreted. 
*/ @@ -46,10 +47,12 @@ struct LockedNode : Node const FlakeRef & lockedRef, const FlakeRef & originalRef, bool isFlake = true, + bool buildTime = false, std::optional parentInputAttrPath = {}) : lockedRef(std::move(lockedRef)) , originalRef(std::move(originalRef)) , isFlake(isFlake) + , buildTime(buildTime) , parentInputAttrPath(std::move(parentInputAttrPath)) { } diff --git a/src/libflake/include/nix/flake/meson.build b/src/libflake/include/nix/flake/meson.build index fc580164eaec..fbe54f41208b 100644 --- a/src/libflake/include/nix/flake/meson.build +++ b/src/libflake/include/nix/flake/meson.build @@ -6,6 +6,7 @@ headers = files( 'flake.hh', 'flakeref.hh', 'lockfile.hh', + 'provenance.hh', 'settings.hh', 'url-name.hh', ) diff --git a/src/libflake/include/nix/flake/provenance.hh b/src/libflake/include/nix/flake/provenance.hh new file mode 100644 index 000000000000..011744f5e65d --- /dev/null +++ b/src/libflake/include/nix/flake/provenance.hh @@ -0,0 +1,21 @@ +#pragma once + +#include "nix/util/provenance.hh" + +namespace nix { + +struct FlakeProvenance : Provenance +{ + std::shared_ptr next; + std::string flakeOutput; + bool pure = true; + + FlakeProvenance(std::shared_ptr next, std::string flakeOutput, bool pure) + : next(std::move(next)) + , flakeOutput(std::move(flakeOutput)) + , pure(pure) {}; + + nlohmann::json to_json() const override; +}; + +} // namespace nix diff --git a/src/libflake/include/nix/flake/settings.hh b/src/libflake/include/nix/flake/settings.hh index 7187a3294a34..05b36f5b779c 100644 --- a/src/libflake/include/nix/flake/settings.hh +++ b/src/libflake/include/nix/flake/settings.hh @@ -21,13 +21,7 @@ struct Settings : public Config void configureEvalSettings(nix::EvalSettings & evalSettings) const; Setting useRegistries{ - this, - true, - "use-registries", - "Whether to use flake registries to resolve flake references.", - {}, - true, - Xp::Flakes}; + this, true, "use-registries", "Whether to use flake registries to resolve flake references.", 
{}, true}; Setting acceptFlakeConfig{ this, @@ -35,8 +29,7 @@ struct Settings : public Config "accept-flake-config", "Whether to accept Nix configuration settings from a flake without prompting.", {}, - true, - Xp::Flakes}; + true}; Setting commitLockFileSummary{ this, @@ -47,8 +40,7 @@ struct Settings : public Config empty, the summary is generated based on the action performed. )", {"commit-lockfile-summary"}, - true, - Xp::Flakes}; + true}; }; } // namespace nix::flake diff --git a/src/libflake/lockfile.cc b/src/libflake/lockfile.cc index f2914feab784..b287db5b8e59 100644 --- a/src/libflake/lockfile.cc +++ b/src/libflake/lockfile.cc @@ -71,6 +71,7 @@ LockedNode::LockedNode(const fetchers::Settings & fetchSettings, const nlohmann: : lockedRef(getFlakeRef(fetchSettings, json, "locked", "info")) // FIXME: remove "info" , originalRef(getFlakeRef(fetchSettings, json, "original", nullptr)) , isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true) + , buildTime(json.find("buildTime") != json.end() ? (bool) json["buildTime"] : false) , parentInputAttrPath( json.find("parent") != json.end() ? (std::optional) json["parent"] : std::nullopt) { @@ -229,13 +230,11 @@ std::pair LockFile::toJSON() const if (auto lockedNode = node.dynamic_pointer_cast()) { n["original"] = fetchers::attrsToJSON(lockedNode->originalRef.toAttrs()); n["locked"] = fetchers::attrsToJSON(lockedNode->lockedRef.toAttrs()); - /* For backward compatibility, omit the "__final" - attribute. We never allow non-final inputs in lock files - anyway. 
*/ assert(lockedNode->lockedRef.input.isFinal() || lockedNode->lockedRef.input.isRelative()); - n["locked"].erase("__final"); if (!lockedNode->isFlake) n["flake"] = false; + if (lockedNode->buildTime) + n["buildTime"] = true; if (lockedNode->parentInputAttrPath) n["parent"] = *lockedNode->parentInputAttrPath; } @@ -339,7 +338,7 @@ std::map LockFile::getAllInputs() const static std::string describe(const FlakeRef & flakeRef) { - auto s = fmt("'%s'", flakeRef.to_string()); + auto s = fmt("'%s'", flakeRef.to_string(true)); if (auto lastModified = flakeRef.input.getLastModified()) s += fmt(" (%s)", std::put_time(std::gmtime(&*lastModified), "%Y-%m-%d")); diff --git a/src/libflake/meson.build b/src/libflake/meson.build index 58916ecd9ab2..516ef7ff3383 100644 --- a/src/libflake/meson.build +++ b/src/libflake/meson.build @@ -45,6 +45,7 @@ sources = files( 'flake.cc', 'flakeref.cc', 'lockfile.cc', + 'provenance.cc', 'settings.cc', 'url-name.cc', ) @@ -62,7 +63,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libflake/package.nix b/src/libflake/package.nix index dd442a44ec9a..2b0c827a09ce 100644 --- a/src/libflake/package.nix +++ b/src/libflake/package.nix @@ -18,7 +18,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-flake"; + pname = "determinate-nix-flake"; inherit version; workDir = ./.; diff --git a/src/libflake/provenance.cc b/src/libflake/provenance.cc new file mode 100644 index 000000000000..c80c4154561a --- /dev/null +++ b/src/libflake/provenance.cc @@ -0,0 +1,28 @@ +#include "nix/flake/provenance.hh" +#include "nix/util/json-utils.hh" + +#include + +namespace nix { + +nlohmann::json FlakeProvenance::to_json() const +{ + return nlohmann::json{ + {"type", "flake"}, + {"next", next ? 
next->to_json() : nlohmann::json(nullptr)}, + {"flakeOutput", flakeOutput}, + {"pure", pure}}; +} + +Provenance::Register registerFlakeProvenance("flake", [](nlohmann::json json) { + auto & obj = getObject(json); + std::shared_ptr next; + if (auto p = optionalValueAt(obj, "next"); p && !p->is_null()) + next = Provenance::from_json(*p); + bool pure = true; + if (auto p = optionalValueAt(obj, "pure")) + pure = getBoolean(*p); + return make_ref(next, getString(valueAt(obj, "flakeOutput")), pure); +}); + +} // namespace nix diff --git a/src/libmain-c/meson.build b/src/libmain-c/meson.build index 36332fdb70a1..dd02c20a4c67 100644 --- a/src/libmain-c/meson.build +++ b/src/libmain-c/meson.build @@ -49,7 +49,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libmain-c/package.nix b/src/libmain-c/package.nix index f019a917d360..17858d56f2e5 100644 --- a/src/libmain-c/package.nix +++ b/src/libmain-c/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-main-c"; + pname = "determinate-nix-main-c"; inherit version; workDir = ./.; diff --git a/src/libmain/include/nix/main/shared.hh b/src/libmain/include/nix/main/shared.hh index 43069ba82bda..800018290f69 100644 --- a/src/libmain/include/nix/main/shared.hh +++ b/src/libmain/include/nix/main/shared.hh @@ -29,6 +29,8 @@ void parseCmdLine( const Strings & args, std::function parseArg); +std::string version(); + void printVersion(const std::string & programName); /** diff --git a/src/libmain/meson.build b/src/libmain/meson.build index 2ac59924e592..d9d5e9362d5e 100644 --- a/src/libmain/meson.build +++ b/src/libmain/meson.build @@ -81,7 +81,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : 
linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libmain/package.nix b/src/libmain/package.nix index 7b0a4dee7dad..119e1f1aca59 100644 --- a/src/libmain/package.nix +++ b/src/libmain/package.nix @@ -18,7 +18,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-main"; + pname = "determinate-nix-main"; inherit version; workDir = ./.; diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc index a973102f9509..05fd89827869 100644 --- a/src/libmain/progress-bar.cc +++ b/src/libmain/progress-bar.cc @@ -51,6 +51,7 @@ class ProgressBar : public Logger ActivityId parent; std::optional name; std::chrono::time_point startTime; + bool logged = false; }; struct ActivitiesByType @@ -142,8 +143,14 @@ class ProgressBar : public Logger return; } - if (state->active) + if (state->active) { writeToStderr("\r\e[K"); + /* Show activities that were previously only shown on the + progress bar. Otherwise the user won't know what's + happening. 
*/ + for (auto & act : state->activities) + logActivity(*state, lvlNotice, act); + } } void resume() override @@ -196,6 +203,14 @@ class ProgressBar : public Logger } } + void logActivity(State & state, Verbosity lvl, ActInfo & act) + { + if (!act.logged && lvl <= verbosity && !act.s.empty() && act.type != actBuildWaiting) { + log(state, lvl, act.s + "..."); + act.logged = true; + } + } + void startActivity( ActivityId act, Verbosity lvl, @@ -206,15 +221,14 @@ class ProgressBar : public Logger { auto state(state_.lock()); - if (lvl <= verbosity && !s.empty() && type != actBuildWaiting) - log(*state, lvl, s + "..."); - state->activities.emplace_back( ActInfo{.s = s, .type = type, .parent = parent, .startTime = std::chrono::steady_clock::now()}); auto i = std::prev(state->activities.end()); state->its.emplace(act, i); state->activitiesByType[type].its.emplace(act, i); + logActivity(*state, lvl, *i); + if (type == actBuild) { std::string name(storePathToName(getS(fields, 0))); if (hasSuffix(name, ".drv")) @@ -456,11 +470,7 @@ class ProgressBar : public Logger } } - auto width = getWindowSize().second; - if (width <= 0) - width = std::numeric_limits::max(); - - redraw("\r" + filterANSIEscapes(line, false, width) + ANSI_NORMAL + "\e[K"); + redraw("\r" + filterANSIEscapes(line, false, getWindowWidth()) + ANSI_NORMAL + "\e[K"); return nextWakeup; } diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index ad1caae2b644..815c60cf82f3 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #ifdef __linux__ @@ -116,6 +117,26 @@ std::string getArg(const std::string & opt, Strings::iterator & i, const Strings static void sigHandler(int signo) {} #endif +/** + * Increase the open file soft limit to the hard limit. On some + * platforms (macOS), the default soft limit is very low, but the hard + * limit is high. So let's just raise it the maximum permitted. 
+ */ +void bumpFileLimit() +{ +#ifndef _WIN32 + struct rlimit limit; + if (getrlimit(RLIMIT_NOFILE, &limit) != 0) + return; + + if (limit.rlim_cur < limit.rlim_max) { + limit.rlim_cur = limit.rlim_max; + // Ignore errors, this is best effort. + setrlimit(RLIMIT_NOFILE, &limit); + } +#endif +} + void initNix(bool loadConfig) { /* Turn on buffering for cerr. */ @@ -183,6 +204,8 @@ void initNix(bool loadConfig) now. In particular, store objects should be readable by everybody. */ umask(0022); + + bumpFileLimit(); } LegacyArgs::LegacyArgs( @@ -292,9 +315,14 @@ void parseCmdLine( LegacyArgs(programName, parseArg).parseCmdline(args); } +std::string version() +{ + return fmt("(Determinate Nix %s) %s", determinateNixVersion, nixVersion); +} + void printVersion(const std::string & programName) { - std::cout << fmt("%1% (Nix) %2%", programName, nixVersion) << std::endl; + std::cout << fmt("%s %s", programName, version()) << std::endl; if (verbosity > lvlInfo) { Strings cfg; #if NIX_USE_BOEHMGC @@ -326,7 +354,7 @@ int handleExceptions(const std::string & programName, std::function fun) return e.status; } catch (UsageError & e) { logError(e.info()); - printError("Try '%1% --help' for more information.", programName); + printError("\nTry '%1% --help' for more information.", programName); return 1; } catch (BaseError & e) { logError(e.info()); diff --git a/src/libstore-c/meson.build b/src/libstore-c/meson.build index c81235bf16d4..fd5d4990efba 100644 --- a/src/libstore-c/meson.build +++ b/src/libstore-c/meson.build @@ -52,7 +52,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libstore-c/nix_api_store.cc b/src/libstore-c/nix_api_store.cc index 4f71d0a3caed..4133d769f218 100644 --- a/src/libstore-c/nix_api_store.cc +++ 
b/src/libstore-c/nix_api_store.cc @@ -290,6 +290,31 @@ nix_derivation * nix_derivation_from_json(nix_c_context * context, Store * store NIXC_CATCH_ERRS_NULL } +nix_err nix_derivation_make_outputs( + nix_c_context * context, + Store * store, + const char * json, + void (*callback)(void * userdata, const char * output_name, const char * path), + void * userdata) +{ + if (context) + context->last_err_code = NIX_OK; + try { + auto drv = nix::Derivation::parseJsonAndValidate(*store->ptr, nlohmann::json::parse(json)); + auto hashesModulo = hashDerivationModulo(*store->ptr, drv, true); + + for (auto & output : drv.outputs) { + nix::Hash h = hashesModulo.hashes.at(output.first); + auto outPath = store->ptr->makeOutputPath(output.first, h, drv.name); + + if (callback) { + callback(userdata, output.first.c_str(), store->ptr->printStorePath(outPath).c_str()); + } + } + } + NIXC_CATCH_ERRS +} + nix_err nix_derivation_to_json( nix_c_context * context, const nix_derivation * drv, nix_get_string_callback callback, void * userdata) { @@ -338,4 +363,132 @@ nix_derivation * nix_store_drv_from_store_path(nix_c_context * context, Store * NIXC_CATCH_ERRS_NULL } +nix_err nix_store_drv_from_path( + nix_c_context * context, + Store * store, + const StorePath * path, + void (*callback)(void * userdata, const nix_derivation * drv), + void * userdata) +{ + if (context) + context->last_err_code = NIX_OK; + try { + nix::Derivation drv = store->ptr->derivationFromPath(path->path); + if (callback) { + const nix_derivation tmp{drv, store}; + callback(userdata, &tmp); + } + } + NIXC_CATCH_ERRS +} + +nix_err nix_store_query_path_info( + nix_c_context * context, + Store * store, + const StorePath * store_path, + void * userdata, + nix_get_string_callback callback) +{ + if (context) + context->last_err_code = NIX_OK; + try { + auto info = store->ptr->queryPathInfo(store_path->path); + if (callback) { + auto result = info->toJSON(&store->ptr->config, true, nix::PathInfoJsonFormat::V1).dump(); + 
callback(result.data(), result.size(), userdata); + } + } + NIXC_CATCH_ERRS +} + +nix_err nix_store_build_paths( + nix_c_context * context, + Store * store, + const StorePath ** store_paths, + unsigned int num_store_paths, + void (*callback)(void * userdata, const char * path, const char * result), + void * userdata) +{ + if (context) + context->last_err_code = NIX_OK; + try { + std::vector derived_paths; + for (size_t i = 0; i < num_store_paths; i++) { + const StorePath * store_path = store_paths[i]; + derived_paths.push_back(nix::SingleDerivedPath::Opaque{store_path->path}); + } + + auto results = store->ptr->buildPathsWithResults(derived_paths); + for (auto & result : results) { + if (callback) + callback( + userdata, result.path.to_string(store->ptr->config).c_str(), nlohmann::json(result).dump().c_str()); + } + } + NIXC_CATCH_ERRS +} + +nix_err nix_derivation_get_outputs_and_optpaths( + nix_c_context * context, + const nix_derivation * drv, + const Store * store, + void (*callback)(void * userdata, const char * name, const StorePath * path), + void * userdata) +{ + if (context) + context->last_err_code = NIX_OK; + try { + auto value = drv->drv.outputsAndOptPaths(store->ptr->config); + if (callback) { + for (const auto & [name, result] : value) { + if (auto store_path = result.second) { + const StorePath tmp_path{*store_path}; + callback(userdata, name.c_str(), &tmp_path); + } else { + callback(userdata, name.c_str(), nullptr); + } + } + } + } + NIXC_CATCH_ERRS +} + +StorePath * nix_store_query_path_from_hash_part(nix_c_context * context, Store * store, const char * hash) +{ + if (context) + context->last_err_code = NIX_OK; + try { + std::optional s = store->ptr->queryPathFromHashPart(hash); + + if (!s.has_value()) { + return nullptr; + } + + return new StorePath{std::move(s.value())}; + } + NIXC_CATCH_ERRS_NULL +} + +nix_err nix_store_copy_path( + nix_c_context * context, Store * srcStore, Store * dstStore, const StorePath * path, bool repair, bool checkSigs) 
+{ + if (context) + context->last_err_code = NIX_OK; + try { + if (srcStore == nullptr) + return nix_set_err_msg(context, NIX_ERR_UNKNOWN, "Source store is null"); + + if (dstStore == nullptr) + return nix_set_err_msg(context, NIX_ERR_UNKNOWN, "Destination store is null"); + + if (path == nullptr) + return nix_set_err_msg(context, NIX_ERR_UNKNOWN, "Store path is null"); + + auto repairFlag = repair ? nix::RepairFlag::Repair : nix::RepairFlag::NoRepair; + auto checkSigsFlag = checkSigs ? nix::CheckSigsFlag::CheckSigs : nix::CheckSigsFlag::NoCheckSigs; + nix::copyStorePath(*srcStore->ptr, *dstStore->ptr, path->path, repairFlag, checkSigsFlag); + } + NIXC_CATCH_ERRS +} + } // extern "C" diff --git a/src/libstore-c/nix_api_store.h b/src/libstore-c/nix_api_store.h index 761fdf3c899d..964c4066154b 100644 --- a/src/libstore-c/nix_api_store.h +++ b/src/libstore-c/nix_api_store.h @@ -201,6 +201,22 @@ nix_store_get_version(nix_c_context * context, Store * store, nix_get_string_cal */ nix_derivation * nix_derivation_from_json(nix_c_context * context, Store * store, const char * json); +/** + * @brief Hashes the derivation and gives the output paths + * + * @param[in] context Optional, stores error information. + * @param[in] store nix store reference. + * @param[in] json JSON of the derivation as a string. + * @param[in] callback Called for every output to provide the output path. + * @param[in] userdata User data to pass to the callback. + */ +nix_err nix_derivation_make_outputs( + nix_c_context * context, + Store * store, + const char * json, + void (*callback)(void * userdata, const char * output_name, const char * path), + void * userdata); + /** * @brief Add the given `nix_derivation` to the given store * @@ -259,6 +275,112 @@ nix_err nix_store_get_fs_closure( */ nix_derivation * nix_store_drv_from_store_path(nix_c_context * context, Store * store, const StorePath * path); +/** + * @note The callback borrows the Derivation only for the duration of the call. 
+ * + * @param[out] context Optional, stores error information + * @param[in] store The nix store + * @param[in] path The nix store path + * @param[in] callback The callback to call + * @param[in] userdata The userdata to pass to the callback + */ +nix_err nix_store_drv_from_path( + nix_c_context * context, + Store * store, + const StorePath * path, + void (*callback)(void * userdata, const nix_derivation * drv), + void * userdata); + +/** + * @brief Queries for the nix store path info. + * + * @param[out] context Optional, stores error information + * @param[in] store nix store reference + * @param[in] store_path A store path + * @param[in] userdata The data to pass to the callback + * @param[in] callback Called for when the path info is resolved + */ +nix_err nix_store_query_path_info( + nix_c_context * context, + Store * store, + const StorePath * store_path, + void * userdata, + nix_get_string_callback callback); + +/** + * @brief Builds the paths, if they are a derivation then they get built. + * + * @note Path and result for the callback only exist for the lifetime of + * the call. Result is a string containing the build result in JSON. + * + * @param[out] context Optional, stores error information + * @param[in] store nix store reference + * @param[in] store_paths Pointer to list of nix store paths + * @param[in] num_store_paths Number of nix store paths + * @param[in] callback The callback to trigger for build results + * @param[in] userdata User data to pass to the callback + */ +nix_err nix_store_build_paths( + nix_c_context * context, + Store * store, + const StorePath ** store_paths, + unsigned int num_store_paths, + void (*callback)(void * userdata, const char * path, const char * result), + void * userdata); + +/** + * @brief Iterate and get all of the store paths for each output. + * + * @note The callback borrows the StorePath only for the duration of the call. 
+ * + * @param[out] context Optional, stores error information + * @param[in] drv The derivation + * @param[in] store The nix store + * @param[in] callback The function to call on every output and store path + * @param[in] userdata The userdata to pass to the callback + */ +nix_err nix_derivation_get_outputs_and_optpaths( + nix_c_context * context, + const nix_derivation * drv, + const Store * store, + void (*callback)(void * userdata, const char * name, const StorePath * path), + void * userdata); + +/** + * @brief Gets the derivation as a JSON string + * + * @param[out] context Optional, stores error information + * @param[in] drv The derivation + * @param[in] callback Called with the JSON string + * @param[in] userdata Arbitrary data passed to the callback + */ +nix_err nix_derivation_to_json( + nix_c_context * context, const nix_derivation * drv, nix_get_string_callback callback, void * userdata); + +/** + * @brief Query the full store path given the hash part of a valid store + * path, or empty if no matching path is found. + * + * @param[out] context Optional, stores error information + * @param[in] store nix store reference + * @param[in] hash Hash part of path as a string + * @return Store path reference, NULL if no matching path is found. + */ +StorePath * nix_store_query_path_from_hash_part(nix_c_context * context, Store * store, const char * hash); + +/** + * @brief Copy a path from one store to another. 
+ * + * @param[out] context Optional, stores error information + * @param[in] srcStore nix source store reference + * @param[in] dstStore nix destination store reference + * @param[in] path The path to copy + * @param[in] repair Whether to repair the path + * @param[in] checkSigs Whether to check path signatures are trusted before copying + */ +nix_err nix_store_copy_path( + nix_c_context * context, Store * srcStore, Store * dstStore, const StorePath * path, bool repair, bool checkSigs); + // cffi end #ifdef __cplusplus } diff --git a/src/libstore-c/nix_api_store_internal.h b/src/libstore-c/nix_api_store_internal.h index 712d96488a57..0199628da8a9 100644 --- a/src/libstore-c/nix_api_store_internal.h +++ b/src/libstore-c/nix_api_store_internal.h @@ -18,6 +18,7 @@ struct StorePath struct nix_derivation { nix::Derivation drv; + Store * store; }; } // extern "C" diff --git a/src/libstore-c/package.nix b/src/libstore-c/package.nix index fde17c78e017..0ce37e44c012 100644 --- a/src/libstore-c/package.nix +++ b/src/libstore-c/package.nix @@ -15,7 +15,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-store-c"; + pname = "determinate-nix-store-c"; inherit version; workDir = ./.; diff --git a/src/libstore-test-support/meson.build b/src/libstore-test-support/meson.build index 8617225d743f..d3d925d26848 100644 --- a/src/libstore-test-support/meson.build +++ b/src/libstore-test-support/meson.build @@ -51,7 +51,7 @@ this_library = library( # TODO: Remove `-lrapidcheck` when https://github.com/emil-e/rapidcheck/pull/326 # is available. 
See also ../libutil/build.meson link_args : linker_export_flags + [ '-lrapidcheck' ], - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libstore-test-support/package.nix b/src/libstore-test-support/package.nix index 391ddeefda26..2561dd791eb7 100644 --- a/src/libstore-test-support/package.nix +++ b/src/libstore-test-support/package.nix @@ -18,7 +18,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-store-test-support"; + pname = "determinate-nix-store-test-support"; inherit version; workDir = ./.; diff --git a/src/libstore-tests/nix_api_store.cc b/src/libstore-tests/nix_api_store.cc index ea600f905704..0f6c795765d2 100644 --- a/src/libstore-tests/nix_api_store.cc +++ b/src/libstore-tests/nix_api_store.cc @@ -955,4 +955,55 @@ TEST_F(nix_api_store_test, nix_derivation_clone) nix_derivation_free(drv2); } +TEST_F(nix_api_store_test, nix_store_build_paths) +{ + nix::experimentalFeatureSettings.set("extra-experimental-features", "ca-derivations"); + nix::settings.substituters = {}; + + auto * store = open_local_store(); + + std::filesystem::path unitTestData{getenv("_NIX_TEST_UNIT_DATA")}; + std::ifstream t{unitTestData / "derivation/ca/self-contained.json"}; + std::stringstream buffer; + buffer << t.rdbuf(); + + // Replace the hardcoded system with the current system + std::string jsonStr = nix::replaceStrings(buffer.str(), "x86_64-linux", nix::settings.thisSystem.get()); + + auto * drv = nix_derivation_from_json(ctx, store, jsonStr.c_str()); + assert_ctx_ok(); + ASSERT_NE(drv, nullptr); + + auto * drvPath = nix_add_derivation(ctx, store, drv); + assert_ctx_ok(); + ASSERT_NE(drv, nullptr); + + // Realise the derivation - capture the order outputs are returned + std::map outputs; + std::vector output_order; + auto cb = LambdaAdapter{.fun = [&](const char * path, const char * result) { + ASSERT_NE(path, nullptr); + ASSERT_NE(result, nullptr); + output_order.push_back(path); + 
outputs.emplace(path, result); + }}; + + std::vector paths = {drvPath}; + + auto ret = nix_store_build_paths( + ctx, + store, + const_cast(paths.data()), + paths.size(), + decltype(cb)::call_void, + static_cast(&cb)); + assert_ctx_ok(); + ASSERT_EQ(ret, NIX_OK); + ASSERT_EQ(outputs.size(), 1); + + nix_store_path_free(drvPath); + nix_derivation_free(drv); + nix_store_free(store); +} + } // namespace nixC diff --git a/src/libstore-tests/s3-url.cc b/src/libstore-tests/s3-url.cc index 9fa625fd6c7d..cd68b0437a2c 100644 --- a/src/libstore-tests/s3-url.cc +++ b/src/libstore-tests/s3-url.cc @@ -104,6 +104,33 @@ INSTANTIATE_TEST_SUITE_P( }, }, "with_absolute_endpoint_uri", + }, + ParsedS3URLTestCase{ + "s3://bucket/key?addressing-style=virtual", + { + .bucket = "bucket", + .key = {"key"}, + .addressingStyle = S3AddressingStyle::Virtual, + }, + "with_addressing_style_virtual", + }, + ParsedS3URLTestCase{ + "s3://bucket/key?addressing-style=path", + { + .bucket = "bucket", + .key = {"key"}, + .addressingStyle = S3AddressingStyle::Path, + }, + "with_addressing_style_path", + }, + ParsedS3URLTestCase{ + "s3://bucket/key?addressing-style=auto", + { + .bucket = "bucket", + .key = {"key"}, + .addressingStyle = S3AddressingStyle::Auto, + }, + "with_addressing_style_auto", }), [](const ::testing::TestParamInfo & info) { return info.param.description; }); @@ -138,6 +165,26 @@ INSTANTIATE_TEST_SUITE_P( InvalidS3URLTestCase{"s3://bucket", "error: URI has a missing or invalid key", "missing_key"}), [](const ::testing::TestParamInfo & info) { return info.param.description; }); +TEST(ParsedS3URLTest, invalidAddressingStyleThrows) +{ + ASSERT_THROW(ParsedS3URL::parse(parseURL("s3://bucket/key?addressing-style=bogus")), InvalidS3AddressingStyle); +} + +TEST(ParsedS3URLTest, virtualStyleWithAuthoritylessEndpointThrows) +{ + ParsedS3URL input{ + .bucket = "bucket", + .key = {"key"}, + .addressingStyle = S3AddressingStyle::Virtual, + .endpoint = + ParsedURL{ + .scheme = "file", + .path = {"", 
"some", "path"}, + }, + }; + ASSERT_THROW(input.toHttpsUrl(), nix::Error); +} + // ============================================================================= // S3 URL to HTTPS Conversion Tests // ============================================================================= @@ -166,6 +213,7 @@ INSTANTIATE_TEST_SUITE_P( S3ToHttpsConversion, S3ToHttpsConversionTest, ::testing::Values( + // Default (auto) addressing style: virtual-hosted for standard AWS endpoints S3ToHttpsConversionTestCase{ ParsedS3URL{ .bucket = "my-bucket", @@ -173,10 +221,10 @@ INSTANTIATE_TEST_SUITE_P( }, ParsedURL{ .scheme = "https", - .authority = ParsedURL::Authority{.host = "s3.us-east-1.amazonaws.com"}, - .path = {"", "my-bucket", "my-key.txt"}, + .authority = ParsedURL::Authority{.host = "my-bucket.s3.us-east-1.amazonaws.com"}, + .path = {"", "my-key.txt"}, }, - "https://s3.us-east-1.amazonaws.com/my-bucket/my-key.txt", + "https://my-bucket.s3.us-east-1.amazonaws.com/my-key.txt", "basic_s3_default_region", }, S3ToHttpsConversionTestCase{ @@ -187,12 +235,13 @@ INSTANTIATE_TEST_SUITE_P( }, ParsedURL{ .scheme = "https", - .authority = ParsedURL::Authority{.host = "s3.eu-west-1.amazonaws.com"}, - .path = {"", "prod-cache", "nix", "store", "abc123.nar.xz"}, + .authority = ParsedURL::Authority{.host = "prod-cache.s3.eu-west-1.amazonaws.com"}, + .path = {"", "nix", "store", "abc123.nar.xz"}, }, - "https://s3.eu-west-1.amazonaws.com/prod-cache/nix/store/abc123.nar.xz", + "https://prod-cache.s3.eu-west-1.amazonaws.com/nix/store/abc123.nar.xz", "with_eu_west_1_region", }, + // Custom endpoint authority: path-style by default S3ToHttpsConversionTestCase{ ParsedS3URL{ .bucket = "bucket", @@ -208,6 +257,7 @@ INSTANTIATE_TEST_SUITE_P( "http://custom.s3.com/bucket/key", "custom_endpoint_authority", }, + // Custom endpoint URL: path-style by default S3ToHttpsConversionTestCase{ ParsedS3URL{ .bucket = "bucket", @@ -236,10 +286,10 @@ INSTANTIATE_TEST_SUITE_P( }, ParsedURL{ .scheme = "https", - .authority 
= ParsedURL::Authority{.host = "s3.ap-southeast-2.amazonaws.com"}, - .path = {"", "bucket", "path", "to", "file.txt"}, + .authority = ParsedURL::Authority{.host = "bucket.s3.ap-southeast-2.amazonaws.com"}, + .path = {"", "path", "to", "file.txt"}, }, - "https://s3.ap-southeast-2.amazonaws.com/bucket/path/to/file.txt", + "https://bucket.s3.ap-southeast-2.amazonaws.com/path/to/file.txt", "complex_path_and_region", }, S3ToHttpsConversionTestCase{ @@ -250,11 +300,11 @@ INSTANTIATE_TEST_SUITE_P( }, ParsedURL{ .scheme = "https", - .authority = ParsedURL::Authority{.host = "s3.us-east-1.amazonaws.com"}, - .path = {"", "my-bucket", "my-key.txt"}, + .authority = ParsedURL::Authority{.host = "my-bucket.s3.us-east-1.amazonaws.com"}, + .path = {"", "my-key.txt"}, .query = {{"versionId", "abc123xyz"}}, }, - "https://s3.us-east-1.amazonaws.com/my-bucket/my-key.txt?versionId=abc123xyz", + "https://my-bucket.s3.us-east-1.amazonaws.com/my-key.txt?versionId=abc123xyz", "with_versionId", }, S3ToHttpsConversionTestCase{ @@ -266,13 +316,185 @@ INSTANTIATE_TEST_SUITE_P( }, ParsedURL{ .scheme = "https", - .authority = ParsedURL::Authority{.host = "s3.eu-west-1.amazonaws.com"}, - .path = {"", "versioned-bucket", "path", "to", "object"}, + .authority = ParsedURL::Authority{.host = "versioned-bucket.s3.eu-west-1.amazonaws.com"}, + .path = {"", "path", "to", "object"}, .query = {{"versionId", "version456"}}, }, - "https://s3.eu-west-1.amazonaws.com/versioned-bucket/path/to/object?versionId=version456", + "https://versioned-bucket.s3.eu-west-1.amazonaws.com/path/to/object?versionId=version456", "with_region_and_versionId", + }, + // Explicit addressing-style=path forces path-style on standard AWS endpoints + S3ToHttpsConversionTestCase{ + ParsedS3URL{ + .bucket = "my-bucket", + .key = {"my-key.txt"}, + .region = "us-west-2", + .addressingStyle = S3AddressingStyle::Path, + }, + ParsedURL{ + .scheme = "https", + .authority = ParsedURL::Authority{.host = "s3.us-west-2.amazonaws.com"}, + .path = 
{"", "my-bucket", "my-key.txt"}, + }, + "https://s3.us-west-2.amazonaws.com/my-bucket/my-key.txt", + "explicit_path_style", + }, + // Explicit addressing-style=virtual forces virtual-hosted-style on custom endpoints + S3ToHttpsConversionTestCase{ + ParsedS3URL{ + .bucket = "bucket", + .key = {"key"}, + .scheme = "http", + .addressingStyle = S3AddressingStyle::Virtual, + .endpoint = ParsedURL::Authority{.host = "custom.s3.com"}, + }, + ParsedURL{ + .scheme = "http", + .authority = ParsedURL::Authority{.host = "bucket.custom.s3.com"}, + .path = {"", "key"}, + }, + "http://bucket.custom.s3.com/key", + "explicit_virtual_style_custom_endpoint", + }, + // Explicit addressing-style=virtual with full endpoint URL + S3ToHttpsConversionTestCase{ + ParsedS3URL{ + .bucket = "bucket", + .key = {"key"}, + .addressingStyle = S3AddressingStyle::Virtual, + .endpoint = + ParsedURL{ + .scheme = "http", + .authority = ParsedURL::Authority{.host = "server", .port = 9000}, + .path = {""}, + }, + }, + ParsedURL{ + .scheme = "http", + .authority = ParsedURL::Authority{.host = "bucket.server", .port = 9000}, + .path = {"", "key"}, + }, + "http://bucket.server:9000/key", + "explicit_virtual_style_full_endpoint_url", + }, + // Dotted bucket names work normally with explicit path-style + S3ToHttpsConversionTestCase{ + ParsedS3URL{ + .bucket = "my.bucket", + .key = {"key"}, + .addressingStyle = S3AddressingStyle::Path, + }, + ParsedURL{ + .scheme = "https", + .authority = ParsedURL::Authority{.host = "s3.us-east-1.amazonaws.com"}, + .path = {"", "my.bucket", "key"}, + }, + "https://s3.us-east-1.amazonaws.com/my.bucket/key", + "dotted_bucket_with_path_style", + }, + // Dotted bucket names fall back to path-style with auto on standard AWS endpoints + S3ToHttpsConversionTestCase{ + ParsedS3URL{ + .bucket = "my.bucket.name", + .key = {"key"}, + }, + ParsedURL{ + .scheme = "https", + .authority = ParsedURL::Authority{.host = "s3.us-east-1.amazonaws.com"}, + .path = {"", "my.bucket.name", "key"}, + 
}, + "https://s3.us-east-1.amazonaws.com/my.bucket.name/key", + "dotted_bucket_with_auto_style_on_aws", + }, + // Dotted bucket names work with auto style on custom endpoints (auto = path-style) + S3ToHttpsConversionTestCase{ + ParsedS3URL{ + .bucket = "my.bucket", + .key = {"key"}, + .endpoint = ParsedURL::Authority{.host = "minio.local"}, + }, + ParsedURL{ + .scheme = "https", + .authority = ParsedURL::Authority{.host = "minio.local"}, + .path = {"", "my.bucket", "key"}, + }, + "https://minio.local/my.bucket/key", + "dotted_bucket_with_auto_style_custom_endpoint", }), [](const ::testing::TestParamInfo & info) { return info.param.description; }); +// ============================================================================= +// S3 URL to HTTPS Conversion Error Tests +// ============================================================================= + +struct S3ToHttpsConversionErrorTestCase +{ + ParsedS3URL input; + std::string description; +}; + +class S3ToHttpsConversionErrorTest : public ::testing::WithParamInterface, + public ::testing::Test +{}; + +TEST_P(S3ToHttpsConversionErrorTest, ThrowsOnConversion) +{ + auto & [input, description] = GetParam(); + ASSERT_THROW(input.toHttpsUrl(), nix::Error); +} + +INSTANTIATE_TEST_SUITE_P( + S3ToHttpsConversionErrors, + S3ToHttpsConversionErrorTest, + ::testing::Values( + S3ToHttpsConversionErrorTestCase{ + ParsedS3URL{ + .bucket = "bucket", + .key = {"key"}, + .addressingStyle = S3AddressingStyle::Virtual, + .endpoint = ParsedURL::Authority{.host = ""}, + }, + "virtual_style_with_empty_host_authority", + }, + S3ToHttpsConversionErrorTestCase{ + ParsedS3URL{ + .bucket = "my.bucket", + .key = {"key"}, + .addressingStyle = S3AddressingStyle::Virtual, + }, + "dotted_bucket_with_explicit_virtual_style", + }, + S3ToHttpsConversionErrorTestCase{ + ParsedS3URL{ + .bucket = "my.bucket.name", + .key = {"key"}, + .addressingStyle = S3AddressingStyle::Virtual, + }, + "dotted_bucket_with_explicit_virtual_style_multi_dot", + }, + 
S3ToHttpsConversionErrorTestCase{ + ParsedS3URL{ + .bucket = "my.bucket", + .key = {"key"}, + .addressingStyle = S3AddressingStyle::Virtual, + .endpoint = ParsedURL::Authority{.host = "minio.local"}, + }, + "dotted_bucket_with_explicit_virtual_style_custom_authority", + }, + S3ToHttpsConversionErrorTestCase{ + ParsedS3URL{ + .bucket = "my.bucket", + .key = {"key"}, + .addressingStyle = S3AddressingStyle::Virtual, + .endpoint = + ParsedURL{ + .scheme = "http", + .authority = ParsedURL::Authority{.host = "minio.local", .port = 9000}, + .path = {""}, + }, + }, + "dotted_bucket_with_explicit_virtual_style_full_endpoint_url", + }), + [](const ::testing::TestParamInfo & info) { return info.param.description; }); + } // namespace nix diff --git a/src/libstore/active-builds.cc b/src/libstore/active-builds.cc new file mode 100644 index 000000000000..838f188d8912 --- /dev/null +++ b/src/libstore/active-builds.cc @@ -0,0 +1,149 @@ +#include "nix/store/active-builds.hh" +#include "nix/util/json-utils.hh" + +#include + +#ifndef _WIN32 +# include +#endif + +namespace nix { + +UserInfo UserInfo::fromUid(uid_t uid) +{ + UserInfo info; + info.uid = uid; + +#ifndef _WIN32 + // Look up the user name for the UID (thread-safe) + struct passwd pwd; + struct passwd * result; + std::vector buf(16384); + if (getpwuid_r(uid, &pwd, buf.data(), buf.size(), &result) == 0 && result) + info.name = result->pw_name; +#endif + + return info; +} + +} // namespace nix + +namespace nlohmann { + +using namespace nix; + +UserInfo adl_serializer::from_json(const json & j) +{ + return UserInfo{ + .uid = j.at("uid").get(), + .name = j.contains("name") && !j.at("name").is_null() + ? std::optional(j.at("name").get()) + : std::nullopt, + }; +} + +void adl_serializer::to_json(json & j, const UserInfo & info) +{ + j = nlohmann::json{ + {"uid", info.uid}, + {"name", info.name}, + }; +} + +// Durations are serialized as floats representing seconds. 
+static std::optional parseDuration(const json & j, const char * key) +{ + if (j.contains(key) && !j.at(key).is_null()) + return std::chrono::duration_cast( + std::chrono::duration(j.at(key).get())); + else + return std::nullopt; +} + +static nlohmann::json printDuration(const std::optional & duration) +{ + return duration + ? nlohmann::json( + std::chrono::duration_cast>(*duration) + .count()) + : nullptr; +} + +ActiveBuildInfo::ProcessInfo adl_serializer::from_json(const json & j) +{ + return ActiveBuildInfo::ProcessInfo{ + .pid = j.at("pid").get(), + .parentPid = j.at("parentPid").get(), + .user = j.at("user").get(), + .argv = j.at("argv").get>(), + .utime = parseDuration(j, "utime"), + .stime = parseDuration(j, "stime"), + .cutime = parseDuration(j, "cutime"), + .cstime = parseDuration(j, "cstime"), + }; +} + +void adl_serializer::to_json(json & j, const ActiveBuildInfo::ProcessInfo & process) +{ + j = nlohmann::json{ + {"pid", process.pid}, + {"parentPid", process.parentPid}, + {"user", process.user}, + {"argv", process.argv}, + {"utime", printDuration(process.utime)}, + {"stime", printDuration(process.stime)}, + {"cutime", printDuration(process.cutime)}, + {"cstime", printDuration(process.cstime)}, + }; +} + +ActiveBuild adl_serializer::from_json(const json & j) +{ + auto type = j.at("type").get(); + if (type != "build") + throw Error("invalid active build JSON: expected type 'build' but got '%s'", type); + return ActiveBuild{ + .nixPid = j.at("nixPid").get(), + .clientPid = j.at("clientPid").get>(), + .clientUid = j.at("clientUid").get>(), + .mainPid = j.at("mainPid").get(), + .mainUser = j.at("mainUser").get(), + .cgroup = j.at("cgroup").get>(), + .startTime = (time_t) j.at("startTime").get(), + .derivation = StorePath{getString(j.at("derivation"))}, + }; +} + +void adl_serializer::to_json(json & j, const ActiveBuild & build) +{ + j = nlohmann::json{ + {"type", "build"}, + {"nixPid", build.nixPid}, + {"clientPid", build.clientPid}, + {"clientUid", 
build.clientUid}, + {"mainPid", build.mainPid}, + {"mainUser", build.mainUser}, + {"cgroup", build.cgroup}, + {"startTime", (double) build.startTime}, + {"derivation", build.derivation.to_string()}, + }; +} + +ActiveBuildInfo adl_serializer::from_json(const json & j) +{ + ActiveBuildInfo info(adl_serializer::from_json(j)); + info.processes = j.at("processes").get>(); + info.utime = parseDuration(j, "utime"); + info.stime = parseDuration(j, "stime"); + return info; +} + +void adl_serializer::to_json(json & j, const ActiveBuildInfo & build) +{ + adl_serializer::to_json(j, build); + j["processes"] = build.processes; + j["utime"] = printDuration(build.utime); + j["stime"] = printDuration(build.stime); +} + +} // namespace nlohmann diff --git a/src/libstore/async-path-writer.cc b/src/libstore/async-path-writer.cc new file mode 100644 index 000000000000..dce38b526478 --- /dev/null +++ b/src/libstore/async-path-writer.cc @@ -0,0 +1,183 @@ +#include "nix/store/async-path-writer.hh" +#include "nix/util/archive.hh" +#include "nix/util/provenance.hh" + +#include +#include + +namespace nix { + +struct AsyncPathWriterImpl : AsyncPathWriter +{ + ref store; + + struct Item + { + StorePath storePath; + std::string contents; + std::string name; + Hash hash; + StorePathSet references; + RepairFlag repair; + std::shared_ptr provenance; + std::promise promise; + }; + + struct State + { + std::vector items; + std::unordered_map> futures; + bool quit = false; + }; + + Sync state_; + + std::thread workerThread; + + std::condition_variable wakeupCV; + + AsyncPathWriterImpl(ref store) + : store(store) + { + workerThread = std::thread([&]() { + while (true) { + std::vector items; + + { + auto state(state_.lock()); + while (!state->quit && state->items.empty()) + state.wait(wakeupCV); + if (state->items.empty() && state->quit) + return; + std::swap(items, state->items); + } + + try { + writePaths(items); + for (auto & item : items) + item.promise.set_value(); + } catch (...) 
{ + for (auto & item : items) + item.promise.set_exception(std::current_exception()); + } + } + }); + } + + virtual ~AsyncPathWriterImpl() + { + state_.lock()->quit = true; + wakeupCV.notify_all(); + workerThread.join(); + } + + StorePath addPath( + std::string contents, + std::string name, + StorePathSet references, + RepairFlag repair, + bool readOnly, + std::shared_ptr provenance) override + { + auto hash = hashString(HashAlgorithm::SHA256, contents); + + auto storePath = store->makeFixedOutputPathFromCA( + name, + TextInfo{ + .hash = hash, + .references = references, + }); + + if (!readOnly) { + auto state(state_.lock()); + std::promise promise; + state->futures.insert_or_assign(storePath, promise.get_future()); + state->items.push_back( + Item{ + .storePath = storePath, + .contents = std::move(contents), + .name = std::move(name), + .hash = hash, + .references = std::move(references), + .repair = repair, + .provenance = provenance, + .promise = std::move(promise), + }); + wakeupCV.notify_all(); + } + + return storePath; + } + + void waitForPath(const StorePath & path) override + { + auto future = ({ + auto state = state_.lock(); + auto i = state->futures.find(path); + if (i == state->futures.end()) + return; + i->second; + }); + future.get(); + } + + void waitForAllPaths() override + { + auto futures = ({ + auto state(state_.lock()); + std::move(state->futures); + }); + for (auto & future : futures) + future.second.get(); + } + + void writePaths(const std::vector & items) + { +// FIXME: addMultipeToStore() shouldn't require a NAR hash. 
+#if 0 + Store::PathsSource sources; + RepairFlag repair = NoRepair; + + for (auto & item : items) { + ValidPathInfo info{item.storePath, Hash(HashAlgorithm::SHA256)}; + info.references = item.references; + info.ca = ContentAddress { + .method = ContentAddressMethod::Raw::Text, + .hash = item.hash, + }; + if (item.repair) repair = item.repair; + auto source = sinkToSource([&](Sink & sink) + { + dumpString(item.contents, sink); + }); + sources.push_back({std::move(info), std::move(source)}); + } + + Activity act(*logger, lvlDebug, actUnknown, fmt("adding %d paths to the store", items.size())); + + store->addMultipleToStore(std::move(sources), act, repair); +#endif + + for (auto & item : items) { + StringSource source(item.contents); + store->addTempRoot(item.storePath); + auto storePath = store->addToStoreFromDump( + source, + item.storePath.name(), + FileSerialisationMethod::Flat, + ContentAddressMethod::Raw::Text, + HashAlgorithm::SHA256, + item.references, + item.repair, + item.provenance); + assert(storePath == item.storePath); + } + } +}; + +ref AsyncPathWriter::make(ref store) +{ + return make_ref(store); +} + +} // namespace nix diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index 848669ae84f9..51ef2fc7faad 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -315,7 +315,8 @@ StorePath BinaryCacheStore::addToStoreFromDump( ContentAddressMethod hashMethod, HashAlgorithm hashAlgo, const StorePathSet & references, - RepairFlag repair) + RepairFlag repair, + std::shared_ptr provenance) { std::optional caHash; std::string nar; @@ -378,6 +379,7 @@ StorePath BinaryCacheStore::addToStoreFromDump( }), nar.hash); info.narSize = nar.numBytesDigested; + info.provenance = provenance; return info; }) ->path; @@ -435,37 +437,41 @@ void BinaryCacheStore::narFromPath(const StorePath & storePath, Sink & sink) void BinaryCacheStore::queryPathInfoUncached( const StorePath & storePath, Callback> 
callback) noexcept { - auto uri = config.getReference().render(/*FIXME withParams=*/false); - auto storePathS = printStorePath(storePath); - auto act = std::make_shared( - *logger, - lvlTalkative, - actQueryPathInfo, - fmt("querying info about '%s' on '%s'", storePathS, uri), - Logger::Fields{storePathS, uri}); - PushActivity pact(act->id); - - auto narInfoFile = narInfoFileFor(storePath); - auto callbackPtr = std::make_shared(std::move(callback)); - getFile(narInfoFile, {[=, this](std::future> fut) { - try { - auto data = fut.get(); - - if (!data) - return (*callbackPtr)({}); - - stats.narInfoRead++; - - (*callbackPtr)( - (std::shared_ptr) std::make_shared(*this, *data, narInfoFile)); - - (void) act; // force Activity into this lambda to ensure it stays alive - } catch (...) { - callbackPtr->rethrow(); - } - }}); + try { + auto uri = config.getReference().render(/*FIXME withParams=*/false); + auto storePathS = printStorePath(storePath); + auto act = std::make_shared( + *logger, + lvlTalkative, + actQueryPathInfo, + fmt("querying info about '%s' on '%s'", storePathS, uri), + Logger::Fields{storePathS, uri}); + PushActivity pact(act->id); + + auto narInfoFile = narInfoFileFor(storePath); + + getFile(narInfoFile, {[=, this](std::future> fut) { + try { + auto data = fut.get(); + + if (!data) + return (*callbackPtr)({}); + + stats.narInfoRead++; + + (*callbackPtr)( + (std::shared_ptr) std::make_shared(*this, *data, narInfoFile)); + + (void) act; // force Activity into this lambda to ensure it stays alive + } catch (...) { + callbackPtr->rethrow(); + } + }}); + } catch (...) 
{ + callbackPtr->rethrow(); + } } StorePath BinaryCacheStore::addToStore( @@ -503,6 +509,7 @@ StorePath BinaryCacheStore::addToStore( }), nar.hash); info.narSize = nar.numBytesDigested; + info.provenance = path.getProvenance(); return info; }) ->path; diff --git a/src/libstore/build-result.cc b/src/libstore/build-result.cc index f4bc8ab33532..19080e0f19ac 100644 --- a/src/libstore/build-result.cc +++ b/src/libstore/build-result.cc @@ -1,5 +1,7 @@ #include "nix/store/build-result.hh" #include "nix/util/json-utils.hh" +#include "nix/util/provenance.hh" + #include namespace nix { @@ -22,7 +24,7 @@ static constexpr std::array, 12> failureStatusStrings{{ +static constexpr std::array, 13> failureStatusStrings{{ #define ENUM_ENTRY(e) {BuildResult::Failure::e, #e} ENUM_ENTRY(PermanentFailure), ENUM_ENTRY(InputRejected), @@ -54,10 +56,11 @@ static constexpr std::array::to_json(json & res, const BuildResult & br) overloaded{ [&](const BuildResult::Success & success) { res["success"] = true; - res["status"] = successStatusToString(success.status); + res["status"] = BuildResult::Success::statusToString(success.status); res["builtOutputs"] = success.builtOutputs; + if (success.provenance) + res["provenance"] = success.provenance->to_json(); }, [&](const BuildResult::Failure & failure) { res["success"] = false; - res["status"] = failureStatusToString(failure.status); + res["status"] = BuildResult::Failure::statusToString(failure.status); res["errorMsg"] = failure.errorMsg; res["isNonDeterministic"] = failure.isNonDeterministic; + if (failure.provenance) + res["provenance"] = failure.provenance->to_json(); }, }, br.inner); @@ -137,16 +144,24 @@ BuildResult adl_serializer::from_json(const json & _json) bool success = getBoolean(valueAt(json, "success")); std::string statusStr = getString(valueAt(json, "status")); + auto provenanceFromJson = [](const nlohmann::json * j) -> std::shared_ptr { + if (j && !j->is_null()) + return Provenance::from_json(*j); + return nullptr; + }; + if 
(success) { BuildResult::Success s; s.status = successStatusFromString(statusStr); s.builtOutputs = valueAt(json, "builtOutputs"); + s.provenance = provenanceFromJson(optionalValueAt(json, "provenance")); br.inner = std::move(s); } else { BuildResult::Failure f; f.status = failureStatusFromString(statusStr); f.errorMsg = getString(valueAt(json, "errorMsg")); f.isNonDeterministic = getBoolean(valueAt(json, "isNonDeterministic")); + f.provenance = provenanceFromJson(optionalValueAt(json, "provenance")); br.inner = std::move(f); } diff --git a/src/libstore/build/derivation-building-goal.cc b/src/libstore/build/derivation-building-goal.cc index 6c43dcc98713..871504d5ed83 100644 --- a/src/libstore/build/derivation-building-goal.cc +++ b/src/libstore/build/derivation-building-goal.cc @@ -440,6 +440,11 @@ Goal::Co DerivationBuildingGoal::tryToBuild() actLock.reset(); + /* Get the provenance of the derivation, if available. */ + std::shared_ptr provenance; + if (auto info = worker.evalStore.maybeQueryPathInfo(drvPath)) + provenance = info->provenance; + if (useHook) { buildResult.startTime = time(0); // inexact started(); @@ -522,7 +527,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild() outputLocks.setDeletion(true); outputLocks.unlock(); - co_return doneSuccess(BuildResult::Success::Built, std::move(builtOutputs)); + co_return doneSuccess(BuildResult::Success::Built, std::move(builtOutputs), provenance); } co_await yield(); @@ -618,6 +623,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild() DerivationBuilderParams params{ .drvPath = drvPath, + .drvProvenance = provenance, .buildResult = buildResult, .drv = *drv, .drvOptions = drvOptions, @@ -627,6 +633,7 @@ Goal::Co DerivationBuildingGoal::tryToBuild() .defaultPathsInChroot = std::move(defaultPathsInChroot), .systemFeatures = worker.store.config.systemFeatures.get(), .desugaredEnv = std::move(desugaredEnv), + .act = act, }; /* If we have to wait and retry (see below), then `builder` will @@ -723,7 +730,7 @@ Goal::Co 
DerivationBuildingGoal::tryToBuild() (unlinked) lock files. */ outputLocks.setDeletion(true); outputLocks.unlock(); - co_return doneSuccess(BuildResult::Success::Built, std::move(builtOutputs)); + co_return doneSuccess(BuildResult::Success::Built, std::move(builtOutputs), provenance); } #endif } @@ -811,7 +818,7 @@ BuildError DerivationBuildingGoal::fixupBuilderFailureErrorMessage(BuilderFailur msg += line; msg += "\n"; } - auto nixLogCommand = experimentalFeatureSettings.isEnabled(Xp::NixCommand) ? "nix log" : "nix-store -l"; + auto nixLogCommand = "nix log"; // The command is on a separate line for easy copying, such as with triple click. // This message will be indented elsewhere, so removing the indentation before the // command will not put it at the start of the line unfortunately. @@ -1173,7 +1180,8 @@ DerivationBuildingGoal::checkPathValidity(std::map & return {allValid, validOutputs}; } -Goal::Done DerivationBuildingGoal::doneSuccess(BuildResult::Success::Status status, SingleDrvOutputs builtOutputs) +Goal::Done DerivationBuildingGoal::doneSuccess( + BuildResult::Success::Status status, SingleDrvOutputs builtOutputs, std::shared_ptr provenance) { mcRunningBuilds.reset(); @@ -1182,11 +1190,21 @@ Goal::Done DerivationBuildingGoal::doneSuccess(BuildResult::Success::Status stat worker.updateProgress(); - return Goal::doneSuccess( + auto res = Goal::doneSuccess( BuildResult::Success{ .status = status, .builtOutputs = std::move(builtOutputs), + .provenance = provenance, }); + + logger->result( + act ? 
act->id : getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult( + buildResult, + DerivedPath::Built{.drvPath = makeConstantStorePathRef(drvPath), .outputs = OutputsSpec::All{}}))); + + return res; } Goal::Done DerivationBuildingGoal::doneFailure(BuildError ex) @@ -1202,13 +1220,22 @@ Goal::Done DerivationBuildingGoal::doneFailure(BuildError ex) worker.updateProgress(); - return Goal::doneFailure( + auto res = Goal::doneFailure( ecFailed, BuildResult::Failure{ .status = ex.status, .errorMsg = fmt("%s", Uncolored(ex.info().msg)), }, std::move(ex)); + + logger->result( + act ? act->id : getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult( + buildResult, + DerivedPath::Built{.drvPath = makeConstantStorePathRef(drvPath), .outputs = OutputsSpec::All{}}))); + + return res; } } // namespace nix diff --git a/src/libstore/build/derivation-check.cc b/src/libstore/build/derivation-check.cc index e56b9fe49a14..677546e878bb 100644 --- a/src/libstore/build/derivation-check.cc +++ b/src/libstore/build/derivation-check.cc @@ -12,7 +12,8 @@ void checkOutputs( const StorePath & drvPath, const decltype(Derivation::outputs) & drvOutputs, const decltype(DerivationOptions::outputChecks) & outputChecks, - const std::map & outputs) + const std::map & outputs, + Activity & act) { std::map outputsByPath; for (auto & output : outputs) @@ -36,6 +37,13 @@ void checkOutputs( if (wanted != got) { /* Throw an error after registering the path as valid. 
*/ + act.result( + resHashMismatch, + { + {"storePath", store.printStorePath(drvPath)}, + {"wanted", wanted}, + {"got", got}, + }); throw BuildError( BuildResult::Failure::HashMismatch, "hash mismatch in fixed-output derivation '%s':\n specified: %s\n got: %s", diff --git a/src/libstore/build/derivation-check.hh b/src/libstore/build/derivation-check.hh index 01e6c5d56383..ee2d01229524 100644 --- a/src/libstore/build/derivation-check.hh +++ b/src/libstore/build/derivation-check.hh @@ -22,6 +22,7 @@ void checkOutputs( const StorePath & drvPath, const decltype(Derivation::outputs) & drvOutputs, const decltype(DerivationOptions::outputChecks) & drvOptions, - const std::map & outputs); + const std::map & outputs, + Activity & act); } // namespace nix diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 72e62cdb6d25..97188d30c314 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -459,7 +459,7 @@ Goal::Done DerivationGoal::doneSuccess(BuildResult::Success::Status status, Unke worker.updateProgress(); - return Goal::doneSuccess( + auto res = Goal::doneSuccess( BuildResult::Success{ .status = status, .builtOutputs = {{ @@ -473,6 +473,15 @@ Goal::Done DerivationGoal::doneSuccess(BuildResult::Success::Status status, Unke }, }}, }); + + logger->result( + getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult( + buildResult, + DerivedPath::Built{.drvPath = makeConstantStorePathRef(drvPath), .outputs = OutputsSpec::All{}}))); + + return res; } Goal::Done DerivationGoal::doneFailure(BuildError ex) @@ -488,13 +497,22 @@ Goal::Done DerivationGoal::doneFailure(BuildError ex) worker.updateProgress(); - return Goal::doneFailure( + auto res = Goal::doneFailure( ecFailed, BuildResult::Failure{ .status = ex.status, .errorMsg = fmt("%s", Uncolored(ex.info().msg)), }, std::move(ex)); + + logger->result( + getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult( + 
buildResult, + DerivedPath::Built{.drvPath = makeConstantStorePathRef(drvPath), .outputs = OutputsSpec::All{}}))); + + return res; } } // namespace nix diff --git a/src/libstore/build/entry-points.cc b/src/libstore/build/entry-points.cc index 04b16f5a8b1a..4bbd4c8f0591 100644 --- a/src/libstore/build/entry-points.cc +++ b/src/libstore/build/entry-points.cc @@ -63,18 +63,12 @@ std::vector Store::buildPathsWithResults( std::vector results; results.reserve(state.size()); - for (auto & [req, goalPtr] : state) { - /* Goals that were never started or were cancelled have exitCode - ecBusy and a default buildResult with empty errorMsg. Skip them - to avoid reporting spurious failures with empty messages. */ - if (goalPtr->exitCode == Goal::ecBusy) - continue; + for (auto & [req, goalPtr] : state) results.emplace_back( KeyedBuildResult{ goalPtr->buildResult, /* .path = */ req, }); - } return results; } diff --git a/src/libstore/build/substitution-goal.cc b/src/libstore/build/substitution-goal.cc index 46924437d035..48fbe2c98c52 100644 --- a/src/libstore/build/substitution-goal.cc +++ b/src/libstore/build/substitution-goal.cc @@ -8,6 +8,8 @@ #include +#include + namespace nix { PathSubstitutionGoal::PathSubstitutionGoal( @@ -27,23 +29,40 @@ PathSubstitutionGoal::~PathSubstitutionGoal() cleanup(); } -Goal::Done PathSubstitutionGoal::doneSuccess(BuildResult::Success::Status status) +Goal::Done +PathSubstitutionGoal::doneSuccess(BuildResult::Success::Status status, std::shared_ptr provenance) { - return Goal::doneSuccess( + auto res = Goal::doneSuccess( BuildResult::Success{ .status = status, + .provenance = provenance, }); + + logger->result( + getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult(buildResult, DerivedPath::Opaque{storePath}))); + + return res; } Goal::Done PathSubstitutionGoal::doneFailure(ExitCode result, BuildResult::Failure::Status status, std::string errorMsg) { debug(errorMsg); - return Goal::doneFailure( + + auto res = Goal::doneFailure( 
result, BuildResult::Failure{ .status = status, .errorMsg = std::move(errorMsg), }); + + logger->result( + getCurActivity(), + resBuildResult, + nlohmann::json(KeyedBuildResult(buildResult, DerivedPath::Opaque{storePath}))); + + return res; } Goal::Co PathSubstitutionGoal::init() @@ -54,7 +73,7 @@ Goal::Co PathSubstitutionGoal::init() /* If the path already exists we're done. */ if (!repair && worker.store.isValidPath(storePath)) { - co_return doneSuccess(BuildResult::Success::AlreadyValid); + co_return doneSuccess(BuildResult::Success::AlreadyValid, nullptr); } if (settings.readOnlyMode) @@ -224,7 +243,7 @@ Goal::Co PathSubstitutionGoal::tryToRun( outPipe.createAsyncPipe(worker.ioport.get()); #endif - auto promise = std::promise(); + auto promise = std::promise>(); thr = std::thread([this, &promise, &subPath, &sub]() { try { @@ -239,9 +258,8 @@ Goal::Co PathSubstitutionGoal::tryToRun( Logger::Fields{worker.store.printStorePath(storePath), sub->config.getHumanReadableURI()}); PushActivity pact(act.id); - copyStorePath(*sub, worker.store, subPath, repair, sub->config.isTrusted ? NoCheckSigs : CheckSigs); - - promise.set_value(); + promise.set_value( + copyStorePath(*sub, worker.store, subPath, repair, sub->config.isTrusted ? NoCheckSigs : CheckSigs)); } catch (...) { promise.set_exception(std::current_exception()); } @@ -266,8 +284,12 @@ Goal::Co PathSubstitutionGoal::tryToRun( thr.join(); worker.childTerminated(this); + std::shared_ptr provenance; + try { - promise.get_future().get(); + auto info = promise.get_future().get(); + if (info) + provenance = info->provenance; } catch (std::exception & e) { /* Cause the parent build to fail unless --fallback is given, or the substitute has disappeared. 
The latter case behaves @@ -308,7 +330,7 @@ Goal::Co PathSubstitutionGoal::tryToRun( worker.updateProgress(); - co_return doneSuccess(BuildResult::Success::Substituted); + co_return doneSuccess(BuildResult::Success::Substituted, provenance); } void PathSubstitutionGoal::handleEOF(Descriptor fd) diff --git a/src/libstore/common-protocol.cc b/src/libstore/common-protocol.cc index b069c9498232..3db3c419fb21 100644 --- a/src/libstore/common-protocol.cc +++ b/src/libstore/common-protocol.cc @@ -26,13 +26,13 @@ void CommonProto::Serialise::write( StorePath CommonProto::Serialise::read(const StoreDirConfig & store, CommonProto::ReadConn conn) { - return store.parseStorePath(readString(conn.from)); + return conn.shortStorePaths ? StorePath(readString(conn.from)) : store.parseStorePath(readString(conn.from)); } void CommonProto::Serialise::write( const StoreDirConfig & store, CommonProto::WriteConn conn, const StorePath & storePath) { - conn.to << store.printStorePath(storePath); + conn.to << (conn.shortStorePaths ? storePath.to_string() : store.printStorePath(storePath)); } ContentAddress CommonProto::Serialise::read(const StoreDirConfig & store, CommonProto::ReadConn conn) @@ -78,13 +78,15 @@ std::optional CommonProto::Serialise>::read(const StoreDirConfig & store, CommonProto::ReadConn conn) { auto s = readString(conn.from); - return s == "" ? std::optional{} : store.parseStorePath(s); + return s == "" ? std::optional{} : conn.shortStorePaths ? StorePath(s) : store.parseStorePath(s); } void CommonProto::Serialise>::write( const StoreDirConfig & store, CommonProto::WriteConn conn, const std::optional & storePathOpt) { - conn.to << (storePathOpt ? store.printStorePath(*storePathOpt) : ""); + conn.to + << (storePathOpt ? (conn.shortStorePaths ? 
storePathOpt->to_string() : store.printStorePath(*storePathOpt)) + : ""); } std::optional diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index 4d1c9078ff00..e588cb45e248 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -17,6 +17,8 @@ #include "nix/util/git.hh" #include "nix/util/logging.hh" #include "nix/store/globals.hh" +#include "nix/store/active-builds.hh" +#include "nix/util/provenance.hh" #ifndef _WIN32 // TODO need graceful async exit support on Windows? # include "nix/util/monitor-fd.hh" @@ -432,6 +434,9 @@ static void performOp( bool repairBool; conn.from >> repairBool; auto repair = RepairFlag{repairBool}; + auto provenance = conn.features.contains(WorkerProto::featureProvenance) + ? Provenance::from_json_str_optional(readString(conn.from)) + : nullptr; logger->startWork(); auto pathInfo = [&]() { @@ -457,8 +462,8 @@ static void performOp( assert(false); } // TODO these two steps are essentially RemoteStore::addCAToStore. Move it up to Store. - auto path = - store->addToStoreFromDump(source, name, dumpMethod, contentAddressMethod, hashAlgo, refs, repair); + auto path = store->addToStoreFromDump( + source, name, dumpMethod, contentAddressMethod, hashAlgo, refs, repair, provenance); return store->queryPathInfo(path); }(); logger->stopWork(); @@ -746,6 +751,7 @@ static void performOp( options.action = WorkerProto::Serialise::read(*store, rconn); options.pathsToDelete = WorkerProto::Serialise::read(*store, rconn); conn.from >> options.ignoreLiveness >> options.maxFreed; + options.censor = !trusted; // obsolete fields readInt(conn.from); readInt(conn.from); @@ -754,7 +760,7 @@ static void performOp( GCResults results; logger->startWork(); - if (options.ignoreLiveness) + if (options.ignoreLiveness && !getEnv("_NIX_IN_TEST").has_value()) throw Error("you are not allowed to ignore liveness"); auto & gcStore = require(*store); gcStore.collectGarbage(options, results); @@ -852,7 +858,10 @@ static void performOp( auto path = 
WorkerProto::Serialise::read(*store, rconn); std::shared_ptr info; logger->startWork(); - info = store->queryPathInfo(path); + try { + info = store->queryPathInfo(path); + } catch (InvalidPath &) { + } logger->stopWork(); if (info) { conn.to << 1; @@ -911,6 +920,9 @@ static void performOp( conn.from >> info.registrationTime >> info.narSize >> info.ultimate; info.sigs = readStrings(conn.from); info.ca = ContentAddress::parseOpt(readString(conn.from)); + info.provenance = conn.features.contains(WorkerProto::featureProvenance) + ? Provenance::from_json_str_optional(readString(conn.from)) + : nullptr; conn.from >> repair >> dontCheckSigs; if (!trusted && dontCheckSigs) dontCheckSigs = false; @@ -1015,6 +1027,15 @@ static void performOp( case WorkerProto::Op::ClearFailedPaths: throw Error("Removed operation %1%", op); + case WorkerProto::Op::QueryActiveBuilds: { + logger->startWork(); + auto & activeBuildsStore = require(*store); + auto activeBuilds = activeBuildsStore.queryActiveBuilds(); + logger->stopWork(); + conn.to << nlohmann::json(activeBuilds).dump(); + break; + } + default: throw Error("invalid operation %1%", op); } @@ -1029,8 +1050,12 @@ void processConnection(ref store, FdSource && from, FdSink && to, Trusted #endif /* Exchange the greeting. 
*/ + auto myFeatures = WorkerProto::allFeatures; + if (!experimentalFeatureSettings.isEnabled(Xp::Provenance)) + myFeatures.erase(std::string(WorkerProto::featureProvenance)); + auto [protoVersion, features] = - WorkerProto::BasicServerConnection::handshake(to, from, PROTOCOL_VERSION, WorkerProto::allFeatures); + WorkerProto::BasicServerConnection::handshake(to, from, PROTOCOL_VERSION, myFeatures); if (protoVersion < MINIMUM_PROTOCOL_VERSION) throw Error("the Nix client version is too old"); diff --git a/src/libstore/derivation-options.cc b/src/libstore/derivation-options.cc index 2ead0c444c91..5403db288ac6 100644 --- a/src/libstore/derivation-options.cc +++ b/src/libstore/derivation-options.cc @@ -396,8 +396,8 @@ StringSet DerivationOptions::getRequiredSystemFeatures(const BasicDerivat template bool DerivationOptions::canBuildLocally(Store & localStore, const BasicDerivation & drv) const { - if (drv.platform != settings.thisSystem.get() && !settings.extraPlatforms.get().count(drv.platform) - && !drv.isBuiltin()) + if (drv.platform != settings.thisSystem.get() && drv.platform != "wasm32-wasip1" + && !settings.extraPlatforms.get().count(drv.platform) && !drv.isBuiltin()) return false; if (settings.maxBuildJobs.get() == 0 && !drv.isBuiltin()) diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index a4cdcb17a70e..5994e7cb43eb 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -9,6 +9,7 @@ #include "nix/store/common-protocol-impl.hh" #include "nix/util/strings-inline.hh" #include "nix/util/json-utils.hh" +#include "nix/store/async-path-writer.hh" #include #include @@ -126,19 +127,31 @@ static auto infoForDerivation(Store & store, const Derivation & drv) }; } -StorePath writeDerivation(Store & store, const Derivation & drv, RepairFlag repair, bool readOnly) +StorePath writeDerivation( + Store & store, + const Derivation & drv, + RepairFlag repair, + bool readOnly, + std::shared_ptr provenance) { if (readOnly || 
settings.readOnlyMode) { auto [_x, _y, _z, path] = infoForDerivation(store, drv); return path; } else - return store.writeDerivation(drv, repair); + return store.writeDerivation(drv, repair, provenance); } -StorePath Store::writeDerivation(const Derivation & drv, RepairFlag repair) +StorePath +Store::writeDerivation(const Derivation & drv, RepairFlag repair, std::shared_ptr provenance) { auto [suffix, contents, references, path] = infoForDerivation(*this, drv); + /* In case the derivation is already valid, we bail out early since that's + faster. But we need to make sure that the derivation has a corresponding + temproot. It is added by the remote in addToStoreFromDump, but we'd like + to avoid sending a lot of drv contents to the daemon. */ + addTempRoot(path); + if (isValidPath(path) && !repair) return path; @@ -150,12 +163,33 @@ StorePath Store::writeDerivation(const Derivation & drv, RepairFlag repair) ContentAddressMethod::Raw::Text, HashAlgorithm::SHA256, references, - repair); + repair, + provenance); assert(path2 == path); return path; } +StorePath writeDerivation( + Store & store, + AsyncPathWriter & asyncPathWriter, + const Derivation & drv, + RepairFlag repair, + bool readOnly, + std::shared_ptr provenance) +{ + auto references = drv.inputSrcs; + for (auto & i : drv.inputDrvs.map) + references.insert(i.first); + return asyncPathWriter.addPath( + drv.unparse(store, false), + std::string(drv.name) + drvExtension, + references, + repair, + readOnly || settings.readOnlyMode, + provenance); +} + namespace { /** * This mimics std::istream to some extent. 
We use this much smaller implementation diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 375ed7b2d242..32e95646a740 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -216,7 +216,9 @@ struct DummyStoreImpl : DummyStore if (info.path.isDerivation()) { warn("back compat supporting `addToStore` for inserting derivations in dummy store"); writeDerivation( - parseDerivation(*this, accessor->readFile(CanonPath::root), Derivation::nameFromPath(info.path))); + parseDerivation(*this, accessor->readFile(CanonPath::root), Derivation::nameFromPath(info.path)), + repair, + info.provenance); return; } @@ -233,11 +235,12 @@ struct DummyStoreImpl : DummyStore StorePath addToStoreFromDump( Source & source, std::string_view name, - FileSerialisationMethod dumpMethod = FileSerialisationMethod::NixArchive, - ContentAddressMethod hashMethod = FileIngestionMethod::NixArchive, - HashAlgorithm hashAlgo = HashAlgorithm::SHA256, - const StorePathSet & references = StorePathSet(), - RepairFlag repair = NoRepair) override + FileSerialisationMethod dumpMethod, + ContentAddressMethod hashMethod, + HashAlgorithm hashAlgo, + const StorePathSet & references, + RepairFlag repair, + std::shared_ptr provenance) override { if (isDerivation(name)) throw Error("Do not insert derivation into dummy store with `addToStoreFromDump`"); @@ -285,6 +288,7 @@ struct DummyStoreImpl : DummyStore std::move(narHash.first)); info.narSize = narHash.second.value(); + info.provenance = provenance; auto path = info.path; auto accessor = make_ref(std::move(*temp)); @@ -300,9 +304,10 @@ struct DummyStoreImpl : DummyStore return path; } - StorePath writeDerivation(const Derivation & drv, RepairFlag repair = NoRepair) override + StorePath + writeDerivation(const Derivation & drv, RepairFlag repair, std::shared_ptr provenance) override { - auto drvPath = ::nix::writeDerivation(*this, drv, repair, /*readonly=*/true); + auto drvPath = ::nix::writeDerivation(*this, drv, 
repair, /*readonly=*/true, provenance); if (!derivations.contains(drvPath) || repair) { if (config->readOnly) diff --git a/src/libstore/export-import.cc b/src/libstore/export-import.cc index b1c61626c8c7..7b6193c657d3 100644 --- a/src/libstore/export-import.cc +++ b/src/libstore/export-import.cc @@ -4,92 +4,163 @@ #include "nix/util/archive.hh" #include "nix/store/common-protocol.hh" #include "nix/store/common-protocol-impl.hh" - -#include +#include "nix/store/worker-protocol.hh" namespace nix { -static void exportPath(Store & store, const StorePath & path, Sink & sink) -{ - auto info = store.queryPathInfo(path); - - HashSink hashSink(HashAlgorithm::SHA256); - TeeSink teeSink(sink, hashSink); - - store.narFromPath(path, teeSink); - - /* Refuse to export paths that have changed. This prevents - filesystem corruption from spreading to other machines. - Don't complain if the stored hash is zero (unknown). */ - Hash hash = hashSink.currentHash().hash; - if (hash != info->narHash && info->narHash != Hash(info->narHash.algo)) - throw Error( - "hash of path '%s' has changed from '%s' to '%s'!", - store.printStorePath(path), - info->narHash.to_string(HashFormat::Nix32, true), - hash.to_string(HashFormat::Nix32, true)); - - teeSink << exportMagic << store.printStorePath(path); - CommonProto::write(store, CommonProto::WriteConn{.to = teeSink}, info->references); - teeSink << (info->deriver ? 
store.printStorePath(*info->deriver) : "") << 0; -} +static const uint32_t exportMagicV1 = 0x4558494e; +static const uint64_t exportMagicV2 = 0x324f4952414e; // = 'NARIO2' -void exportPaths(Store & store, const StorePathSet & paths, Sink & sink) +void exportPaths(Store & store, const StorePathSet & paths, Sink & sink, unsigned int version) { auto sorted = store.topoSortPaths(paths); std::reverse(sorted.begin(), sorted.end()); - for (auto & path : sorted) { - sink << 1; - exportPath(store, path, sink); + auto dumpNar = [&](const ValidPathInfo & info) { + HashSink hashSink(HashAlgorithm::SHA256); + TeeSink teeSink(sink, hashSink); + + store.narFromPath(info.path, teeSink); + + /* Refuse to export paths that have changed. This prevents + filesystem corruption from spreading to other machines. + Don't complain if the stored hash is zero (unknown). */ + Hash hash = hashSink.currentHash().hash; + if (hash != info.narHash && info.narHash != Hash(info.narHash.algo)) + throw Error( + "hash of path '%s' has changed from '%s' to '%s'!", + store.printStorePath(info.path), + info.narHash.to_string(HashFormat::Nix32, true), + hash.to_string(HashFormat::Nix32, true)); + }; + + switch (version) { + + case 1: + for (auto & path : sorted) { + sink << 1; + auto info = store.queryPathInfo(path); + dumpNar(*info); + sink << exportMagicV1 << store.printStorePath(path); + CommonProto::write(store, CommonProto::WriteConn{.to = sink}, info->references); + sink << (info->deriver ? store.printStorePath(*info->deriver) : "") << 0; + } + sink << 0; + break; + + case 2: + sink << exportMagicV2; + + for (auto & path : sorted) { + Activity act(*logger, lvlTalkative, actUnknown, fmt("exporting path '%s'", store.printStorePath(path))); + sink << 1; + auto info = store.queryPathInfo(path); + // FIXME: move to CommonProto? 
+ WorkerProto::Serialise::write( + store, WorkerProto::WriteConn{.to = sink, .version = 16, .shortStorePaths = true}, *info); + dumpNar(*info); + } + + sink << 0; + break; + + default: + throw Error("unsupported nario version %d", version); } - - sink << 0; } StorePaths importPaths(Store & store, Source & source, CheckSigsFlag checkSigs) { StorePaths res; - while (true) { - auto n = readNum(source); - if (n == 0) - break; - if (n != 1) - throw Error("input doesn't look like something created by 'nix-store --export'"); - - /* Extract the NAR from the source. */ + + auto version = readNum(source); + + /* Note: nario version 1 lacks an explicit header. The first + integer denotes whether a store path follows or not. So look + for 0 or 1. */ + switch (version) { + + case 0: + /* Empty version 1 nario, nothing to do. */ + break; + + case 1: { + /* Reuse a string buffer to avoid kernel overhead allocating + memory for large strings. */ StringSink saved; - TeeSource tee{source, saved}; - NullFileSystemObjectSink ether; - parseDump(ether, tee); - uint32_t magic = readInt(source); - if (magic != exportMagic) - throw Error("Nix archive cannot be imported; wrong format"); + /* Non-empty version 1 nario. */ + while (true) { + /* Extract the NAR from the source. */ + saved.s.clear(); + TeeSource tee{source, saved}; + NullFileSystemObjectSink ether; + parseDump(ether, tee); + + uint32_t magic = readInt(source); + if (magic != exportMagicV1) + throw Error("nario cannot be imported; wrong format"); + + auto path = store.parseStorePath(readString(source)); + + auto references = CommonProto::Serialise::read(store, CommonProto::ReadConn{.from = source}); + auto deriver = readString(source); + + // Ignore optional legacy signature. 
+ if (readInt(source) == 1) + readString(source); + + if (!store.isValidPath(path)) { + auto narHash = hashString(HashAlgorithm::SHA256, saved.s); + + ValidPathInfo info{path, {store, narHash}}; + if (deriver != "") + info.deriver = store.parseStorePath(deriver); + info.references = references; + info.narSize = saved.s.size(); + + // Can't use underlying source, which would have been exhausted. + auto source2 = StringSource(saved.s); + store.addToStore(info, source2, NoRepair, checkSigs); + } + + res.push_back(path); + + auto n = readNum(source); + if (n == 0) + break; + if (n != 1) + throw Error("input doesn't look like a nario"); + } + break; + } - auto path = store.parseStorePath(readString(source)); + case exportMagicV2: + while (true) { + auto n = readNum(source); + if (n == 0) + break; + if (n != 1) + throw Error("input doesn't look like a nario"); - // Activity act(*logger, lvlInfo, "importing path '%s'", info.path); + auto info = WorkerProto::Serialise::read( + store, WorkerProto::ReadConn{.from = source, .version = 16, .shortStorePaths = true}); - auto references = CommonProto::Serialise::read(store, CommonProto::ReadConn{.from = source}); - auto deriver = readString(source); - auto narHash = hashString(HashAlgorithm::SHA256, saved.s); + if (!store.isValidPath(info.path)) { + Activity act( + *logger, lvlTalkative, actUnknown, fmt("importing path '%s'", store.printStorePath(info.path))); - ValidPathInfo info{path, {store, narHash}}; - if (deriver != "") - info.deriver = store.parseStorePath(deriver); - info.references = references; - info.narSize = saved.s.size(); + store.addToStore(info, source, NoRepair, checkSigs); + } else + source.skip(info.narSize); - // Ignore optional legacy signature. 
- if (readInt(source) == 1) - readString(source); + res.push_back(info.path); + } - // Can't use underlying source, which would have been exhausted - auto source = StringSource(saved.s); - store.addToStore(info, source, NoRepair, checkSigs); + break; - res.push_back(info.path); + default: + throw Error("input doesn't look like a nario"); } return res; diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 8a7577146efc..04cf164fa1f3 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -30,8 +30,6 @@ #include #include -using namespace std::string_literals; - namespace nix { const unsigned int RETRY_TIME_MS_DEFAULT = 250; @@ -53,14 +51,15 @@ struct curlFileTransfer : public FileTransfer curlFileTransfer & fileTransfer; FileTransferRequest request; FileTransferResult result; - Activity act; + std::unique_ptr _act; bool done = false; // whether either the success or failure function has been called Callback callback; CURL * req = 0; // buffer to accompany the `req` above char errbuf[CURL_ERROR_SIZE]; - bool active = false; // whether the handle has been added to the multi object - bool paused = false; // whether the request has been paused previously + bool active = false; // whether the handle has been added to the multi object + bool paused = false; // whether the request has been paused previously + bool enqueued = false; // whether the request has been added to the incoming queue std::string statusMsg; unsigned int attempt = 0; @@ -98,12 +97,6 @@ struct curlFileTransfer : public FileTransfer Callback && callback) : fileTransfer(fileTransfer) , request(request) - , act(*logger, - lvlTalkative, - actFileTransfer, - fmt("%s '%s'", request.verb(/*continuous=*/true), request.uri), - {request.uri.to_string()}, - request.parentAct) , callback(std::move(callback)) , finalSink([this](std::string_view data) { if (errorSink) { @@ -160,7 +153,7 @@ struct curlFileTransfer : public FileTransfer if (requestHeaders) 
curl_slist_free_all(requestHeaders); try { - if (!done) + if (!done && enqueued) fail(FileTransferError( Interrupted, {}, "%s of '%s' was interrupted", Uncolored(request.noun()), request.uri)); } catch (...) { @@ -310,9 +303,29 @@ struct curlFileTransfer : public FileTransfer return ((TransferItem *) userp)->headerCallback(contents, size, nmemb); } + /** + * Lazily start an `Activity`. We don't do this in the `TransferItem` constructor to avoid showing downloads + * that are only enqueued but not actually started. + */ + Activity & act() + { + if (!_act) { + _act = std::make_unique( + *logger, + lvlTalkative, + actFileTransfer, + fmt("%s '%s'", request.verb(/*continuous=*/true), request.uri), + Logger::Fields{request.uri.to_string()}, + request.parentAct); + // Reset the start time to when we actually started the download. + startTime = std::chrono::steady_clock::now(); + } + return *_act; + } + int progressCallback(curl_off_t dltotal, curl_off_t dlnow) noexcept try { - act.progress(dlnow, dltotal); + act().progress(dlnow, dltotal); return getInterrupted(); } catch (nix::Interrupted &) { assert(getInterrupted()); @@ -389,6 +402,15 @@ struct curlFileTransfer : public FileTransfer return ((TransferItem *) clientp)->seekCallback(offset, origin); } + static int resolverCallbackWrapper(void *, void *, void * clientp) noexcept + try { + // Create the `Activity` associated with this download. + ((TransferItem *) clientp)->act(); + return 0; + } catch (...) { + return 1; + } + void unpause() { /* Unpausing an already unpaused transfer is a no-op. */ @@ -417,7 +439,7 @@ struct curlFileTransfer : public FileTransfer curl_easy_setopt( req, CURLOPT_USERAGENT, - ("curl/" LIBCURL_VERSION " Nix/" + nixVersion + ("curl/" LIBCURL_VERSION " Nix/" + nixVersion + " DeterminateNix/" + determinateNixVersion + (fileTransferSettings.userAgentSuffix != "" ? 
" " + fileTransferSettings.userAgentSuffix.get() : "")) .c_str()); #if LIBCURL_VERSION_NUM >= 0x072b00 @@ -479,6 +501,12 @@ struct curlFileTransfer : public FileTransfer curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, fileTransferSettings.connectTimeout.get()); + // Enable TCP keep-alive so that idle connections in curl's reuse pool + // are not silently dropped by NATs, firewalls, or load balancers. + curl_easy_setopt(req, CURLOPT_TCP_KEEPALIVE, 1L); + curl_easy_setopt(req, CURLOPT_TCP_KEEPIDLE, 60L); + curl_easy_setopt(req, CURLOPT_TCP_KEEPINTVL, 60L); + curl_easy_setopt(req, CURLOPT_LOW_SPEED_LIMIT, 1L); curl_easy_setopt(req, CURLOPT_LOW_SPEED_TIME, fileTransferSettings.stalledDownloadTimeout.get()); @@ -511,6 +539,11 @@ struct curlFileTransfer : public FileTransfer } #endif + // This seems to be the earliest libcurl callback that signals that the download is happening, so we can + // call act(). + curl_easy_setopt(req, CURLOPT_RESOLVER_START_FUNCTION, resolverCallbackWrapper); + curl_easy_setopt(req, CURLOPT_RESOLVER_START_DATA, this); + result.data.clear(); result.bodySize = 0; } @@ -559,7 +592,7 @@ struct curlFileTransfer : public FileTransfer if (httpStatus == 304 && result.etag == "") result.etag = request.expectedETag; - act.progress(result.bodySize, result.bodySize); + act().progress(result.bodySize, result.bodySize); done = true; callback(std::move(result)); } @@ -717,6 +750,11 @@ struct curlFileTransfer : public FileTransfer std::thread workerThread; + const size_t maxQueueSize = + (fileTransferSettings.httpConnections.get() ? fileTransferSettings.httpConnections.get() + : std::max(1U, std::thread::hardware_concurrency())) + * 5; + curlFileTransfer() : mt19937(rd()) { @@ -764,8 +802,13 @@ struct curlFileTransfer : public FileTransfer void workerThreadMain() { + /* NOTE(cole-h): the maxQueueSize needs to be >0 or else things will hang */ + assert(maxQueueSize > 0); + /* Cause this thread to be notified on SIGINT. 
*/ -#ifndef _WIN32 // TODO need graceful async exit support on Windows? +#if !defined(_WIN32) && !defined(IS_STATIC) // TODO need graceful async exit support on Windows? + // FIXME(RossComputerGuy): this causes issues on static builds. + // In particular, it causes a segfault to happen at the end of the program running. auto callback = createInterruptCallback([&]() { stopWorkerThread(); }); #endif @@ -846,6 +889,13 @@ struct curlFileTransfer : public FileTransfer { auto state(state_.lock()); while (!state->incoming.empty()) { + /* Limit the number of active curl handles, since curl doesn't scale well. */ + if (items.size() + incoming.size() >= maxQueueSize) { + auto t = now + std::chrono::milliseconds(100); + if (nextWakeup == std::chrono::steady_clock::time_point() || t < nextWakeup) + nextWakeup = t; + break; + } auto item = state->incoming.top(); if (item->embargo <= now) { incoming.push_back(item); @@ -912,6 +962,7 @@ struct curlFileTransfer : public FileTransfer if (state->isQuitting()) throw nix::Error("cannot enqueue download request because the download thread is shutting down"); state->incoming.push(item); + item->enqueued = true; } #ifndef _WIN32 // TODO need graceful async exit support on Windows? 
writeFull(wakeupPipe.writeSide.get(), " "); @@ -947,24 +998,29 @@ struct curlFileTransfer : public FileTransfer } }; -ref makeCurlFileTransfer() -{ - return make_ref(); -} +static Sync> _fileTransfer; ref getFileTransfer() { - static ref fileTransfer = makeCurlFileTransfer(); + auto fileTransfer(_fileTransfer.lock()); - if (fileTransfer->state_.lock()->isQuitting()) - fileTransfer = makeCurlFileTransfer(); + if (!*fileTransfer || (*fileTransfer)->state_.lock()->isQuitting()) + *fileTransfer = std::make_shared(); - return fileTransfer; + return ref(*fileTransfer); } ref makeFileTransfer() { - return makeCurlFileTransfer(); + return make_ref(); +} + +std::shared_ptr resetFileTransfer() +{ + auto fileTransfer(_fileTransfer.lock()); + std::shared_ptr prev; + fileTransfer->swap(prev); + return prev; } void FileTransferRequest::setupForS3() diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index 4846d445fe1f..37f148cbc431 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -208,7 +208,7 @@ void LocalStore::findTempRoots(Roots & tempRoots, bool censor) while ((end = contents.find((char) 0, pos)) != std::string::npos) { Path root(contents, pos, end - pos); debug("got temporary root '%s'", root); - tempRoots[parseStorePath(root)].emplace(censor ? censored : fmt("{temp:%d}", pid)); + tempRoots[parseStorePath(root)].emplace(censor ? censored : fmt("{nix-process:%d}", pid)); pos = end + 1; } } @@ -467,13 +467,14 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) bool gcKeepOutputs = settings.gcKeepOutputs; bool gcKeepDerivations = settings.gcKeepDerivations; - boost::unordered_flat_set> roots, dead, alive; + Roots roots; + boost::unordered_flat_set> dead, alive; struct Shared { // The temp roots only store the hash part to make it easier to // ignore suffixes like '.lock', '.chroot' and '.check'. 
- boost::unordered_flat_set> tempRoots; + boost::unordered_flat_map tempRoots; // Hash part of the store path currently being deleted, if // any. @@ -584,7 +585,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) debug("got new GC root '%s'", path); auto hashPart = storePath->hashPart(); auto shared(_shared.lock()); - shared->tempRoots.emplace(hashPart); + // FIXME: could get the PID from the socket. + shared->tempRoots.insert_or_assign(std::string(hashPart), "{nix-process:unknown}"); /* If this path is currently being deleted, then we have to wait until deletion is finished to ensure that @@ -624,20 +626,16 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) /* Find the roots. Since we've grabbed the GC lock, the set of permanent roots cannot increase now. */ printInfo("finding garbage collector roots..."); - Roots rootMap; if (!options.ignoreLiveness) - findRootsNoTemp(rootMap, true); - - for (auto & i : rootMap) - roots.insert(i.first); + findRootsNoTemp(roots, options.censor); /* Read the temporary roots created before we acquired the global GC root. Any new roots will be sent to our socket. */ - Roots tempRoots; - findTempRoots(tempRoots, true); - for (auto & root : tempRoots) { - _shared.lock()->tempRoots.emplace(root.first.hashPart()); - roots.insert(root.first); + { + Roots tempRoots; + findTempRoots(tempRoots, options.censor); + for (auto & root : tempRoots) + _shared.lock()->tempRoots.insert_or_assign(std::string(root.first.hashPart()), *root.second.begin()); } /* Synchronisation point for testing, see tests/functional/gc-non-blocking.sh. 
*/ @@ -733,20 +731,32 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) } }; + if (options.action == GCOptions::gcDeleteSpecific && !options.pathsToDelete.count(*path)) { + throw Error( + "Cannot delete path '%s' because it's referenced by path '%s'.", + printStorePath(start), + printStorePath(*path)); + } + /* If this is a root, bail out. */ - if (roots.count(*path)) { + if (auto i = roots.find(*path); i != roots.end()) { + if (options.action == GCOptions::gcDeleteSpecific) + throw Error( + "Cannot delete path '%s' because it's referenced by the GC root '%s'.", + printStorePath(start), + *i->second.begin()); debug("cannot delete '%s' because it's a root", printStorePath(*path)); return markAlive(); } - if (options.action == GCOptions::gcDeleteSpecific && !options.pathsToDelete.count(*path)) - return; - - { + static bool inTest = getEnv("_NIX_IN_TEST").has_value(); + if (!(inTest && options.ignoreLiveness)) { auto hashPart = path->hashPart(); auto shared(_shared.lock()); - if (shared->tempRoots.count(hashPart)) { - debug("cannot delete '%s' because it's a temporary root", printStorePath(*path)); + if (auto i = shared->tempRoots.find(std::string(hashPart)); i != shared->tempRoots.end()) { + if (options.action == GCOptions::gcDeleteSpecific) + throw Error( + "Cannot delete path '%s' because it's in use by '%s'.", printStorePath(start), i->second); return markAlive(); } shared->pending = hashPart; @@ -805,12 +815,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) for (auto & i : options.pathsToDelete) { deleteReferrersClosure(i); - if (!dead.count(i)) - throw Error( - "Cannot delete path '%1%' since it is still alive. 
" - "To find out why, use: " - "nix-store --query --roots and nix-store --query --referrers", - printStorePath(i)); + assert(dead.count(i)); } } else if (options.maxFreed > 0) { diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 27d17e1a9b94..4acfd91969ae 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -15,6 +15,8 @@ #include #include +#include + #ifndef _WIN32 # include #endif @@ -267,8 +269,24 @@ const ExternalBuilder * Settings::findExternalDerivationBuilderIfSupported(const return nullptr; } +std::optional Settings::getHostName() +{ + if (hostName != "") + return hostName; + +#ifndef _WIN32 + char hostname[_POSIX_HOST_NAME_MAX + 1]; + if (gethostname(hostname, sizeof(hostname)) == 0) + return std::string(hostname); +#endif + + return std::nullopt; +} + std::string nixVersion = PACKAGE_VERSION; +const std::string determinateNixVersion = DETERMINATE_NIX_VERSION; + NLOHMANN_JSON_SERIALIZE_ENUM( SandboxMode, { diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index ef6ae92a44d3..d4361264edf6 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -182,7 +182,7 @@ FileTransferRequest HttpBinaryCacheStore::makeRequest(std::string_view path) /* path is not a path, but a full relative or absolute URL, e.g. we've seen in the wild NARINFO files have a URL field which is - `nar/15f99rdaf26k39knmzry4xd0d97wp6yfpnfk1z9avakis7ipb9yg.nar?hash=zphkqn2wg8mnvbkixnl2aadkbn0rcnfj` + `nar/15f99rdaf26k39knmzry4xd0d97wp6yfpnfk1z9avakis7ipb9yg.nar?hash=wvx0nans273vb7b0cjlplsmr2z905hwd` (note the query param) and that gets passed here. 
*/ auto result = parseURLRelative(path, cacheUriWithTrailingSlash); diff --git a/src/libstore/include/nix/store/active-builds.hh b/src/libstore/include/nix/store/active-builds.hh new file mode 100644 index 000000000000..c8a40e137986 --- /dev/null +++ b/src/libstore/include/nix/store/active-builds.hh @@ -0,0 +1,108 @@ +#pragma once + +#include "nix/util/util.hh" +#include "nix/util/json-impls.hh" +#include "nix/store/path.hh" + +#include +#include + +namespace nix { + +/** + * A uid and optional corresponding user name. + */ +struct UserInfo +{ + uid_t uid = -1; + std::optional name; + + /** + * Create a UserInfo from a UID, looking up the username if possible. + */ + static UserInfo fromUid(uid_t uid); +}; + +struct ActiveBuild +{ + pid_t nixPid; + + std::optional clientPid; + std::optional clientUid; + + pid_t mainPid; + UserInfo mainUser; + std::optional cgroup; + + time_t startTime; + + StorePath derivation; +}; + +struct ActiveBuildInfo : ActiveBuild +{ + struct ProcessInfo + { + pid_t pid = 0; + pid_t parentPid = 0; + UserInfo user; + std::vector argv; + std::optional utime, stime, cutime, cstime; + }; + + // User/system CPU time for the entire cgroup, if available. + std::optional utime, stime; + + std::vector processes; +}; + +struct TrackActiveBuildsStore +{ + struct BuildHandle + { + TrackActiveBuildsStore & tracker; + uint64_t id; + + BuildHandle(TrackActiveBuildsStore & tracker, uint64_t id) + : tracker(tracker) + , id(id) + { + } + + BuildHandle(BuildHandle && other) noexcept + : tracker(other.tracker) + , id(other.id) + { + other.id = 0; + } + + ~BuildHandle() + { + if (id) { + try { + tracker.buildFinished(*this); + } catch (...) 
{ + ignoreExceptionInDestructor(); + } + } + } + }; + + virtual BuildHandle buildStarted(const ActiveBuild & build) = 0; + + virtual void buildFinished(const BuildHandle & handle) = 0; +}; + +struct QueryActiveBuildsStore +{ + inline static std::string operationName = "Querying active builds"; + + virtual std::vector queryActiveBuilds() = 0; +}; + +} // namespace nix + +JSON_IMPL(UserInfo) +JSON_IMPL(ActiveBuild) +JSON_IMPL(ActiveBuildInfo) +JSON_IMPL(ActiveBuildInfo::ProcessInfo) diff --git a/src/libstore/include/nix/store/async-path-writer.hh b/src/libstore/include/nix/store/async-path-writer.hh new file mode 100644 index 000000000000..d64418479bc6 --- /dev/null +++ b/src/libstore/include/nix/store/async-path-writer.hh @@ -0,0 +1,24 @@ +#pragma once + +#include "nix/store/store-api.hh" + +namespace nix { + +struct AsyncPathWriter +{ + virtual StorePath addPath( + std::string contents, + std::string name, + StorePathSet references, + RepairFlag repair, + bool readOnly = false, + std::shared_ptr provenance = {}) = 0; + + virtual void waitForPath(const StorePath & path) = 0; + + virtual void waitForAllPaths() = 0; + + static ref make(ref store); +}; + +} // namespace nix diff --git a/src/libstore/include/nix/store/binary-cache-store.hh b/src/libstore/include/nix/store/binary-cache-store.hh index e7b3d07ebb66..4cb7b23f2d31 100644 --- a/src/libstore/include/nix/store/binary-cache-store.hh +++ b/src/libstore/include/nix/store/binary-cache-store.hh @@ -100,6 +100,11 @@ protected: public: + bool includeInProvenance() override + { + return true; + } + virtual bool fileExists(const std::string & path) = 0; virtual void upsertFile( @@ -183,7 +188,8 @@ public: ContentAddressMethod hashMethod, HashAlgorithm hashAlgo, const StorePathSet & references, - RepairFlag repair) override; + RepairFlag repair, + std::shared_ptr provenance) override; StorePath addToStore( std::string_view name, diff --git a/src/libstore/include/nix/store/build-result.hh 
b/src/libstore/include/nix/store/build-result.hh index 96134791b9d9..e6cbd1f73ccd 100644 --- a/src/libstore/include/nix/store/build-result.hh +++ b/src/libstore/include/nix/store/build-result.hh @@ -11,6 +11,8 @@ namespace nix { +struct Provenance; + struct BuildResult { struct Success @@ -30,12 +32,20 @@ struct BuildResult ResolvesToAlreadyValid = 13, } status; + static std::string_view statusToString(Status status); + /** * For derivations, a mapping from the names of the wanted outputs * to actual paths. */ SingleDrvOutputs builtOutputs; + /** + * The provenance of the derivation, if any. Note that this is the provenance of the current build, not + * necessarily of previously existing outputs. + */ + std::shared_ptr provenance; + bool operator==(const BuildResult::Success &) const noexcept; std::strong_ordering operator<=>(const BuildResult::Success &) const noexcept; @@ -74,8 +84,11 @@ struct BuildResult /// know about this one, so change it back to `OutputRejected` /// before serialization. HashMismatch = 15, + Cancelled = 16, } status = MiscFailure; + static std::string_view statusToString(Status status); + /** * Information about the error if the build failed. * @@ -92,12 +105,17 @@ struct BuildResult */ bool isNonDeterministic = false; + /** + * The provenance of the derivation, if any. + */ + std::shared_ptr provenance; + bool operator==(const BuildResult::Failure &) const noexcept; std::strong_ordering operator<=>(const BuildResult::Failure &) const noexcept; [[noreturn]] void rethrow() const { - throw Error("%s", errorMsg); + throw Error("%s", errorMsg.empty() ? statusToString(status) : errorMsg); } }; @@ -141,6 +159,13 @@ struct BuildResult bool operator==(const BuildResult &) const noexcept; std::strong_ordering operator<=>(const BuildResult &) const noexcept; + + bool isCancelled() const + { + auto failure = tryGetFailure(); + // FIXME: remove MiscFailure eventually. 
+ return failure && (failure->status == Failure::Cancelled || failure->status == Failure::MiscFailure); + } }; /** diff --git a/src/libstore/include/nix/store/build/derivation-builder.hh b/src/libstore/include/nix/store/build/derivation-builder.hh index ca6319855ad3..8ff8613b689e 100644 --- a/src/libstore/include/nix/store/build/derivation-builder.hh +++ b/src/libstore/include/nix/store/build/derivation-builder.hh @@ -57,6 +57,11 @@ struct DerivationBuilderParams /** The path of the derivation. */ const StorePath & drvPath; + /** + * The provenance of the derivation, if known + */ + const std::shared_ptr drvProvenance; + BuildResult & buildResult; /** @@ -98,6 +103,11 @@ struct DerivationBuilderParams StringSet systemFeatures; DesugaredEnv desugaredEnv; + + /** + * The activity corresponding to the build. + */ + std::unique_ptr & act; }; /** diff --git a/src/libstore/include/nix/store/build/derivation-building-goal.hh b/src/libstore/include/nix/store/build/derivation-building-goal.hh index e745c12c7ba4..75ca43b41f91 100644 --- a/src/libstore/include/nix/store/build/derivation-building-goal.hh +++ b/src/libstore/include/nix/store/build/derivation-building-goal.hh @@ -156,7 +156,10 @@ private: */ void killChild(); - Done doneSuccess(BuildResult::Success::Status status, SingleDrvOutputs builtOutputs); + Done doneSuccess( + BuildResult::Success::Status status, + SingleDrvOutputs builtOutputs, + std::shared_ptr provenance = nullptr); Done doneFailure(BuildError ex); diff --git a/src/libstore/include/nix/store/build/goal.hh b/src/libstore/include/nix/store/build/goal.hh index cb1cb94e4628..d26803532dbe 100644 --- a/src/libstore/include/nix/store/build/goal.hh +++ b/src/libstore/include/nix/store/build/goal.hh @@ -109,7 +109,7 @@ public: /** * Build result. */ - BuildResult buildResult; + BuildResult buildResult = {.inner = BuildResult::Failure{.status = BuildResult::Failure::Cancelled}}; /** * Suspend our goal and wait until we get `work`-ed again. 
diff --git a/src/libstore/include/nix/store/build/substitution-goal.hh b/src/libstore/include/nix/store/build/substitution-goal.hh index 5f33b9aa5d78..1b7d956a1a2e 100644 --- a/src/libstore/include/nix/store/build/substitution-goal.hh +++ b/src/libstore/include/nix/store/build/substitution-goal.hh @@ -41,7 +41,7 @@ struct PathSubstitutionGoal : public Goal */ std::optional ca; - Done doneSuccess(BuildResult::Success::Status status); + Done doneSuccess(BuildResult::Success::Status status, std::shared_ptr provenance); Done doneFailure(ExitCode result, BuildResult::Failure::Status status, std::string errorMsg); diff --git a/src/libstore/include/nix/store/builtins.hh b/src/libstore/include/nix/store/builtins.hh index 7cc9c0911029..fee11e59e9f3 100644 --- a/src/libstore/include/nix/store/builtins.hh +++ b/src/libstore/include/nix/store/builtins.hh @@ -8,8 +8,12 @@ # include "nix/store/aws-creds.hh" #endif +#include + namespace nix { +struct StructuredAttrs; + struct BuiltinBuilderContext { const BasicDerivation & drv; diff --git a/src/libstore/include/nix/store/common-protocol.hh b/src/libstore/include/nix/store/common-protocol.hh index c1d22fa6c54b..6139afc5d2ec 100644 --- a/src/libstore/include/nix/store/common-protocol.hh +++ b/src/libstore/include/nix/store/common-protocol.hh @@ -30,6 +30,7 @@ struct CommonProto struct ReadConn { Source & from; + bool shortStorePaths = false; }; /** @@ -39,6 +40,7 @@ struct CommonProto struct WriteConn { Sink & to; + bool shortStorePaths = false; }; template diff --git a/src/libstore/include/nix/store/derivations.hh b/src/libstore/include/nix/store/derivations.hh index a8c702fc366c..3b07072d99d4 100644 --- a/src/libstore/include/nix/store/derivations.hh +++ b/src/libstore/include/nix/store/derivations.hh @@ -17,6 +17,8 @@ namespace nix { struct StoreDirConfig; +struct AsyncPathWriter; +struct Provenance; /* Abstract syntax of derivations. 
*/ @@ -455,7 +457,23 @@ class Store; /** * Write a derivation to the Nix store, and return its path. */ -StorePath writeDerivation(Store & store, const Derivation & drv, RepairFlag repair = NoRepair, bool readOnly = false); +StorePath writeDerivation( + Store & store, + const Derivation & drv, + RepairFlag repair = NoRepair, + bool readOnly = false, + std::shared_ptr provenance = nullptr); + +/** + * Asynchronously write a derivation to the Nix store, and return its path. + */ +StorePath writeDerivation( + Store & store, + AsyncPathWriter & asyncPathWriter, + const Derivation & drv, + RepairFlag repair = NoRepair, + bool readOnly = false, + std::shared_ptr provenance = nullptr); /** * Read a derivation from a file. diff --git a/src/libstore/include/nix/store/export-import.hh b/src/libstore/include/nix/store/export-import.hh index 15092202f1f6..4ea696f992f9 100644 --- a/src/libstore/include/nix/store/export-import.hh +++ b/src/libstore/include/nix/store/export-import.hh @@ -4,16 +4,11 @@ namespace nix { -/** - * Magic header of exportPath() output (obsolete). - */ -const uint32_t exportMagic = 0x4558494e; - /** * Export multiple paths in the format expected by `nix-store * --import`. The paths will be sorted topologically. 
*/ -void exportPaths(Store & store, const StorePathSet & paths, Sink & sink); +void exportPaths(Store & store, const StorePathSet & paths, Sink & sink, unsigned int version); /** * Import a sequence of NAR dumps created by `exportPaths()` into the diff --git a/src/libstore/include/nix/store/filetransfer.hh b/src/libstore/include/nix/store/filetransfer.hh index 57b781c3320e..fa8a649e2b36 100644 --- a/src/libstore/include/nix/store/filetransfer.hh +++ b/src/libstore/include/nix/store/filetransfer.hh @@ -328,6 +328,8 @@ ref getFileTransfer(); */ ref makeFileTransfer(); +std::shared_ptr resetFileTransfer(); + class FileTransferError : public Error { public: diff --git a/src/libstore/include/nix/store/gc-store.hh b/src/libstore/include/nix/store/gc-store.hh index 7f04ed5a2c2f..de7a71382f92 100644 --- a/src/libstore/include/nix/store/gc-store.hh +++ b/src/libstore/include/nix/store/gc-store.hh @@ -7,9 +7,13 @@ namespace nix { +// FIXME: should turn this into an std::variant to represent the +// several root types. +using GcRootInfo = std::string; + typedef boost::unordered_flat_map< StorePath, - boost::unordered_flat_set>, + boost::unordered_flat_set>, std::hash> Roots; @@ -58,6 +62,12 @@ struct GCOptions * Stop after at least `maxFreed` bytes have been freed. */ uint64_t maxFreed{std::numeric_limits::max()}; + + /** + * Whether to hide potentially sensitive information about GC + * roots (such as PIDs). 
+ */ + bool censor = false; }; struct GCResults diff --git a/src/libstore/include/nix/store/globals.hh b/src/libstore/include/nix/store/globals.hh index 89e92d30e444..23ffc0d49f8c 100644 --- a/src/libstore/include/nix/store/globals.hh +++ b/src/libstore/include/nix/store/globals.hh @@ -225,12 +225,8 @@ public: The following system types are widely used, as Nix is actively supported on these platforms: - `x86_64-linux` - - `x86_64-darwin` - - `i686-linux` - `aarch64-linux` - `aarch64-darwin` - - `armv6l-linux` - - `armv7l-linux` In general, you do not have to modify this setting. While you can force Nix to run a Darwin-specific `builder` executable on a Linux machine, the result would obviously be wrong. @@ -1061,6 +1057,17 @@ public: mismatch if the build isn't reproducible. )"}; + Setting ttlNarInfoCacheMeta{ + this, + 7 * 24 * 3600, + "narinfo-cache-meta-ttl", + R"( + The TTL in seconds for caching binary cache metadata (i.e. + `/nix-cache-info`). This determines how long information about a + binary cache (such as its store directory, priority, and whether it + wants mass queries) is considered valid before being refreshed. + )"}; + Setting printMissing{ this, true, "print-missing", "Whether to print what paths need to be built or downloaded."}; @@ -1125,11 +1132,11 @@ public: character. Example: - `/nix/store/zf5lbh336mnzf1nlswdn11g4n2m8zh3g-bash-4.4-p23-dev - /nix/store/rjxwxwv1fpn9wa2x5ssk5phzwlcv4mna-bash-4.4-p23-doc - /nix/store/6bqvbzjkcp9695dq0dpl5y43nvy37pq1-bash-4.4-p23-info - /nix/store/r7fng3kk3vlpdlh2idnrbn37vh4imlj2-bash-4.4-p23-man - /nix/store/xfghy8ixrhz3kyy6p724iv3cxji088dx-bash-4.4-p23`. + `/nix/store/l88brggg9hpy96ijds34dlq4n8fan63g-bash-4.4-p23-dev + /nix/store/vch71bhyi5akr5zs40k8h2wqxx69j80l-bash-4.4-p23-doc + /nix/store/c5cxjywi66iwn9dcx5yvwjkvl559ay6p-bash-4.4-p23-info + /nix/store/scz72lskj03ihkcn42ias5mlp4i4gr1k-bash-4.4-p23-man + /nix/store/a724znygmd1cac856j3gfsyvih3lw07j-bash-4.4-p23`. 
)"}; Setting downloadSpeed{ @@ -1357,11 +1364,12 @@ public: Setting upgradeNixStorePathUrl{ this, - "https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/tools/nix-fallback-paths.nix", + "", "upgrade-nix-store-path-url", R"( - Used by `nix upgrade-nix`, the URL of the file that contains the - store paths of the latest Nix release. + Deprecated. This option was used to configure how `nix upgrade-nix` operated. + + Using this setting has no effect. It will be removed in a future release of Determinate Nix. )"}; Setting warnLargePathThreshold{ @@ -1444,6 +1452,16 @@ public: * derivation, or else returns a null pointer. */ const ExternalBuilder * findExternalDerivationBuilderIfSupported(const Derivation & drv); + + Setting hostName{ + this, + "", + "host-name", + R"( + The name of this host for recording build provenance. If unset, the Unix host name is used. + )"}; + + std::optional getHostName(); }; // FIXME: don't use a global variable. @@ -1470,6 +1488,8 @@ std::vector getUserConfigFiles(); */ extern std::string nixVersion; +extern const std::string determinateNixVersion; + /** * @param loadConfig Whether to load configuration from `nix.conf`, `NIX_CONFIG`, etc. May be disabled for unit tests. * @note When using libexpr, and/or libmain, This is not sufficient. See initNix(). 
diff --git a/src/libstore/include/nix/store/legacy-ssh-store.hh b/src/libstore/include/nix/store/legacy-ssh-store.hh index 994918f90f08..6fd077796044 100644 --- a/src/libstore/include/nix/store/legacy-ssh-store.hh +++ b/src/libstore/include/nix/store/legacy-ssh-store.hh @@ -73,6 +73,11 @@ struct LegacySSHStore : public virtual Store ref openConnection(); + bool includeInProvenance() override + { + return true; + } + void queryPathInfoUncached( const StorePath & path, Callback> callback) noexcept override; @@ -112,11 +117,12 @@ struct LegacySSHStore : public virtual Store StorePath addToStoreFromDump( Source & dump, std::string_view name, - FileSerialisationMethod dumpMethod = FileSerialisationMethod::NixArchive, - ContentAddressMethod hashMethod = FileIngestionMethod::NixArchive, - HashAlgorithm hashAlgo = HashAlgorithm::SHA256, - const StorePathSet & references = StorePathSet(), - RepairFlag repair = NoRepair) override + FileSerialisationMethod dumpMethod, + ContentAddressMethod hashMethod, + HashAlgorithm hashAlgo, + const StorePathSet & references, + RepairFlag repair, + std::shared_ptr provenance) override { unsupported("addToStore"); } diff --git a/src/libstore/include/nix/store/local-store.hh b/src/libstore/include/nix/store/local-store.hh index 7d93d7045f46..40aa7c699e78 100644 --- a/src/libstore/include/nix/store/local-store.hh +++ b/src/libstore/include/nix/store/local-store.hh @@ -6,6 +6,7 @@ #include "nix/store/pathlocks.hh" #include "nix/store/store-api.hh" #include "nix/store/indirect-root-store.hh" +#include "nix/store/active-builds.hh" #include "nix/util/sync.hh" #include @@ -127,7 +128,10 @@ public: StoreReference getReference() const override; }; -class LocalStore : public virtual IndirectRootStore, public virtual GcStore +class LocalStore : public virtual IndirectRootStore, + public virtual GcStore, + public virtual TrackActiveBuildsStore, + public virtual QueryActiveBuildsStore { public: @@ -246,7 +250,8 @@ public: ContentAddressMethod 
hashMethod, HashAlgorithm hashAlgo, const StorePathSet & references, - RepairFlag repair) override; + RepairFlag repair, + std::shared_ptr provenance) override; void addTempRoot(const StorePath & path) override; @@ -459,6 +464,24 @@ private: friend struct PathSubstitutionGoal; friend struct DerivationGoal; + +private: + + std::filesystem::path activeBuildsDir; + + struct ActiveBuildFile + { + AutoCloseFD fd; + AutoDelete del; + }; + + Sync> activeBuilds; + + std::vector queryActiveBuilds() override; + + BuildHandle buildStarted(const ActiveBuild & build) override; + + void buildFinished(const BuildHandle & handle) override; }; } // namespace nix diff --git a/src/libstore/include/nix/store/meson.build b/src/libstore/include/nix/store/meson.build index c17d6a9cb5a5..f99cf39040cf 100644 --- a/src/libstore/include/nix/store/meson.build +++ b/src/libstore/include/nix/store/meson.build @@ -10,6 +10,8 @@ config_pub_h = configure_file( ) headers = [ config_pub_h ] + files( + 'active-builds.hh', + 'async-path-writer.hh', 'aws-creds.hh', 'binary-cache-store.hh', 'build-result.hh', @@ -67,6 +69,7 @@ headers = [ config_pub_h ] + files( 'pathlocks.hh', 'posix-fs-canonicalise.hh', 'profiles.hh', + 'provenance.hh', 'realisation.hh', 'references.hh', 'remote-fs-accessor.hh', diff --git a/src/libstore/include/nix/store/path-info.hh b/src/libstore/include/nix/store/path-info.hh index dbcd933f4265..374b90a26ae0 100644 --- a/src/libstore/include/nix/store/path-info.hh +++ b/src/libstore/include/nix/store/path-info.hh @@ -13,6 +13,7 @@ namespace nix { class Store; struct StoreDirConfig; +struct Provenance; /** * JSON format version for path info output. @@ -123,6 +124,12 @@ struct UnkeyedValidPathInfo */ std::optional ca; + /** + * The provenance of this store path, i.e. a link back to the Nix + * expression used to create it. 
+ */ + std::shared_ptr provenance; + UnkeyedValidPathInfo(const UnkeyedValidPathInfo & other) = default; UnkeyedValidPathInfo(const StoreDirConfig & store, Hash narHash); diff --git a/src/libstore/include/nix/store/provenance.hh b/src/libstore/include/nix/store/provenance.hh new file mode 100644 index 000000000000..f742888b362a --- /dev/null +++ b/src/libstore/include/nix/store/provenance.hh @@ -0,0 +1,77 @@ +#pragma once + +#include "nix/util/provenance.hh" +#include "nix/store/path.hh" +#include "nix/store/outputs-spec.hh" + +namespace nix { + +struct BuildProvenance : Provenance +{ + /** + * The derivation that built this path. + */ + StorePath drvPath; + + /** + * The output of the derivation that corresponds to this path. + */ + OutputName output; + + /** + * The hostname of the machine on which the derivation was built, if known. + */ + std::optional buildHost; + + /** + * The system type of the derivation. + */ + std::string system; + + /** + * The provenance of the derivation, if known. + */ + std::shared_ptr next; + + // FIXME: do we need anything extra for CA derivations? + + BuildProvenance( + const StorePath & drvPath, + const OutputName & output, + std::optional buildHost, + std::string system, + std::shared_ptr next) + : drvPath(drvPath) + , output(output) + , buildHost(std::move(buildHost)) + , system(std::move(system)) + , next(std::move(next)) + { + } + + nlohmann::json to_json() const override; +}; + +struct CopiedProvenance : Provenance +{ + /** + * Store URL (typically a binary cache) from which this store + * path was copied. + */ + std::string from; + + /** + * Provenance of the store path in the upstream store, if any. 
+ */ + std::shared_ptr next; + + CopiedProvenance(std::string_view from, std::shared_ptr next) + : from(from) + , next(std::move(next)) + { + } + + nlohmann::json to_json() const override; +}; + +} // namespace nix diff --git a/src/libstore/include/nix/store/remote-store.hh b/src/libstore/include/nix/store/remote-store.hh index b152e054b9d3..3644c55f206e 100644 --- a/src/libstore/include/nix/store/remote-store.hh +++ b/src/libstore/include/nix/store/remote-store.hh @@ -7,6 +7,7 @@ #include "nix/store/store-api.hh" #include "nix/store/gc-store.hh" #include "nix/store/log-store.hh" +#include "nix/store/active-builds.hh" namespace nix { @@ -23,7 +24,7 @@ struct RemoteStoreConfig : virtual StoreConfig using StoreConfig::StoreConfig; const Setting maxConnections{ - this, 1, "max-connections", "Maximum number of concurrent connections to the Nix daemon."}; + this, 64, "max-connections", "Maximum number of concurrent connections to the Nix daemon."}; const Setting maxConnectionAge{ this, @@ -36,7 +37,10 @@ struct RemoteStoreConfig : virtual StoreConfig * \todo RemoteStore is a misnomer - should be something like * DaemonStore. */ -struct RemoteStore : public virtual Store, public virtual GcStore, public virtual LogStore +struct RemoteStore : public virtual Store, + public virtual GcStore, + public virtual LogStore, + public virtual QueryActiveBuildsStore { using Config = RemoteStoreConfig; @@ -78,7 +82,8 @@ struct RemoteStore : public virtual Store, public virtual GcStore, public virtua ContentAddressMethod caMethod, HashAlgorithm hashAlgo, const StorePathSet & references, - RepairFlag repair); + RepairFlag repair, + std::shared_ptr provenance); /** * Add a content-addressable store path. `dump` will be drained. 
@@ -86,11 +91,12 @@ struct RemoteStore : public virtual Store, public virtual GcStore, public virtua StorePath addToStoreFromDump( Source & dump, std::string_view name, - FileSerialisationMethod dumpMethod = FileSerialisationMethod::NixArchive, - ContentAddressMethod hashMethod = FileIngestionMethod::NixArchive, - HashAlgorithm hashAlgo = HashAlgorithm::SHA256, - const StorePathSet & references = StorePathSet(), - RepairFlag repair = NoRepair) override; + FileSerialisationMethod dumpMethod, + ContentAddressMethod hashMethod, + HashAlgorithm hashAlgo, + const StorePathSet & references, + RepairFlag repair, + std::shared_ptr provenance) override; void addToStore(const ValidPathInfo & info, Source & nar, RepairFlag repair, CheckSigsFlag checkSigs) override; @@ -143,6 +149,8 @@ struct RemoteStore : public virtual Store, public virtual GcStore, public virtua void addBuildLog(const StorePath & drvPath, std::string_view log) override; + std::vector queryActiveBuilds() override; + std::optional getVersion() override; void connect() override; diff --git a/src/libstore/include/nix/store/s3-binary-cache-store.hh b/src/libstore/include/nix/store/s3-binary-cache-store.hh index 5896293f1c42..e679035e461b 100644 --- a/src/libstore/include/nix/store/s3-binary-cache-store.hh +++ b/src/libstore/include/nix/store/s3-binary-cache-store.hh @@ -3,6 +3,7 @@ #include "nix/store/config.hh" #include "nix/store/http-binary-cache-store.hh" +#include "nix/store/s3-url.hh" namespace nix { @@ -52,13 +53,22 @@ struct S3BinaryCacheStoreConfig : HttpBinaryCacheStoreConfig "endpoint", R"( The S3 endpoint to use. When empty (default), uses AWS S3 with - region-specific endpoints (e.g., s3.us-east-1.amazonaws.com). - For S3-compatible services such as MinIO, set this to your service's endpoint. + region-specific endpoints. For S3-compatible services such as + MinIO, set this to your service's endpoint. 
+ )"}; - > **Note** - > - > Custom endpoints must support HTTPS and use path-based - > addressing instead of virtual host based addressing. + Setting addressingStyle{ + this, + S3AddressingStyle::Auto, + "addressing-style", + R"( + The S3 addressing style to use. `auto` (default) uses + virtual-hosted-style for standard AWS endpoints and path-style + for custom endpoints; bucket names containing dots automatically + fall back to path-style to avoid TLS certificate errors. `path` + forces path-style addressing (deprecated by AWS). `virtual` + forces virtual-hosted-style addressing (bucket names must not + contain dots). )"}; const Setting multipartUpload{ @@ -117,7 +127,7 @@ struct S3BinaryCacheStoreConfig : HttpBinaryCacheStoreConfig * Set of settings that are part of the S3 URI itself. * These are needed for region specification and other S3-specific settings. */ - const std::set s3UriSettings = {&profile, ®ion, &scheme, &endpoint}; + const std::set s3UriSettings = {&profile, ®ion, &scheme, &endpoint, &addressingStyle}; static const std::string name() { diff --git a/src/libstore/include/nix/store/s3-url.hh b/src/libstore/include/nix/store/s3-url.hh index cf59dbea86ad..533710745229 100644 --- a/src/libstore/include/nix/store/s3-url.hh +++ b/src/libstore/include/nix/store/s3-url.hh @@ -1,16 +1,41 @@ #pragma once ///@file #include "nix/store/config.hh" +#include "nix/util/error.hh" #include "nix/util/url.hh" #include "nix/util/util.hh" #include #include +#include #include #include namespace nix { +/** + * S3 addressing style for bucket access. + * - Auto: virtual-hosted-style for standard AWS endpoints, path-style for custom endpoints. + * - Path: always use path-style (bucket in URL path). + * - Virtual: always use virtual-hosted-style (bucket as hostname prefix; bucket name must not contain dots). 
+ */ +enum class S3AddressingStyle { + Auto, + Path, + Virtual, +}; + +MakeError(InvalidS3AddressingStyle, Error); + +S3AddressingStyle parseS3AddressingStyle(std::string_view style); +std::string_view showS3AddressingStyle(S3AddressingStyle style); + +template<> +S3AddressingStyle BaseSetting::parse(const std::string & str) const; + +template<> +std::string BaseSetting::to_string() const; + /** * Parsed S3 URL. */ @@ -27,6 +52,7 @@ struct ParsedS3URL std::optional region; std::optional scheme; std::optional versionId; + std::optional addressingStyle; /** * The endpoint can be either missing, be an absolute URI (with a scheme like `http:`) * or an authority (so an IP address or a registered name). @@ -46,7 +72,8 @@ struct ParsedS3URL static ParsedS3URL parse(const ParsedURL & uri); /** - * Convert this ParsedS3URL to HTTPS ParsedURL for use with curl's AWS SigV4 authentication + * Convert this ParsedS3URL to an HTTP(S) ParsedURL for use with curl's AWS SigV4 authentication. + * The scheme defaults to HTTPS but respects the 'scheme' setting and custom endpoint schemes. */ ParsedURL toHttpsUrl() const; diff --git a/src/libstore/include/nix/store/store-api.hh b/src/libstore/include/nix/store/store-api.hh index db107fc0ce70..f7beb07c30cf 100644 --- a/src/libstore/include/nix/store/store-api.hh +++ b/src/libstore/include/nix/store/store-api.hh @@ -43,6 +43,8 @@ struct SourceAccessor; class NarInfoDiskCache; class Store; +struct Provenance; + typedef std::map OutputPathMap; enum CheckSigsFlag : bool { NoCheckSigs = false, CheckSigs = true }; @@ -337,7 +339,9 @@ public: StorePath followLinksToStorePath(std::string_view path) const; /** - * Check whether a path is valid. + * Check whether a path is valid. NOTE: this function does not + * generally cache whether a path is valid. You may want to use + * `maybeQueryPathInfo()`, which does cache. */ bool isValidPath(const StorePath & path); @@ -377,10 +381,17 @@ public: /** * Query information about a valid path. 
It is permitted to omit - * the name part of the store path. + * the name part of the store path. Throws an exception if the + * path is not valid. */ ref queryPathInfo(const StorePath & path); + /** + * Like `queryPathInfo()`, but returns `nullptr` if the path is + * not valid. + */ + std::shared_ptr maybeQueryPathInfo(const StorePath & path); + /** * Asynchronous version of queryPathInfo(). */ @@ -588,7 +599,8 @@ public: ContentAddressMethod hashMethod = ContentAddressMethod::Raw::NixArchive, HashAlgorithm hashAlgo = HashAlgorithm::SHA256, const StorePathSet & references = StorePathSet(), - RepairFlag repair = NoRepair) = 0; + RepairFlag repair = NoRepair, + std::shared_ptr provenance = nullptr) = 0; /** * Add a mapping indicating that `deriver!outputName` maps to the output path @@ -781,7 +793,8 @@ public: /** * Write a derivation to the Nix store, and return its path. */ - virtual StorePath writeDerivation(const Derivation & drv, RepairFlag repair = NoRepair); + virtual StorePath writeDerivation( + const Derivation & drv, RepairFlag repair = NoRepair, std::shared_ptr provenance = nullptr); /** * Read a derivation (which must already be valid). @@ -906,6 +919,15 @@ public: return {}; } + /** + * Whether, when copying *from* this store, a "copied" provenance + * record should be added. + */ + virtual bool includeInProvenance() + { + return false; + } + protected: Stats stats; @@ -924,9 +946,10 @@ protected: }; /** - * Copy a path from one store to another. + * Copy a path from one store to another. Return the path info of the newly added store path, or nullptr if the path was + * already valid. 
*/ -void copyStorePath( +std::shared_ptr copyStorePath( Store & srcStore, Store & dstStore, const StorePath & storePath, diff --git a/src/libstore/include/nix/store/worker-protocol-connection.hh b/src/libstore/include/nix/store/worker-protocol-connection.hh index 31436395fe79..591e2cf09b5f 100644 --- a/src/libstore/include/nix/store/worker-protocol-connection.hh +++ b/src/libstore/include/nix/store/worker-protocol-connection.hh @@ -41,6 +41,7 @@ struct WorkerProto::BasicConnection return WorkerProto::ReadConn{ .from = from, .version = protoVersion, + .provenance = features.contains(WorkerProto::featureProvenance), }; } @@ -57,6 +58,7 @@ struct WorkerProto::BasicConnection return WorkerProto::WriteConn{ .to = to, .version = protoVersion, + .provenance = features.contains(WorkerProto::featureProvenance), }; } }; diff --git a/src/libstore/include/nix/store/worker-protocol-impl.hh b/src/libstore/include/nix/store/worker-protocol-impl.hh index 26f6b9d44e46..c36145d620d0 100644 --- a/src/libstore/include/nix/store/worker-protocol-impl.hh +++ b/src/libstore/include/nix/store/worker-protocol-impl.hh @@ -45,12 +45,14 @@ struct WorkerProto::Serialise { static T read(const StoreDirConfig & store, WorkerProto::ReadConn conn) { - return CommonProto::Serialise::read(store, CommonProto::ReadConn{.from = conn.from}); + return CommonProto::Serialise::read( + store, CommonProto::ReadConn{.from = conn.from, .shortStorePaths = conn.shortStorePaths}); } static void write(const StoreDirConfig & store, WorkerProto::WriteConn conn, const T & t) { - CommonProto::Serialise::write(store, CommonProto::WriteConn{.to = conn.to}, t); + CommonProto::Serialise::write( + store, CommonProto::WriteConn{.to = conn.to, .shortStorePaths = conn.shortStorePaths}, t); } }; diff --git a/src/libstore/include/nix/store/worker-protocol.hh b/src/libstore/include/nix/store/worker-protocol.hh index 87ef2a399840..8ae2d261a9e9 100644 --- a/src/libstore/include/nix/store/worker-protocol.hh +++ 
b/src/libstore/include/nix/store/worker-protocol.hh @@ -67,6 +67,8 @@ struct WorkerProto { Source & from; Version version; + bool shortStorePaths = false; + bool provenance = false; }; /** @@ -77,6 +79,8 @@ struct WorkerProto { Sink & to; Version version; + bool shortStorePaths = false; + bool provenance = false; }; /** @@ -137,6 +141,9 @@ struct WorkerProto using Feature = std::string; using FeatureSet = std::set>; + static constexpr std::string_view featureQueryActiveBuilds{"queryActiveBuilds"}; + static constexpr std::string_view featureProvenance{"provenance"}; + static const FeatureSet allFeatures; }; @@ -185,6 +192,7 @@ enum struct WorkerProto::Op : uint64_t { AddBuildLog = 45, BuildPathsWithResults = 46, AddPermRoot = 47, + QueryActiveBuilds = 48, }; struct WorkerProto::ClientHandshakeInfo diff --git a/src/libstore/local-fs-store.cc b/src/libstore/local-fs-store.cc index 1a38cac3b7f7..b8f8c6dbdea6 100644 --- a/src/libstore/local-fs-store.cc +++ b/src/libstore/local-fs-store.cc @@ -55,7 +55,7 @@ struct LocalStoreAccessor : PosixSourceAccessor void requireStoreObject(const CanonPath & path) { auto [storePath, rest] = store->toStorePath(store->storeDir + path.abs()); - if (requireValidPath && !store->isValidPath(storePath)) + if (requireValidPath && !store->maybeQueryPathInfo(storePath)) throw InvalidPath("path '%1%' is not a valid store path", store->printStorePath(storePath)); } diff --git a/src/libstore/local-store-active-builds.cc b/src/libstore/local-store-active-builds.cc new file mode 100644 index 000000000000..25c6ece58970 --- /dev/null +++ b/src/libstore/local-store-active-builds.cc @@ -0,0 +1,282 @@ +#include "nix/store/local-store.hh" +#include "nix/util/json-utils.hh" +#ifdef __linux__ +# include "nix/util/cgroup.hh" +# include +# include +# include +#endif + +#ifdef __APPLE__ +# include +# include +# include +#endif + +#include +#include + +namespace nix { + +#ifdef __linux__ +static ActiveBuildInfo::ProcessInfo getProcessInfo(pid_t pid) +{ + 
ActiveBuildInfo::ProcessInfo info; + info.pid = pid; + info.argv = + tokenizeString>(readFile(fmt("/proc/%d/cmdline", pid)), std::string("\000", 1)); + + auto statPath = fmt("/proc/%d/stat", pid); + + AutoCloseFD statFd = open(statPath.c_str(), O_RDONLY | O_CLOEXEC); + if (!statFd) + throw SysError("opening '%s'", statPath); + + // Get the UID from the ownership of the stat file. + struct stat st; + if (fstat(statFd.get(), &st) == -1) + throw SysError("getting ownership of '%s'", statPath); + info.user = UserInfo::fromUid(st.st_uid); + + // Read /proc/[pid]/stat for parent PID and CPU times. + // Format: pid (comm) state ppid ... + // Note that the comm field can contain spaces, so use a regex to parse it. + auto statContent = trim(readFile(statFd.get())); + static std::regex statRegex(R"((\d+) \(([^)]*)\) (.*))"); + std::smatch match; + if (!std::regex_match(statContent, match, statRegex)) + throw Error("failed to parse /proc/%d/stat", pid); + + // Parse the remaining fields after (comm). + auto remainingFields = tokenizeString>(match[3].str()); + + if (remainingFields.size() > 1) + info.parentPid = string2Int(remainingFields[1]).value_or(0); + + static long clkTck = sysconf(_SC_CLK_TCK); + if (remainingFields.size() > 14 && clkTck > 0) { + if (auto utime = string2Int(remainingFields[11])) + info.utime = std::chrono::microseconds((*utime * 1'000'000) / clkTck); + if (auto stime = string2Int(remainingFields[12])) + info.stime = std::chrono::microseconds((*stime * 1'000'000) / clkTck); + if (auto cutime = string2Int(remainingFields[13])) + info.cutime = std::chrono::microseconds((*cutime * 1'000'000) / clkTck); + if (auto cstime = string2Int(remainingFields[14])) + info.cstime = std::chrono::microseconds((*cstime * 1'000'000) / clkTck); + } + + return info; +} + +/** + * Recursively get all descendant PIDs of a given PID using /proc/[pid]/task/[pid]/children. 
+ */ +static std::set getDescendantPids(pid_t pid) +{ + std::set descendants; + + [&](this auto self, pid_t pid) -> void { + try { + descendants.insert(pid); + for (const auto & childPidStr : + tokenizeString>(readFile(fmt("/proc/%d/task/%d/children", pid, pid)))) + if (auto childPid = string2Int(childPidStr)) + self(*childPid); + } catch (...) { + // Process may have exited. + ignoreExceptionExceptInterrupt(); + } + }(pid); + + return descendants; +} +#endif + +#ifdef __APPLE__ +static ActiveBuildInfo::ProcessInfo getProcessInfo(pid_t pid) +{ + ActiveBuildInfo::ProcessInfo info; + info.pid = pid; + + // Get basic process info including ppid and uid. + struct proc_bsdinfo procInfo; + if (proc_pidinfo(pid, PROC_PIDTBSDINFO, 0, &procInfo, sizeof(procInfo)) != sizeof(procInfo)) + throw SysError("getting process info for pid %d", pid); + + info.parentPid = procInfo.pbi_ppid; + info.user = UserInfo::fromUid(procInfo.pbi_uid); + + // Get CPU times. + struct proc_taskinfo taskInfo; + if (proc_pidinfo(pid, PROC_PIDTASKINFO, 0, &taskInfo, sizeof(taskInfo)) == sizeof(taskInfo)) { + + mach_timebase_info_data_t timebase; + mach_timebase_info(&timebase); + auto nanosecondsPerTick = (double) timebase.numer / (double) timebase.denom; + + // Convert nanoseconds to microseconds. + info.utime = + std::chrono::microseconds((uint64_t) ((double) taskInfo.pti_total_user * nanosecondsPerTick / 1000)); + info.stime = + std::chrono::microseconds((uint64_t) ((double) taskInfo.pti_total_system * nanosecondsPerTick / 1000)); + } + + // Get argv using sysctl. + int mib[3] = {CTL_KERN, KERN_PROCARGS2, pid}; + size_t size = 0; + + // First call to get size. 
+ if (sysctl(mib, 3, nullptr, &size, nullptr, 0) == 0 && size > 0) { + std::vector buffer(size); + if (sysctl(mib, 3, buffer.data(), &size, nullptr, 0) == 0) { + // Format: argc (int), followed by executable path, followed by null-terminated args + if (size >= sizeof(int)) { + int argc; + memcpy(&argc, buffer.data(), sizeof(argc)); + + // Skip past argc and executable path (null-terminated). + size_t pos = sizeof(int); + while (pos < size && buffer[pos] != '\0') + pos++; + pos++; // Skip the null terminator + + // Parse the arguments. + while (pos < size && info.argv.size() < (size_t) argc) { + size_t argStart = pos; + while (pos < size && buffer[pos] != '\0') + pos++; + + if (pos > argStart) + info.argv.emplace_back(buffer.data() + argStart, pos - argStart); + + pos++; // Skip the null terminator + } + } + } + } + + return info; +} + +/** + * Recursively get all descendant PIDs using sysctl with KERN_PROC. + */ +static std::set getDescendantPids(pid_t startPid) +{ + // Get all processes. + int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_ALL, 0}; + size_t size = 0; + + if (sysctl(mib, 4, nullptr, &size, nullptr, 0) == -1) + return {startPid}; + + std::vector procs(size / sizeof(struct kinfo_proc)); + if (sysctl(mib, 4, procs.data(), &size, nullptr, 0) == -1) + return {startPid}; + + // Get the children of all processes. + std::map> children; + size_t count = size / sizeof(struct kinfo_proc); + for (size_t i = 0; i < count; i++) { + pid_t childPid = procs[i].kp_proc.p_pid; + pid_t parentPid = procs[i].kp_eproc.e_ppid; + children[parentPid].insert(childPid); + } + + // Get all children of `pid`. 
+ std::set descendants; + std::queue todo; + todo.push(startPid); + while (auto pid = pop(todo)) { + if (!descendants.insert(*pid).second) + continue; + for (auto & child : children[*pid]) + todo.push(child); + } + + return descendants; +} +#endif + +std::vector LocalStore::queryActiveBuilds() +{ + std::vector result; + + for (auto & entry : DirectoryIterator{activeBuildsDir}) { + auto path = entry.path(); + + try { + // Open the file. If we can lock it, the build is not active. + auto fd = openLockFile(path, false); + if (!fd || lockFile(fd.get(), ltRead, false)) { + AutoDelete(path, false); + continue; + } + + ActiveBuildInfo info(nlohmann::json::parse(readFile(fd.get())).get()); + +#if defined(__linux__) || defined(__APPLE__) + /* Read process information. */ + try { +# ifdef __linux__ + if (info.cgroup) { + for (auto pid : getPidsInCgroup(*info.cgroup)) + info.processes.push_back(getProcessInfo(pid)); + + /* Read CPU statistics from the cgroup. */ + auto stats = getCgroupStats(*info.cgroup); + info.utime = stats.cpuUser; + info.stime = stats.cpuSystem; + } else +# endif + { + for (auto pid : getDescendantPids(info.mainPid)) + info.processes.push_back(getProcessInfo(pid)); + } + } catch (...) { + ignoreExceptionExceptInterrupt(); + } +#endif + + result.push_back(std::move(info)); + } catch (...) { + ignoreExceptionExceptInterrupt(); + } + } + + return result; +} + +LocalStore::BuildHandle LocalStore::buildStarted(const ActiveBuild & build) +{ + // Write info about the active build to the active-builds directory where it can be read by `queryBuilds()`. + static std::atomic nextId{1}; + + auto id = nextId++; + + auto infoFileName = fmt("%d-%d", getpid(), id); + auto infoFilePath = activeBuildsDir / infoFileName; + + auto infoFd = openLockFile(infoFilePath, true); + + // Lock the file to denote that the build is active. 
+ lockFile(infoFd.get(), ltWrite, true); + + writeFile(infoFilePath, nlohmann::json(build).dump(), 0600, FsSync::Yes); + + activeBuilds.lock()->emplace( + id, + ActiveBuildFile{ + .fd = std::move(infoFd), + .del = AutoDelete(infoFilePath, false), + }); + + return BuildHandle(*this, id); +} + +void LocalStore::buildFinished(const BuildHandle & handle) +{ + activeBuilds.lock()->erase(handle.id); +} + +} // namespace nix diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index b625b6c1bf2a..047fb30f84a1 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -20,6 +20,7 @@ #include "nix/util/users.hh" #include "nix/store/store-open.hh" #include "nix/store/store-registration.hh" +#include "nix/util/provenance.hh" #include #include @@ -125,6 +126,7 @@ LocalStore::LocalStore(ref config) , schemaPath(dbDir + "/schema") , tempRootsDir(config->stateDir + "/temproots") , fnTempRoots(fmt("%s/%d", tempRootsDir, getpid())) + , activeBuildsDir(config->stateDir + "/active-builds") { auto state(_state->lock()); state->stmts = std::make_unique(); @@ -146,6 +148,7 @@ LocalStore::LocalStore(ref config) createDirs(gcRootsDir); replaceSymlink(profilesDir, gcRootsDir + "/profiles"); } + createDirs(activeBuildsDir); for (auto & perUserDir : {profilesDir + "/per-user", gcRootsDir + "/per-user"}) { createDirs(perUserDir); @@ -334,13 +337,16 @@ LocalStore::LocalStore(ref config) /* Prepare SQL statements. */ state->stmts->RegisterValidPath.create( state->db, - "insert into ValidPaths (path, hash, registrationTime, deriver, narSize, ultimate, sigs, ca) values (?, ?, ?, ?, ?, ?, ?, ?);"); + fmt("insert into ValidPaths (path, hash, registrationTime, deriver, narSize, ultimate, sigs, ca%s) values (?, ?, ?, ?, ?, ?, ?, ?%s);", + experimentalFeatureSettings.isEnabled(Xp::Provenance) ? ", provenance" : "", + experimentalFeatureSettings.isEnabled(Xp::Provenance) ? ", ?" 
: "")); state->stmts->UpdatePathInfo.create( state->db, "update ValidPaths set narSize = ?, hash = ?, ultimate = ?, sigs = ?, ca = ? where path = ?;"); state->stmts->AddReference.create(state->db, "insert or replace into Refs (referrer, reference) values (?, ?);"); state->stmts->QueryPathInfo.create( state->db, - "select id, hash, registrationTime, deriver, narSize, ultimate, sigs, ca from ValidPaths where path = ?;"); + fmt("select id, hash, registrationTime, deriver, narSize, ultimate, sigs, ca%s from ValidPaths where path = ?;", + experimentalFeatureSettings.isEnabled(Xp::Provenance) ? ", provenance" : "")); state->stmts->QueryReferences.create( state->db, "select path from Refs join ValidPaths on reference = id where referrer = ?;"); state->stmts->QueryReferrers.create( @@ -595,6 +601,9 @@ void LocalStore::upgradeDBSchema(State & state) "20220326-ca-derivations", #include "ca-specific-schema.sql.gen.hh" ); + + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) + doUpgrade("20241024-provenance", "alter table ValidPaths add column provenance text"); } /* To improve purity, users may want to make the Nix store a read-only @@ -690,13 +699,14 @@ uint64_t LocalStore::addValidPath(State & state, const ValidPathInfo & info, boo "cannot add path '%s' to the Nix store because it claims to be content-addressed but isn't", printStorePath(info.path)); - state.stmts->RegisterValidPath - .use()(printStorePath(info.path))(info.narHash.to_string(HashFormat::Base16, true))( - info.registrationTime == 0 ? time(0) : info.registrationTime)( - info.deriver ? printStorePath(*info.deriver) : "", - (bool) info.deriver)(info.narSize, info.narSize != 0)(info.ultimate ? 1 : 0, info.ultimate)( - concatStringsSep(" ", info.sigs), !info.sigs.empty())(renderContentAddress(info.ca), (bool) info.ca) - .exec(); + auto query = state.stmts->RegisterValidPath.use()(printStorePath(info.path))( + info.narHash.to_string(HashFormat::Base16, true))(info.registrationTime == 0 ? 
time(0) : info.registrationTime)( + info.deriver ? printStorePath(*info.deriver) : "", + (bool) info.deriver)(info.narSize, info.narSize != 0)(info.ultimate ? 1 : 0, info.ultimate)( + concatStringsSep(" ", info.sigs), !info.sigs.empty())(renderContentAddress(info.ca), (bool) info.ca); + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) + query(info.provenance ? info.provenance->to_json_str() : "", (bool) info.provenance); + query.exec(); uint64_t id = state.db.getLastInsertedRowId(); /* If this is a derivation, then store the derivation outputs in @@ -786,6 +796,12 @@ std::shared_ptr LocalStore::queryPathInfoInternal(State & s while (useQueryReferences.next()) info->references.insert(parseStorePath(useQueryReferences.getStr(0))); + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) { + auto prov = (const char *) sqlite3_column_text(state.stmts->QueryPathInfo, 8); + if (prov) + info->provenance = Provenance::from_json_str(prov); + } + return info; } @@ -1167,7 +1183,8 @@ StorePath LocalStore::addToStoreFromDump( ContentAddressMethod hashMethod, HashAlgorithm hashAlgo, const StorePathSet & references, - RepairFlag repair) + RepairFlag repair, + std::shared_ptr provenance) { /* For computing the store path. 
*/ auto hashSink = std::make_unique(hashAlgo); @@ -1311,6 +1328,7 @@ StorePath LocalStore::addToStoreFromDump( auto info = ValidPathInfo::makeFromCA(*this, name, std::move(desc), narHash.hash); info.narSize = narHash.numBytesDigested; + info.provenance = provenance; registerValidPath(info); } diff --git a/src/libstore/meson.build b/src/libstore/meson.build index 088d8bcbb591..7a53fd65d86a 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -13,6 +13,8 @@ project( license : 'LGPL-2.1-or-later', ) +fs = import('fs') + cxx = meson.get_compiler('cpp') subdir('nix-meson-build-support/deps-lists') @@ -22,6 +24,12 @@ configdata_priv = configuration_data() # TODO rename, because it will conflict with downstream projects configdata_priv.set_quoted('PACKAGE_VERSION', meson.project_version()) +configdata_priv.set('IS_STATIC', get_option('default_library') == 'static') + +configdata_priv.set_quoted( + 'DETERMINATE_NIX_VERSION', + fs.read('../../.version-determinate').strip(), +) subdir('nix-meson-build-support/default-system-cpu') @@ -202,8 +210,6 @@ if get_option('embedded-sandbox-shell') generated_headers += embedded_sandbox_shell_gen endif -fs = import('fs') - prefix = get_option('prefix') # For each of these paths, assume that it is relative to the prefix unless # it is already an absolute path (which is the default for store-dir, localstatedir, and log-dir). 
@@ -261,6 +267,19 @@ configdata_priv.set_quoted( : 'lsof', ) +link_args = [] + +wasmtime_required = get_option('wasm').disable_if( + get_option('default_library') == 'static', + error_message : 'Building with wasmtime and static linking is not supported', +) + +if wasmtime_required.enabled() + link_args += '-lwasmtime' +endif + +configdata_priv.set('NIX_USE_WASMTIME', wasmtime_required.enabled().to_int()) + config_priv_h = configure_file( configuration : configdata_priv, output : 'store-config-private.hh', @@ -269,6 +288,8 @@ config_priv_h = configure_file( subdir('nix-meson-build-support/common') sources = files( + 'active-builds.cc', + 'async-path-writer.cc', 'binary-cache-store.cc', 'build-result.cc', 'build/derivation-builder.cc', @@ -307,6 +328,7 @@ sources = files( 'local-binary-cache-store.cc', 'local-fs-store.cc', 'local-overlay-store.cc', + 'local-store-active-builds.cc', 'local-store.cc', 'log-store.cc', 'machines.cc', @@ -325,6 +347,7 @@ sources = files( 'pathlocks.cc', 'posix-fs-canonicalise.cc', 'profiles.cc', + 'provenance.cc', 'realisation.cc', 'references.cc', 'remote-fs-accessor.cc', @@ -374,8 +397,8 @@ this_library = library( soversion : nix_soversion, dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, - link_args : linker_export_flags, - prelink : true, # For C++ static initializers + link_args : linker_export_flags + link_args, + prelink : prelink, # For C++ static initializers install : true, cpp_pch : do_pch ? 
[ 'pch/precompiled-headers.hh' ] : [], ) diff --git a/src/libstore/meson.options b/src/libstore/meson.options index c822133df46e..6bae2ab11f17 100644 --- a/src/libstore/meson.options +++ b/src/libstore/meson.options @@ -39,3 +39,9 @@ option( type : 'feature', description : 'build support for AWS authentication with S3', ) + +option( + 'wasm', + type : 'feature', + description : 'enable wasmtime integration into the Nix derivation builder', +) diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc index c32c6cd2b31b..8bdf92021f4a 100644 --- a/src/libstore/nar-info-disk-cache.cc +++ b/src/libstore/nar-info-disk-cache.cc @@ -3,6 +3,7 @@ #include "nix/util/sync.hh" #include "nix/store/sqlite.hh" #include "nix/store/globals.hh" +#include "nix/store/provenance.hh" #include #include @@ -36,6 +37,7 @@ create table if not exists NARs ( deriver text, sigs text, ca text, + provenance text, timestamp integer not null, present integer not null, primary key (cache, hashPart), @@ -65,9 +67,6 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache /* How often to purge expired entries from the cache. */ const int purgeInterval = 24 * 3600; - /* How long to cache binary cache info (i.e. 
/nix-cache-info) */ - const int cacheInfoTtl = 7 * 24 * 3600; - struct Cache { int id; @@ -86,7 +85,7 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache Sync _state; - NarInfoDiskCacheImpl(Path dbPath = (getCacheDir() / "binary-cache-v7.sqlite").string()) + NarInfoDiskCacheImpl(Path dbPath = (getCacheDir() / "binary-cache-v8.sqlite").string()) { auto state(_state.lock()); @@ -109,14 +108,14 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache state->insertNAR.create( state->db, "insert or replace into NARs(cache, hashPart, namePart, url, compression, fileHash, fileSize, narHash, " - "narSize, refs, deriver, sigs, ca, timestamp, present) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1)"); + "narSize, refs, deriver, sigs, ca, provenance, timestamp, present) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1)"); state->insertMissingNAR.create( state->db, "insert or replace into NARs(cache, hashPart, timestamp, present) values (?, ?, ?, 0)"); state->queryNAR.create( state->db, - "select present, namePart, url, compression, fileHash, fileSize, narHash, narSize, refs, deriver, sigs, ca from NARs where cache = ? and hashPart = ? and ((present = 0 and timestamp > ?) or (present = 1 and timestamp > ?))"); + "select present, namePart, url, compression, fileHash, fileSize, narHash, narSize, refs, deriver, sigs, ca, provenance from NARs where cache = ? and hashPart = ? and ((present = 0 and timestamp > ?) 
or (present = 1 and timestamp > ?))"); state->insertRealisation.create( state->db, @@ -182,7 +181,7 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache { auto i = state.caches.find(uri); if (i == state.caches.end()) { - auto queryCache(state.queryCache.use()(uri)(time(0) - cacheInfoTtl)); + auto queryCache(state.queryCache.use()(uri)(time(0) - settings.ttlNarInfoCacheMeta)); if (!queryCache.next()) return std::nullopt; auto cache = Cache{ @@ -279,6 +278,8 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache for (auto & sig : tokenizeString(queryNAR.getStr(10), " ")) narInfo->sigs.insert(sig); narInfo->ca = ContentAddress::parseOpt(queryNAR.getStr(11)); + if (experimentalFeatureSettings.isEnabled(Xp::Provenance) && !queryNAR.isNull(12)) + narInfo->provenance = Provenance::from_json_str_optional(queryNAR.getStr(12)); return {oValid, narInfo}; }); @@ -337,8 +338,10 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache narInfo && narInfo->fileHash)( narInfo ? narInfo->fileSize : 0, narInfo != 0 && narInfo->fileSize)(info->narHash.to_string( HashFormat::Nix32, true))(info->narSize)(concatStringsSep(" ", info->shortRefs()))( - info->deriver ? std::string(info->deriver->to_string()) : "", (bool) info->deriver)( - concatStringsSep(" ", info->sigs))(renderContentAddress(info->ca))(time(0)) + info->deriver ? std::string(info->deriver->to_string()) : "", + (bool) info->deriver)(concatStringsSep(" ", info->sigs))(renderContentAddress(info->ca))( + info->provenance ? 
info->provenance->to_json_str() : "", + experimentalFeatureSettings.isEnabled(Xp::Provenance) && info->provenance)(time(0)) .exec(); } else { diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc index 27ed5a76143b..ef72987699d9 100644 --- a/src/libstore/nar-info.cc +++ b/src/libstore/nar-info.cc @@ -3,6 +3,7 @@ #include "nix/store/store-api.hh" #include "nix/util/strings.hh" #include "nix/util/json-utils.hh" +#include "nix/util/provenance.hh" namespace nix { @@ -84,7 +85,8 @@ NarInfo::NarInfo(const StoreDirConfig & store, const std::string & s, const std: throw corrupt("extra CA"); // FIXME: allow blank ca or require skipping field? ca = ContentAddress::parseOpt(value); - } + } else if (name == "Provenance" && experimentalFeatureSettings.isEnabled(Xp::Provenance)) + provenance = Provenance::from_json_str(value); pos = eol + 1; line += 1; @@ -129,6 +131,9 @@ std::string NarInfo::to_string(const StoreDirConfig & store) const if (ca) res += "CA: " + renderContentAddress(*ca) + "\n"; + if (provenance && experimentalFeatureSettings.isEnabled(Xp::Provenance)) + res += "Provenance: " + provenance->to_json_str() + "\n"; + return res; } diff --git a/src/libstore/package.nix b/src/libstore/package.nix index b451b4041463..a660b9a4646a 100644 --- a/src/libstore/package.nix +++ b/src/libstore/package.nix @@ -4,7 +4,7 @@ mkMesonLibrary, unixtools, - darwin, + apple-sdk, nix-util, boost, @@ -13,6 +13,7 @@ libseccomp, nlohmann_json, sqlite, + wasmtime, busybox-sandbox-shell ? null, @@ -20,11 +21,13 @@ version, - embeddedSandboxShell ? stdenv.hostPlatform.isStatic, + embeddedSandboxShell ? stdenv.hostPlatform.isStatic && !stdenv.hostPlatform.isDarwin, withAWS ? # Default is this way because there have been issues building this dependency stdenv.hostPlatform == stdenv.buildPlatform && (stdenv.isLinux || stdenv.isDarwin), + + enableWasm ? 
!stdenv.hostPlatform.isStatic, }: let @@ -32,15 +35,17 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-store"; + pname = "determinate-nix-store"; inherit version; workDir = ./.; fileset = fileset.unions [ ../../nix-meson-build-support ./nix-meson-build-support + # FIXME: get rid of these symlinks. ../../.version ./.version + ../../.version-determinate ./meson.build ./meson.options ./include/nix/store/meson.build @@ -64,7 +69,8 @@ mkMesonLibrary (finalAttrs: { sqlite ] ++ lib.optional stdenv.hostPlatform.isLinux libseccomp - ++ lib.optional withAWS aws-crt-cpp; + ++ lib.optional withAWS aws-crt-cpp + ++ lib.optional enableWasm wasmtime; propagatedBuildInputs = [ nix-util @@ -75,6 +81,7 @@ mkMesonLibrary (finalAttrs: { (lib.mesonEnable "seccomp-sandboxing" stdenv.hostPlatform.isLinux) (lib.mesonBool "embedded-sandbox-shell" embeddedSandboxShell) (lib.mesonEnable "s3-aws-auth" withAWS) + (lib.mesonEnable "wasm" enableWasm) ] ++ lib.optionals stdenv.hostPlatform.isLinux [ (lib.mesonOption "sandbox-shell" "${busybox-sandbox-shell}/bin/busybox") diff --git a/src/libstore/path-info.cc b/src/libstore/path-info.cc index ebab52cec81f..c5a9eba16c86 100644 --- a/src/libstore/path-info.cc +++ b/src/libstore/path-info.cc @@ -5,6 +5,7 @@ #include "nix/util/json-utils.hh" #include "nix/util/comparator.hh" #include "nix/util/strings.hh" +#include "nix/util/provenance.hh" namespace nix { @@ -214,6 +215,9 @@ UnkeyedValidPathInfo::toJSON(const StoreDirConfig * store, bool includeImpureInf auto & sigsObj = jsonObject["signatures"] = json::array(); for (auto & sig : sigs) sigsObj.push_back(sig); + + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) + jsonObject["provenance"] = provenance ? 
provenance->to_json() : nullptr; } return jsonObject; @@ -289,6 +293,12 @@ UnkeyedValidPathInfo UnkeyedValidPathInfo::fromJSON(const StoreDirConfig * store if (auto * rawSignatures = optionalValueAt(json, "signatures")) res.sigs = getStringSet(*rawSignatures); + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) { + auto prov = json.find("provenance"); + if (prov != json.end() && !prov->second.is_null()) + res.provenance = Provenance::from_json(prov->second); + } + return res; } diff --git a/src/libstore/provenance.cc b/src/libstore/provenance.cc new file mode 100644 index 000000000000..0fa38658d769 --- /dev/null +++ b/src/libstore/provenance.cc @@ -0,0 +1,52 @@ +#include "nix/store/provenance.hh" +#include "nix/util/json-utils.hh" + +namespace nix { + +nlohmann::json BuildProvenance::to_json() const +{ + return { + {"type", "build"}, + {"drv", drvPath.to_string()}, + {"output", output}, + {"buildHost", buildHost}, + {"system", system}, + {"next", next ? next->to_json() : nlohmann::json(nullptr)}, + }; +} + +Provenance::Register registerBuildProvenance("build", [](nlohmann::json json) { + auto & obj = getObject(json); + std::shared_ptr next; + if (auto p = optionalValueAt(obj, "next"); p && !p->is_null()) + next = Provenance::from_json(*p); + std::optional buildHost; + if (auto p = optionalValueAt(obj, "buildHost")) + buildHost = p->get>(); + auto buildProv = make_ref( + StorePath(getString(valueAt(obj, "drv"))), + getString(valueAt(obj, "output")), + buildHost, + getString(valueAt(obj, "system")), + next); + return buildProv; +}); + +nlohmann::json CopiedProvenance::to_json() const +{ + return { + {"type", "copied"}, + {"from", from}, + {"next", next ? 
next->to_json() : nlohmann::json(nullptr)}, + }; +} + +Provenance::Register registerCopiedProvenance("copied", [](nlohmann::json json) { + auto & obj = getObject(json); + std::shared_ptr next; + if (auto p = optionalValueAt(obj, "next"); p && !p->is_null()) + next = Provenance::from_json(*p); + return make_ref(getString(valueAt(obj, "from")), next); +}); + +} // namespace nix diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 6d1204570d8b..0274df18cbbc 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -18,6 +18,7 @@ #include "nix/util/callback.hh" #include "nix/store/filetransfer.hh" #include "nix/util/signals.hh" +#include "nix/util/provenance.hh" #include @@ -71,8 +72,11 @@ void RemoteStore::initConnection(Connection & conn) StringSink saved; TeeSource tee(conn.from, saved); try { + auto myFeatures = WorkerProto::allFeatures; + if (!experimentalFeatureSettings.isEnabled(Xp::Provenance)) + myFeatures.erase(std::string(WorkerProto::featureProvenance)); auto [protoVersion, features] = - WorkerProto::BasicClientConnection::handshake(conn.to, tee, PROTOCOL_VERSION, WorkerProto::allFeatures); + WorkerProto::BasicClientConnection::handshake(conn.to, tee, PROTOCOL_VERSION, myFeatures); if (protoVersion < MINIMUM_PROTOCOL_VERSION) throw Error("the Nix daemon version is too old"); conn.protoVersion = protoVersion; @@ -311,7 +315,8 @@ ref RemoteStore::addCAToStore( ContentAddressMethod caMethod, HashAlgorithm hashAlgo, const StorePathSet & references, - RepairFlag repair) + RepairFlag repair, + std::shared_ptr provenance) { std::optional conn_(getConnection()); auto & conn = *conn_; @@ -321,6 +326,8 @@ ref RemoteStore::addCAToStore( conn->to << WorkerProto::Op::AddToStore << name << caMethod.renderWithAlgo(hashAlgo); WorkerProto::write(*this, *conn, references); conn->to << repair; + if (conn->features.contains(WorkerProto::featureProvenance)) + conn->to << (provenance ? 
provenance->to_json_str() : ""); // The dump source may invoke the store, so we need to make some room. connections->incCapacity(); @@ -398,7 +405,8 @@ StorePath RemoteStore::addToStoreFromDump( ContentAddressMethod hashMethod, HashAlgorithm hashAlgo, const StorePathSet & references, - RepairFlag repair) + RepairFlag repair, + std::shared_ptr provenance) { FileSerialisationMethod fsm; switch (hashMethod.getFileIngestionMethod()) { @@ -417,7 +425,7 @@ StorePath RemoteStore::addToStoreFromDump( } if (fsm != dumpMethod) unsupported("RemoteStore::addToStoreFromDump doesn't support this `dumpMethod` `hashMethod` combination"); - auto storePath = addCAToStore(dump, name, hashMethod, hashAlgo, references, repair)->path; + auto storePath = addCAToStore(dump, name, hashMethod, hashAlgo, references, repair, provenance)->path; invalidatePathInfoCacheFor(storePath); return storePath; } @@ -431,8 +439,10 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, Repair WorkerProto::write(*this, *conn, info.deriver); conn->to << info.narHash.to_string(HashFormat::Base16, false); WorkerProto::write(*this, *conn, info.references); - conn->to << info.registrationTime << info.narSize << info.ultimate << info.sigs << renderContentAddress(info.ca) - << repair << !checkSigs; + conn->to << info.registrationTime << info.narSize << info.ultimate << info.sigs << renderContentAddress(info.ca); + if (conn->features.contains(WorkerProto::featureProvenance)) + conn->to << (info.provenance ? 
info.provenance->to_json_str() : ""); + conn->to << repair << !checkSigs; if (GET_PROTOCOL_MINOR(conn->protoVersion) >= 23) { conn.withFramedSink([&](Sink & sink) { copyNAR(source, sink); }); @@ -769,6 +779,16 @@ void RemoteStore::addBuildLog(const StorePath & drvPath, std::string_view log) readInt(conn->from); } +std::vector RemoteStore::queryActiveBuilds() +{ + auto conn(getConnection()); + if (!conn->features.count(WorkerProto::featureQueryActiveBuilds)) + throw Error("remote store does not support querying active builds"); + conn->to << WorkerProto::Op::QueryActiveBuilds; + conn.processStderr(); + return nlohmann::json::parse(readString(conn->from)).get>(); +} + std::optional RemoteStore::getVersion() { auto conn(getConnection()); diff --git a/src/libstore/restricted-store.cc b/src/libstore/restricted-store.cc index ef8aaa3801d9..e4602f69c060 100644 --- a/src/libstore/restricted-store.cc +++ b/src/libstore/restricted-store.cc @@ -98,7 +98,8 @@ struct RestrictedStore : public virtual IndirectRootStore, public virtual GcStor ContentAddressMethod hashMethod, HashAlgorithm hashAlgo, const StorePathSet & references, - RepairFlag repair) override; + RepairFlag repair, + std::shared_ptr provenance) override; void narFromPath(const StorePath & path, Sink & sink) override; @@ -215,9 +216,10 @@ StorePath RestrictedStore::addToStoreFromDump( ContentAddressMethod hashMethod, HashAlgorithm hashAlgo, const StorePathSet & references, - RepairFlag repair) + RepairFlag repair, + std::shared_ptr provenance) { - auto path = next->addToStoreFromDump(dump, name, dumpMethod, hashMethod, hashAlgo, references, repair); + auto path = next->addToStoreFromDump(dump, name, dumpMethod, hashMethod, hashAlgo, references, repair, provenance); goal.addDependency(path); return path; } diff --git a/src/libstore/s3-url.cc b/src/libstore/s3-url.cc index 503c0cd91059..e6b5553661a6 100644 --- a/src/libstore/s3-url.cc +++ b/src/libstore/s3-url.cc @@ -1,8 +1,14 @@ #include "nix/store/s3-url.hh" 
+#include "nix/util/abstract-setting-to-json.hh" +#include "nix/util/config-impl.hh" #include "nix/util/error.hh" +#include "nix/util/logging.hh" +#include "nix/util/json-impls.hh" #include "nix/util/split.hh" #include "nix/util/strings-inline.hh" +#include +#include #include #include @@ -10,6 +16,30 @@ using namespace std::string_view_literals; namespace nix { +S3AddressingStyle parseS3AddressingStyle(std::string_view style) +{ + if (style == "auto") + return S3AddressingStyle::Auto; + if (style == "path") + return S3AddressingStyle::Path; + if (style == "virtual") + return S3AddressingStyle::Virtual; + throw InvalidS3AddressingStyle("unknown S3 addressing style '%s', expected 'auto', 'path', or 'virtual'", style); +} + +std::string_view showS3AddressingStyle(S3AddressingStyle style) +{ + switch (style) { + case S3AddressingStyle::Auto: + return "auto"; + case S3AddressingStyle::Path: + return "path"; + case S3AddressingStyle::Virtual: + return "virtual"; + } + unreachable(); +} + ParsedS3URL ParsedS3URL::parse(const ParsedURL & parsed) try { if (parsed.scheme != "s3"sv) @@ -49,6 +79,9 @@ try { .region = getOptionalParam("region"), .scheme = getOptionalParam("scheme"), .versionId = getOptionalParam("versionId"), + .addressingStyle = getOptionalParam("addressing-style").transform([](const std::string & s) { + return parseS3AddressingStyle(s); + }), .endpoint = [&]() -> decltype(ParsedS3URL::endpoint) { if (!endpoint) return std::monostate(); @@ -65,6 +98,9 @@ try { } catch (BadURL & e) { e.addTrace({}, "while parsing S3 URI: '%s'", parsed.to_string()); throw; +} catch (InvalidS3AddressingStyle & e) { + e.addTrace({}, "while parsing S3 URI: '%s'", parsed.to_string()); + throw; } ParsedURL ParsedS3URL::toHttpsUrl() const @@ -80,41 +116,95 @@ ParsedURL ParsedS3URL::toHttpsUrl() const queryParams["versionId"] = *versionId; } + auto style = addressingStyle.value_or(S3AddressingStyle::Auto); + + // Virtual-hosted-style prepends the bucket name to the hostname, so bucket 
+ // names containing dots produce multi-level subdomains (e.g. + // my.bucket.s3.amazonaws.com) that break TLS wildcard certificate validation. + // In auto mode, fall back to path-style; only error on explicit virtual. + auto hasDottedBucket = bucket.find('.') != std::string::npos; + auto useVirtualForEndpoint = [&](bool defaultVirtual) { + auto useVirtual = defaultVirtual ? style != S3AddressingStyle::Path : style == S3AddressingStyle::Virtual; + if (useVirtual && hasDottedBucket) { + if (style == S3AddressingStyle::Virtual) + throw Error( + "bucket name '%s' contains a dot, which is incompatible with " + "virtual-hosted-style addressing (causes TLS certificate errors); " + "use 'addressing-style=path' or 'addressing-style=auto' instead", + bucket); + static std::atomic warnedDottedBucket{false}; + warnOnce( + warnedDottedBucket, + "bucket name '%s' contains a dot; falling back to path-style addressing " + "(virtual-hosted-style requires non-dotted bucket names for TLS certificate validity); " + "set 'addressing-style=path' to silence this warning", + bucket); + return false; + } + return useVirtual; + }; + // Handle endpoint configuration using std::visit return std::visit( overloaded{ [&](const std::monostate &) { - // No custom endpoint, use standard AWS S3 endpoint + // No custom endpoint: use virtual-hosted-style by default (auto), + // path-style when explicitly requested or for dotted bucket names. + auto useVirtual = useVirtualForEndpoint(/* defaultVirtual = */ true); std::vector path{""}; - path.push_back(bucket); + if (!useVirtual) + path.push_back(bucket); path.insert(path.end(), key.begin(), key.end()); return ParsedURL{ .scheme = std::string{schemeStr}, - .authority = ParsedURL::Authority{.host = "s3." + regionStr + ".amazonaws.com"}, + .authority = + ParsedURL::Authority{ + .host = useVirtual ? bucket + ".s3." + regionStr + ".amazonaws.com" + : "s3." 
+ regionStr + ".amazonaws.com"}, .path = std::move(path), .query = std::move(queryParams), }; }, [&](const ParsedURL::Authority & auth) { - // Endpoint is just an authority (hostname/port) + // Custom endpoint authority: use path-style by default (auto), + // virtual-hosted-style only when explicitly requested (not for dotted buckets). + auto useVirtual = useVirtualForEndpoint(/* defaultVirtual = */ false); + if (useVirtual && auth.host.empty()) + throw Error( + "cannot use virtual-hosted-style addressing with endpoint '%s' " + "because it has no hostname; use 'addressing-style=path' instead", + auth.to_string()); std::vector path{""}; - path.push_back(bucket); + if (!useVirtual) + path.push_back(bucket); path.insert(path.end(), key.begin(), key.end()); return ParsedURL{ .scheme = std::string{schemeStr}, - .authority = auth, + .authority = + useVirtual ? ParsedURL::Authority{.host = bucket + "." + auth.host, .port = auth.port} : auth, .path = std::move(path), .query = std::move(queryParams), }; }, [&](const ParsedURL & endpointUrl) { - // Endpoint is already a ParsedURL (e.g., http://server:9000) + // Full endpoint URL: use path-style by default (auto), + // virtual-hosted-style only when explicitly requested (not for dotted buckets). + auto useVirtual = useVirtualForEndpoint(/* defaultVirtual = */ false); + if (useVirtual && (!endpointUrl.authority || endpointUrl.authority->host.empty())) + throw Error( + "cannot use virtual-hosted-style addressing with endpoint '%s' " + "because it has no authority (hostname)", + endpointUrl.to_string()); auto path = endpointUrl.path; - path.push_back(bucket); + if (!useVirtual) + path.push_back(bucket); path.insert(path.end(), key.begin(), key.end()); return ParsedURL{ .scheme = endpointUrl.scheme, - .authority = endpointUrl.authority, + .authority = useVirtual ? std::optional{ParsedURL::Authority{ + .host = bucket + "." 
+ endpointUrl.authority->host, + .port = endpointUrl.authority->port}} + : endpointUrl.authority, .path = std::move(path), .query = std::move(queryParams), }; @@ -123,4 +213,42 @@ ParsedURL ParsedS3URL::toHttpsUrl() const endpoint); } +void to_json(nlohmann::json & j, const S3AddressingStyle & e) +{ + j = std::string{showS3AddressingStyle(e)}; +} + +void from_json(const nlohmann::json & j, S3AddressingStyle & e) +{ + e = parseS3AddressingStyle(j.get()); +} + +template<> +struct json_avoids_null : std::true_type +{}; + +template<> +S3AddressingStyle BaseSetting::parse(const std::string & str) const +{ + try { + return parseS3AddressingStyle(str); + } catch (InvalidS3AddressingStyle &) { + throw UsageError("option '%s' has invalid value '%s', expected 'auto', 'path', or 'virtual'", name, str); + } +} + +template<> +std::string BaseSetting::to_string() const +{ + return std::string{showS3AddressingStyle(value)}; +} + +template<> +struct BaseSetting::trait +{ + static constexpr bool appendable = false; +}; + +template class BaseSetting; + } // namespace nix diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc index dc70b4ba8dee..1d69ca640f2a 100644 --- a/src/libstore/ssh-store.cc +++ b/src/libstore/ssh-store.cc @@ -54,6 +54,11 @@ struct alignas(8) /* Work around ASAN failures on i686-linux. */ { } + bool includeInProvenance() override + { + return true; + } + // FIXME extend daemon protocol, move implementation to RemoteStore std::optional getBuildLogExact(const StorePath & path) override { diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index bfec87ab7b36..2e3d7e528f5f 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -18,14 +18,13 @@ // `addMultipleToStore`. 
#include "nix/store/worker-protocol.hh" #include "nix/util/signals.hh" +#include "nix/store/provenance.hh" #include #include #include "nix/util/strings.hh" -using json = nlohmann::json; - namespace nix { Path StoreConfigBase::getDefaultNixStoreDir() @@ -109,9 +108,14 @@ StorePath Store::addToStore( std::optional storePath; auto sink = sourceToSink([&](Source & source) { LengthSource lengthSource(source); - storePath = addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair); - if (settings.warnLargePathThreshold && lengthSource.total >= settings.warnLargePathThreshold) - warn("copied large path '%s' to the store (%s)", path, renderSize(lengthSource.total)); + storePath = + addToStoreFromDump(lengthSource, name, fsm, method, hashAlgo, references, repair, path.getProvenance()); + if (settings.warnLargePathThreshold && lengthSource.total >= settings.warnLargePathThreshold) { + static bool failOnLargePath = getEnv("_NIX_TEST_FAIL_ON_LARGE_PATH").value_or("") == "1"; + if (failOnLargePath) + throw Error("doesn't copy large path '%s' to the store (%d)", path, renderSize(lengthSource.total)); + warn("copied large path '%s' to the store (%d)", path, renderSize(lengthSource.total)); + } }); dumpPath(path, *sink, fsm, filter); sink->finish(); @@ -291,6 +295,7 @@ ValidPathInfo Store::addToStoreSlow( }), narHash); info.narSize = narSize; + info.provenance = srcPath.getProvenance(); if (!isValidPath(info.path)) { auto source = sinkToSource([&](Sink & scratchpadSink) { srcPath.dumpPath(scratchpadSink); }); @@ -529,6 +534,23 @@ ref Store::queryPathInfo(const StorePath & storePath) return promise.get_future().get(); } +std::shared_ptr Store::maybeQueryPathInfo(const StorePath & storePath) +{ + std::promise> promise; + + queryPathInfo(storePath, {[&](std::future> result) { + try { + promise.set_value(result.get()); + } catch (InvalidPath &) { + promise.set_value(nullptr); + } catch (...) 
{ + promise.set_exception(std::current_exception()); + } + }}); + + return promise.get_future().get(); +} + static bool goodStorePath(const StorePath & expected, const StorePath & actual) { return expected.hashPart() == actual.hashPart() @@ -850,13 +872,26 @@ makeCopyPathMessage(const StoreConfig & srcCfg, const StoreConfig & dstCfg, std: "copying path '%s' from '%s' to '%s'", storePath, srcCfg.getHumanReadableURI(), dstCfg.getHumanReadableURI()); } -void copyStorePath( +/** + * Wrap upstream provenance in a "copied" provenance record to record + * where the path was copied from. But uninformative origins like + * LocalStore are omitted. + */ +static std::shared_ptr +addCopiedProvenance(std::shared_ptr provenance, Store & srcStore) +{ + if (!srcStore.includeInProvenance()) + return provenance; + return std::make_shared(srcStore.config.getReference().render(false), provenance); +} + +std::shared_ptr copyStorePath( Store & srcStore, Store & dstStore, const StorePath & storePath, RepairFlag repair, CheckSigsFlag checkSigs) { /* Bail out early (before starting a download from srcStore) if dstStore already has this path. 
*/ if (!repair && dstStore.isValidPath(storePath)) - return; + return nullptr; const auto & srcCfg = srcStore.config; const auto & dstCfg = dstStore.config; @@ -869,25 +904,22 @@ void copyStorePath( {storePathS, srcCfg.getHumanReadableURI(), dstCfg.getHumanReadableURI()}); PushActivity pact(act.id); - auto info = srcStore.queryPathInfo(storePath); + auto srcInfo = srcStore.queryPathInfo(storePath); + auto info = std::make_shared(*srcInfo); uint64_t total = 0; // recompute store path on the chance dstStore does it differently if (info->ca && info->references.empty()) { - auto info2 = make_ref(*info); - info2->path = + info->path = dstStore.makeFixedOutputPathFromCA(info->path.name(), info->contentAddressWithReferences().value()); if (dstStore.storeDir == srcStore.storeDir) - assert(info->path == info2->path); - info = info2; + assert(info->path == srcInfo->path); } - if (info->ultimate) { - auto info2 = make_ref(*info); - info2->ultimate = false; - info = info2; - } + info->ultimate = false; + + info->provenance = addCopiedProvenance(info->provenance, srcStore); auto source = sinkToSource( [&](Sink & sink) { @@ -906,6 +938,8 @@ void copyStorePath( }); dstStore.addToStore(*info, *source, repair, checkSigs); + + return info; } std::map copyPaths( @@ -1014,6 +1048,7 @@ std::map copyPaths( ValidPathInfo infoForDst = *info; infoForDst.path = storePathForDst; + infoForDst.provenance = addCopiedProvenance(info->provenance, srcStore); auto source = sinkToSource([&, narSize = info->narSize](Sink & sink) { // We can reasonably assume that the copy will happen whenever we diff --git a/src/libstore/unix/build/chroot-derivation-builder.cc b/src/libstore/unix/build/chroot-derivation-builder.cc index 354a604f5350..a7bf94cf934f 100644 --- a/src/libstore/unix/build/chroot-derivation-builder.cc +++ b/src/libstore/unix/build/chroot-derivation-builder.cc @@ -153,7 +153,7 @@ struct ChrootDerivationBuilder : virtual DerivationBuilderImpl return Strings({store.printStorePath(drvPath), 
chrootRootDir}); } - Path realPathInSandbox(const Path & p) override + Path realPathInHost(const Path & p) override { // FIXME: why the needsHashRewrite() conditional? return !needsHashRewrite() ? chrootRootDir + p : store.toRealPath(p); diff --git a/src/libstore/unix/build/derivation-builder.cc b/src/libstore/unix/build/derivation-builder.cc index 360b6a484871..a0790d7523b4 100644 --- a/src/libstore/unix/build/derivation-builder.cc +++ b/src/libstore/unix/build/derivation-builder.cc @@ -1,6 +1,7 @@ #include "nix/store/build/derivation-builder.hh" #include "nix/util/file-system.hh" #include "nix/store/local-store.hh" +#include "nix/store/active-builds.hh" #include "nix/util/processes.hh" #include "nix/store/builtins.hh" #include "nix/store/path-references.hh" @@ -19,6 +20,7 @@ #include "nix/store/globals.hh" #include "nix/store/build/derivation-env-desugar.hh" #include "nix/util/terminal.hh" +#include "nix/store/provenance.hh" #include @@ -83,6 +85,11 @@ class DerivationBuilderImpl : public DerivationBuilder, public DerivationBuilder */ Pid pid; + /** + * Handles to track active builds for `nix ps`. + */ + std::optional activeBuildHandle; + LocalStore & store; std::unique_ptr miscMethods; @@ -238,6 +245,11 @@ class DerivationBuilderImpl : public DerivationBuilder, public DerivationBuilder return acquireUserLock(1, false); } + /** + * Construct the `ActiveBuild` object for `ActiveBuildsTracker`. + */ + virtual ActiveBuild getActiveBuild(); + /** * Return the paths that should be made available in the sandbox. 
* This includes: @@ -283,7 +295,7 @@ class DerivationBuilderImpl : public DerivationBuilder, public DerivationBuilder return Strings({store.printStorePath(drvPath)}); } - virtual Path realPathInSandbox(const Path & p) + virtual Path realPathInHost(const Path & p) { return store.toRealPath(p); } @@ -493,6 +505,8 @@ bool DerivationBuilderImpl::killChild() killSandbox(true); pid.wait(); + + activeBuildHandle.reset(); } return ret; } @@ -526,6 +540,8 @@ SingleDrvOutputs DerivationBuilderImpl::unprepareBuild() root. */ killSandbox(true); + activeBuildHandle.reset(); + /* Terminate the recursive Nix daemon. */ stopDaemon(); @@ -839,17 +855,43 @@ std::optional DerivationBuilderImpl::startBuild() pid.setSeparatePG(true); + /* Make the build visible to `nix ps`. */ + if (auto tracker = dynamic_cast(&store)) + activeBuildHandle.emplace(tracker->buildStarted(getActiveBuild())); + processSandboxSetupMessages(); return builderOut.get(); } +ActiveBuild DerivationBuilderImpl::getActiveBuild() +{ + return { + .nixPid = getpid(), + .clientPid = std::nullopt, // FIXME + .clientUid = std::nullopt, // FIXME + .mainPid = pid, + .mainUser = UserInfo::fromUid(buildUser ? buildUser->getUID() : getuid()), + .startTime = buildResult.startTime, + .derivation = drvPath, + }; +} + PathsInChroot DerivationBuilderImpl::getPathsInSandbox() { /* Allow a user-configurable set of directories from the host file system. 
*/ PathsInChroot pathsInChroot = defaultPathsInChroot; + for (auto & p : pathsInChroot) + if (!p.second.optional +#if HAVE_EMBEDDED_SANDBOX_SHELL + && p.second.source != SANDBOX_SHELL +#endif + && !maybeLstat(p.second.source)) + throw SysError( + "path '%s' is configured as part of the `sandbox-paths` option, but is inaccessible", p.second.source); + if (hasPrefix(store.storeDir, tmpDirInSandbox())) { throw Error("`sandbox-build-dir` must not contain the storeDir"); } @@ -1001,7 +1043,7 @@ void DerivationBuilderImpl::processSandboxSetupMessages() "while waiting for the build environment for '%s' to initialize (%s, previous messages: %s)", store.printStorePath(drvPath), statusToString(status), - concatStringsSep("|", msgs)); + concatStringsSep("\n", msgs)); throw; } }(); @@ -1415,7 +1457,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() for (auto & [outputName, _] : drv.outputs) { auto scratchOutput = get(scratchOutputs, outputName); assert(scratchOutput); - auto actualPath = realPathInSandbox(store.printStorePath(*scratchOutput)); + auto actualPath = realPathInHost(store.printStorePath(*scratchOutput)); outputsToSort.insert(outputName); @@ -1535,7 +1577,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() auto output = get(drv.outputs, outputName); auto scratchPath = get(scratchOutputs, outputName); assert(output && scratchPath); - auto actualPath = realPathInSandbox(store.printStorePath(*scratchPath)); + auto actualPath = realPathInHost(store.printStorePath(*scratchPath)); auto finish = [&](StorePath finalStorePath) { /* Store the final path */ @@ -1829,6 +1871,9 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() newInfo.deriver = drvPath; newInfo.ultimate = true; + if (experimentalFeatureSettings.isEnabled(Xp::Provenance)) + newInfo.provenance = std::make_shared( + drvPath, outputName, settings.getHostName(), drv.platform, drvProvenance); store.signPathInfo(newInfo); finish(newInfo.path); @@ -1839,8 +1884,8 @@ SingleDrvOutputs 
DerivationBuilderImpl::registerOutputs() This is also good so that if a fixed-output produces the wrong path, we still store the result (just don't consider - the derivation sucessful, so if someone fixes the problem by - just changing the wanted hash, the redownload (or whateer + the derivation successful, so if someone fixes the problem by + just changing the wanted hash, the redownload (or whatever possibly quite slow thing it was) doesn't have to be done again. */ if (newInfo.ca) @@ -1855,7 +1900,7 @@ SingleDrvOutputs DerivationBuilderImpl::registerOutputs() /* Apply output checks. This includes checking of the wanted vs got hash of fixed-outputs. */ - checkOutputs(store, drvPath, drv.outputs, drvOptions.outputChecks, infos); + checkOutputs(store, drvPath, drv.outputs, drvOptions.outputChecks, infos, *act); if (buildMode == bmCheck) { return {}; @@ -1963,6 +2008,10 @@ StorePath DerivationBuilderImpl::makeFallbackPath(const StorePath & path) #include "darwin-derivation-builder.cc" #include "external-derivation-builder.cc" +#if NIX_USE_WASMTIME +# include "wasi-derivation-builder.cc" +#endif + namespace nix { void DerivationBuilderDeleter::operator()(DerivationBuilder * builder) noexcept @@ -2006,6 +2055,11 @@ std::unique_ptr makeDerivationBuild useSandbox = params.drv.type().isSandboxed() && !params.drvOptions.noChroot; } +#if NIX_USE_WASMTIME + if (params.drv.platform == "wasm32-wasip1") + return DerivationBuilderUnique(new WasiDerivationBuilder(store, std::move(miscMethods), std::move(params))); +#endif + if (store.storeDir != store.config->realStoreDir.get()) { #ifdef __linux__ useSandbox = true; diff --git a/src/libstore/unix/build/linux-derivation-builder.cc b/src/libstore/unix/build/linux-derivation-builder.cc index d15e6e1ae7a3..fc2140817d7a 100644 --- a/src/libstore/unix/build/linux-derivation-builder.cc +++ b/src/libstore/unix/build/linux-derivation-builder.cc @@ -711,6 +711,9 @@ struct ChrootLinuxDerivationBuilder : ChrootDerivationBuilder, 
LinuxDerivationBu void addDependencyImpl(const StorePath & path) override { + if (isAllowed(path)) + return; + auto [source, target] = ChrootDerivationBuilder::addDependencyPrep(path); /* Bind-mount the path into the sandbox. This requires @@ -733,6 +736,13 @@ struct ChrootLinuxDerivationBuilder : ChrootDerivationBuilder, LinuxDerivationBu if (status != 0) throw Error("could not add path '%s' to sandbox", store.printStorePath(path)); } + + ActiveBuild getActiveBuild() override + { + auto build = DerivationBuilderImpl::getActiveBuild(); + build.cgroup = cgroup; + return build; + } }; } // namespace nix diff --git a/src/libstore/unix/build/wasi-derivation-builder.cc b/src/libstore/unix/build/wasi-derivation-builder.cc new file mode 100644 index 000000000000..126e95944622 --- /dev/null +++ b/src/libstore/unix/build/wasi-derivation-builder.cc @@ -0,0 +1,80 @@ +#include + +namespace nix { + +// FIXME: cut&paste +template +T unwrap(wasmtime::Result && res) +{ + if (res) + return res.ok(); + throw Error(res.err().message()); +} + +// FIXME: cut&paste +static std::span string2span(std::string_view s) +{ + return std::span((uint8_t *) s.data(), s.size()); +} + +struct WasiDerivationBuilder : DerivationBuilderImpl +{ + WasiDerivationBuilder( + LocalStore & store, std::unique_ptr miscMethods, DerivationBuilderParams params) + : DerivationBuilderImpl(store, std::move(miscMethods), std::move(params)) + { + experimentalFeatureSettings.require(Xp::WasmDerivations); + } + + void execBuilder(const Strings & args, const Strings & envStrs) override + { + using namespace wasmtime; + + Engine engine; + Linker linker(engine); + unwrap(linker.define_wasi()); + + WasiConfig wasiConfig; + wasiConfig.inherit_stdin(); + wasiConfig.inherit_stdout(); + wasiConfig.inherit_stderr(); + wasiConfig.argv(std::vector(args.begin(), args.end())); + { + std::vector> env2; + for (auto & [k, v] : env) + env2.emplace_back(k, rewriteStrings(v, inputRewrites)); + wasiConfig.env(env2); + } + if 
(!wasiConfig.preopen_dir( + store.config->realStoreDir.get(), + store.storeDir, + WASMTIME_WASI_DIR_PERMS_READ | WASMTIME_WASI_DIR_PERMS_WRITE, + WASMTIME_WASI_FILE_PERMS_READ | WASMTIME_WASI_FILE_PERMS_WRITE)) + throw Error("cannot add store directory to WASI config"); + if (!wasiConfig.preopen_dir( + tmpDir, + tmpDirInSandbox(), + WASMTIME_WASI_DIR_PERMS_READ | WASMTIME_WASI_DIR_PERMS_WRITE, + WASMTIME_WASI_FILE_PERMS_READ | WASMTIME_WASI_FILE_PERMS_WRITE)) + throw Error("cannot add temporary directory to WASI config"); + + auto module = unwrap(Module::compile(engine, string2span(readFile(realPathInHost(drv.builder))))); + wasmtime::Store wasmStore(engine); + unwrap(wasmStore.context().set_wasi(std::move(wasiConfig))); + auto instance = unwrap(linker.instantiate(wasmStore, module)); + + auto startName = "_start"; + auto ext = instance.get(wasmStore, startName); + if (!ext) + throw Error("Wasm module '%s' does not export function '%s'", drv.builder, startName); + auto fun = std::get_if(&*ext); + if (!fun) + throw Error("export '%s' of Wasm module '%s' is not a function", startName, drv.builder); + + unwrap(fun->call(wasmStore.context(), {})); + + _exit(0); + } +}; + +} // namespace nix diff --git a/src/libstore/worker-protocol-connection.cc b/src/libstore/worker-protocol-connection.cc index 8a37662904d5..7f41b0c47e7e 100644 --- a/src/libstore/worker-protocol-connection.cc +++ b/src/libstore/worker-protocol-connection.cc @@ -5,7 +5,8 @@ namespace nix { -const WorkerProto::FeatureSet WorkerProto::allFeatures{}; +const WorkerProto::FeatureSet WorkerProto::allFeatures{ + {std::string(WorkerProto::featureQueryActiveBuilds), std::string(WorkerProto::featureProvenance)}}; WorkerProto::BasicClientConnection::~BasicClientConnection() { diff --git a/src/libstore/worker-protocol.cc b/src/libstore/worker-protocol.cc index 2788222c0d73..6dc1f837102e 100644 --- a/src/libstore/worker-protocol.cc +++ b/src/libstore/worker-protocol.cc @@ -7,6 +7,7 @@ #include 
"nix/store/worker-protocol-impl.hh" #include "nix/util/archive.hh" #include "nix/store/path-info.hh" +#include "nix/util/provenance.hh" #include #include @@ -303,6 +304,8 @@ UnkeyedValidPathInfo WorkerProto::Serialise::read(const St info.sigs = readStrings(conn.from); info.ca = ContentAddress::parseOpt(readString(conn.from)); } + if (conn.provenance) + info.provenance = Provenance::from_json_str_optional(readString(conn.from)); return info; } @@ -316,6 +319,8 @@ void WorkerProto::Serialise::write( if (GET_PROTOCOL_MINOR(conn.version) >= 16) { conn.to << pathInfo.ultimate << pathInfo.sigs << renderContentAddress(pathInfo.ca); } + if (conn.provenance) + conn.to << (pathInfo.provenance ? pathInfo.provenance->to_json_str() : ""); } WorkerProto::ClientHandshakeInfo diff --git a/src/libutil-c/meson.build b/src/libutil-c/meson.build index 1806dbb6f9a0..93817efd726b 100644 --- a/src/libutil-c/meson.build +++ b/src/libutil-c/meson.build @@ -57,7 +57,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libutil-c/nix_api_util.cc b/src/libutil-c/nix_api_util.cc index f28a9168e30b..23eafa8e8ff3 100644 --- a/src/libutil-c/nix_api_util.cc +++ b/src/libutil-c/nix_api_util.cc @@ -2,6 +2,7 @@ #include "nix/util/config-global.hh" #include "nix/util/error.hh" #include "nix_api_util_internal.h" +#include "nix/util/signals.hh" #include "nix/util/util.hh" #include @@ -111,6 +112,9 @@ nix_err nix_libutil_init(nix_c_context * context) context->last_err_code = NIX_OK; try { nix::initLibUtil(); +#ifndef _WIN32 + nix::unix::startSignalHandlerThread(); +#endif return NIX_OK; } NIXC_CATCH_ERRS diff --git a/src/libutil-c/package.nix b/src/libutil-c/package.nix index f26f57775d4f..a1605bf5bb85 100644 --- a/src/libutil-c/package.nix +++ b/src/libutil-c/package.nix @@ 
-14,7 +14,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-util-c"; + pname = "determinate-nix-util-c"; inherit version; workDir = ./.; diff --git a/src/libutil-test-support/meson.build b/src/libutil-test-support/meson.build index 64231107eb6b..f9254b616725 100644 --- a/src/libutil-test-support/meson.build +++ b/src/libutil-test-support/meson.build @@ -47,7 +47,7 @@ this_library = library( # TODO: Remove `-lrapidcheck` when https://github.com/emil-e/rapidcheck/pull/326 # is available. See also ../libutil/build.meson link_args : linker_export_flags + [ '-lrapidcheck' ], - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, ) diff --git a/src/libutil-test-support/package.nix b/src/libutil-test-support/package.nix index f8e92c271137..40ff65d61357 100644 --- a/src/libutil-test-support/package.nix +++ b/src/libutil-test-support/package.nix @@ -17,7 +17,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-util-test-support"; + pname = "determinate-nix-util-test-support"; inherit version; workDir = ./.; diff --git a/src/libutil-tests/alignment.cc b/src/libutil-tests/alignment.cc index bef0c435dc0f..d68bcde4f475 100644 --- a/src/libutil-tests/alignment.cc +++ b/src/libutil-tests/alignment.cc @@ -15,4 +15,32 @@ TEST(alignUp, notAPowerOf2) ASSERT_DEATH({ alignUp(1u, 42); }, "alignment must be a power of 2"); } +template +class alignUpOverflowTest : public ::testing::Test +{}; + +using UnsignedTypes = ::testing::Types; +TYPED_TEST_SUITE(alignUpOverflowTest, UnsignedTypes); + +TYPED_TEST(alignUpOverflowTest, lastSafeValue) +{ + constexpr auto max = std::numeric_limits::max(); + ASSERT_EQ(alignUp(max - 15, 16), (max - 15) & ~TypeParam{15}); + ASSERT_NO_THROW(alignUp(max - 15, 16)); +} + +TYPED_TEST(alignUpOverflowTest, overflowThrows) +{ + constexpr auto max = std::numeric_limits::max(); + ASSERT_THROW(alignUp(max - 14, 16), Error); + ASSERT_THROW(alignUp(max, 16), Error); + ASSERT_THROW(alignUp(max, 2), 
Error); +} + +TYPED_TEST(alignUpOverflowTest, alignmentOneNeverOverflows) +{ + constexpr auto max = std::numeric_limits::max(); + ASSERT_EQ(alignUp(max, 1), max); +} + } // namespace nix diff --git a/src/libutil-tests/config.cc b/src/libutil-tests/config.cc index 5fb2229b6b9d..87c1e556b736 100644 --- a/src/libutil-tests/config.cc +++ b/src/libutil-tests/config.cc @@ -218,7 +218,7 @@ TEST(Config, toJSONOnNonEmptyConfigWithExperimentalSetting) "description", {}, true, - Xp::Flakes, + Xp::CaDerivations, }; setting.assign("value"); @@ -231,7 +231,7 @@ TEST(Config, toJSONOnNonEmptyConfigWithExperimentalSetting) "description": "description\n", "documentDefault": true, "value": "value", - "experimentalFeature": "flakes" + "experimentalFeature": "ca-derivations" } })#"_json); } diff --git a/src/libutil/args.cc b/src/libutil/args.cc index c6d450a0bc65..bd3dc9c95dfa 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -513,7 +513,7 @@ void Args::checkArgs() { for (auto & [name, flag] : longFlags) { if (flag->required && flag->timesUsed == 0) - throw UsageError("required argument '--%s' is missing", name); + throw UsageError("required argument '%s' is missing", "--" + name); } } @@ -607,7 +607,7 @@ Strings argvToStrings(int argc, char ** argv) std::optional Command::experimentalFeature() { - return {Xp::NixCommand}; + return {}; } MultiCommand::MultiCommand(std::string_view commandName, const Commands & commands_) diff --git a/src/libutil/configuration.cc b/src/libutil/configuration.cc index 832099dab991..407320a6b51b 100644 --- a/src/libutil/configuration.cc +++ b/src/libutil/configuration.cc @@ -398,11 +398,11 @@ std::set BaseSetting>::parse( { std::set res; for (auto & s : tokenizeString(str)) { - if (auto thisXpFeature = parseExperimentalFeature(s); thisXpFeature) { + if (auto thisXpFeature = parseExperimentalFeature(s)) res.insert(thisXpFeature.value()); - if (thisXpFeature.value() == Xp::Flakes) - res.insert(Xp::FetchTree); - } else + else if 
(stabilizedFeatures.count(s)) + debug("experimental feature '%s' is now stable", s); + else warn("unknown experimental feature '%s'", s); } return res; diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index 69ba62b5619d..ad692ff3d243 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -17,7 +17,7 @@ struct ExperimentalFeatureDetails /** * If two different PRs both add an experimental feature, and we just - * used a number for this, we *woudln't* get merge conflict and the + * used a number for this, we *wouldn't* get merge conflict and the * counter will be incremented once instead of twice, causing a build * failure. * @@ -25,7 +25,7 @@ struct ExperimentalFeatureDetails * feature, we either have no issue at all if few features are not added * at the end of the list, or a proper merge conflict if they are. */ -constexpr size_t numXpFeatures = 1 + static_cast(Xp::BLAKE3Hashes); +constexpr size_t numXpFeatures = 1 + static_cast(Xp::Provenance); constexpr std::array xpFeatureDetails = {{ { @@ -71,38 +71,21 @@ constexpr std::array xpFeatureDetails )", .trackingUrl = "https://github.com/NixOS/nix/milestone/42", }, - { - .tag = Xp::Flakes, - .name = "flakes", - .description = R"( - Enable flakes. See the manual entry for [`nix - flake`](@docroot@/command-ref/new-cli/nix3-flake.md) for details. - )", - .trackingUrl = "https://github.com/NixOS/nix/milestone/27", - }, { .tag = Xp::FetchTree, .name = "fetch-tree", .description = R"( + *Enabled for Determinate Nix Installer users since 2.24* + Enable the use of the [`fetchTree`](@docroot@/language/builtins.md#builtins-fetchTree) built-in function in the Nix language. `fetchTree` exposes a generic interface for fetching remote file system trees from different types of remote sources. - The [`flakes`](#xp-feature-flakes) feature flag always enables `fetch-tree`. 
This built-in was previously guarded by the `flakes` experimental feature because of that overlap. Enabling just this feature serves as a "release candidate", allowing users to try it out in isolation. )", .trackingUrl = "https://github.com/NixOS/nix/milestone/31", }, - { - .tag = Xp::NixCommand, - .name = "nix-command", - .description = R"( - Enable the new `nix` subcommands. See the manual on - [`nix`](@docroot@/command-ref/new-cli/nix.md) for details. - )", - .trackingUrl = "https://github.com/NixOS/nix/milestone/28", - }, { .tag = Xp::GitHashing, .name = "git-hashing", @@ -143,14 +126,14 @@ constexpr std::array xpFeatureDetails arbitrary substitutions. For example, running ``` - nix-store -r /nix/store/kmwd1hq55akdb9sc7l3finr175dajlby-hello-2.10 + nix-store -r /nix/store/lrs9qfm60jcgsk83qhyypj3m4jqsgdid-hello-2.10 ``` in the above `runCommand` script would be disallowed, as this could lead to derivations with hidden dependencies or breaking reproducibility by relying on the current state of the Nix store. An exception would be if - `/nix/store/kmwd1hq55akdb9sc7l3finr175dajlby-hello-2.10` were + `/nix/store/lrs9qfm60jcgsk83qhyypj3m4jqsgdid-hello-2.10` were already in the build inputs or built by a previous recursive Nix call. )", @@ -171,7 +154,7 @@ constexpr std::array xpFeatureDetails "http://foo" ``` - But enabling this experimental feature will cause the Nix parser to + But enabling this experimental feature causes the Nix parser to throw an error when encountering a URL literal: ``` @@ -321,6 +304,49 @@ constexpr std::array xpFeatureDetails )", .trackingUrl = "", }, + { + .tag = Xp::BuildTimeFetchTree, + .name = "build-time-fetch-tree", + .description = R"( + Enable the built-in derivation `builtin:fetch-tree`, as well as the flake input attribute `buildTime`. + )", + .trackingUrl = "", + }, + { + .tag = Xp::ParallelEval, + .name = "parallel-eval", + .description = R"( + Enable built-in functions for parallel evaluation. 
+ )", + .trackingUrl = "", + }, + { + .tag = Xp::WasmBuiltin, + .name = "wasm-builtin", + .description = R"( + Enable the use of the [`builtins.wasm`](@docroot@/language/builtins.md) built-in function in the Nix language. + `builtins.wasm` allows calling WebAssembly functions from Nix expressions. + )", + .trackingUrl = "", + }, + { + .tag = Xp::WasmDerivations, + .name = "wasm-derivations", + .description = R"( + Allow derivations to target the WebAssembly system type (`wasm32-wasip1`). + When enabled, derivations with `system = "wasm32-wasip1"` can be built locally + using a WASI runtime environment. + )", + .trackingUrl = "", + }, + { + .tag = Xp::Provenance, + .name = "provenance", + .description = R"( + Enable keeping track of the provenance of store paths. + )", + .trackingUrl = "", + }, }}; static_assert( @@ -332,6 +358,12 @@ static_assert( }(), "array order does not match enum tag order"); +/** + * A set of previously experimental features that are now considered + * stable. We don't warn if users have these in `experimental-features`. + */ +std::set stabilizedFeatures{"flakes", "nix-command"}; + const std::optional parseExperimentalFeature(const std::string_view & name) { using ReverseXpMap = std::map; diff --git a/src/libutil/include/nix/util/alignment.hh b/src/libutil/include/nix/util/alignment.hh index a4e5af4d6c09..4bf48f11576a 100644 --- a/src/libutil/include/nix/util/alignment.hh +++ b/src/libutil/include/nix/util/alignment.hh @@ -1,9 +1,11 @@ #pragma once ///@file +#include "nix/util/error.hh" + #include +#include #include -#include #include namespace nix { @@ -16,7 +18,10 @@ template constexpr T alignUp(T val, unsigned alignment) { assert(std::has_single_bit(alignment) && "alignment must be a power of 2"); - T mask = ~(T{alignment} - 1u); + assert(alignment <= std::numeric_limits::max()); + T mask = ~(static_cast(alignment) - 1u); + if (val > std::numeric_limits::max() - (alignment - 1)) /* Overflow check. 
*/ + throw Error("can't align %d to %d: value is too large", val, alignment); return (val + alignment - 1) & mask; } diff --git a/src/libutil/include/nix/util/configuration.hh b/src/libutil/include/nix/util/configuration.hh index 541febdb5f95..6b9f2d6f5d02 100644 --- a/src/libutil/include/nix/util/configuration.hh +++ b/src/libutil/include/nix/util/configuration.hh @@ -444,7 +444,7 @@ struct ExperimentalFeatureSettings : Config Example: ``` - experimental-features = nix-command flakes + experimental-features = ca-derivations ``` The following experimental features are available: diff --git a/src/libutil/include/nix/util/experimental-features.hh b/src/libutil/include/nix/util/experimental-features.hh index aca14bfbb416..f8955ec8ca9f 100644 --- a/src/libutil/include/nix/util/experimental-features.hh +++ b/src/libutil/include/nix/util/experimental-features.hh @@ -19,9 +19,7 @@ namespace nix { enum struct ExperimentalFeature { CaDerivations, ImpureDerivations, - Flakes, FetchTree, - NixCommand, GitHashing, RecursiveNix, NoUrlLiterals, @@ -39,8 +37,15 @@ enum struct ExperimentalFeature { PipeOperators, ExternalBuilders, BLAKE3Hashes, + BuildTimeFetchTree, + ParallelEval, + WasmBuiltin, + WasmDerivations, + Provenance, }; +extern std::set stabilizedFeatures; + /** * Just because writing `ExperimentalFeature::CaDerivations` is way too long */ diff --git a/src/libutil/include/nix/util/forwarding-source-accessor.hh b/src/libutil/include/nix/util/forwarding-source-accessor.hh new file mode 100644 index 000000000000..02474a3a7f31 --- /dev/null +++ b/src/libutil/include/nix/util/forwarding-source-accessor.hh @@ -0,0 +1,57 @@ +#pragma once + +#include "source-accessor.hh" + +namespace nix { + +/** + * A source accessor that just forwards every operation to another + * accessor. This is not useful in itself but can be used as a + * superclass for accessors that do change some operations. 
+ */ +struct ForwardingSourceAccessor : SourceAccessor +{ + ref next; + + ForwardingSourceAccessor(ref next) + : next(next) + { + } + + std::string readFile(const CanonPath & path) override + { + return next->readFile(path); + } + + void readFile(const CanonPath & path, Sink & sink, std::function sizeCallback) override + { + next->readFile(path, sink, sizeCallback); + } + + std::optional maybeLstat(const CanonPath & path) override + { + return next->maybeLstat(path); + } + + DirEntries readDirectory(const CanonPath & path) override + { + return next->readDirectory(path); + } + + std::string readLink(const CanonPath & path) override + { + return next->readLink(path); + } + + std::string showPath(const CanonPath & path) override + { + return next->showPath(path); + } + + std::optional getPhysicalPath(const CanonPath & path) override + { + return next->getPhysicalPath(path); + } +}; + +} // namespace nix diff --git a/src/libutil/include/nix/util/logging.hh b/src/libutil/include/nix/util/logging.hh index 4673895aad68..de2c3f683df6 100644 --- a/src/libutil/include/nix/util/logging.hh +++ b/src/libutil/include/nix/util/logging.hh @@ -39,6 +39,8 @@ typedef enum { resSetExpected = 106, resPostBuildLogLine = 107, resFetchStatus = 108, + resHashMismatch = 109, + resBuildResult = 110, } ResultType; typedef uint64_t ActivityId; @@ -59,7 +61,7 @@ struct LoggerSettings : Config {}, "json-log-path", R"( - A file or unix socket to which JSON records of Nix's log output are + A file or Unix domain socket to which JSON records of Nix's log output are written, in the same format as `--log-format internal-json` (without the `@nix ` prefixes on each line). 
Concurrent writes to the same file by multiple Nix processes are not supported and @@ -158,6 +160,8 @@ public: virtual void result(ActivityId act, ResultType type, const Fields & fields) {}; + virtual void result(ActivityId act, ResultType type, const nlohmann::json & json) {}; + virtual void writeToStdout(std::string_view s); template @@ -222,6 +226,11 @@ struct Activity result(resSetExpected, type2, expected); } + void result(ResultType type, const nlohmann::json & json) const + { + logger.result(id, type, json); + } + template void result(ResultType type, const Args &... args) const { diff --git a/src/libutil/include/nix/util/meson.build b/src/libutil/include/nix/util/meson.build index 894de0c44c2b..8e0336bbdaab 100644 --- a/src/libutil/include/nix/util/meson.build +++ b/src/libutil/include/nix/util/meson.build @@ -38,6 +38,7 @@ headers = files( 'file-system.hh', 'finally.hh', 'fmt.hh', + 'forwarding-source-accessor.hh', 'fs-sink.hh', 'git.hh', 'hash.hh', @@ -52,12 +53,14 @@ headers = files( 'muxable-pipe.hh', 'nar-accessor.hh', 'os-string.hh', + 'override-provenance-source-accessor.hh', 'pool.hh', 'pos-idx.hh', 'pos-table.hh', 'position.hh', 'posix-source-accessor.hh', 'processes.hh', + 'provenance.hh', 'ref.hh', 'regex-combinators.hh', 'repair-flag.hh', diff --git a/src/libutil/include/nix/util/override-provenance-source-accessor.hh b/src/libutil/include/nix/util/override-provenance-source-accessor.hh new file mode 100644 index 000000000000..5ed937db02ad --- /dev/null +++ b/src/libutil/include/nix/util/override-provenance-source-accessor.hh @@ -0,0 +1,21 @@ +#pragma once + +#include "nix/util/forwarding-source-accessor.hh" + +namespace nix { + +struct OverrideProvenanceSourceAccessor : ForwardingSourceAccessor +{ + OverrideProvenanceSourceAccessor(ref next, std::shared_ptr provenance) + : ForwardingSourceAccessor(std::move(next)) + { + this->provenance = std::move(provenance); + } + + std::shared_ptr getProvenance(const CanonPath & path) override + { + return 
provenance; + } +}; + +} // namespace nix diff --git a/src/libutil/include/nix/util/pool.hh b/src/libutil/include/nix/util/pool.hh index a9091c2dee2f..952c29ad5de3 100644 --- a/src/libutil/include/nix/util/pool.hh +++ b/src/libutil/include/nix/util/pool.hh @@ -211,6 +211,12 @@ public: left.push_back(p); std::swap(state_->idle, left); } + + std::vector> clear() + { + auto state_(state.lock()); + return std::exchange(state_->idle, {}); + } }; } // namespace nix diff --git a/src/libutil/include/nix/util/pos-idx.hh b/src/libutil/include/nix/util/pos-idx.hh index 8e668176c619..7b7d16ca3a4d 100644 --- a/src/libutil/include/nix/util/pos-idx.hh +++ b/src/libutil/include/nix/util/pos-idx.hh @@ -15,12 +15,12 @@ class PosIdx private: uint32_t id; +public: explicit PosIdx(uint32_t id) : id(id) { } -public: PosIdx() : id(0) { @@ -45,6 +45,11 @@ public: { return std::hash{}(id); } + + uint32_t get() const + { + return id; + } }; inline PosIdx noPos = {}; diff --git a/src/libutil/include/nix/util/pos-table.hh b/src/libutil/include/nix/util/pos-table.hh index c5f93a3d5979..954138afbc8e 100644 --- a/src/libutil/include/nix/util/pos-table.hh +++ b/src/libutil/include/nix/util/pos-table.hh @@ -49,20 +49,29 @@ private: */ using LinesCache = LRUCache; - std::map origins; - mutable Sync linesCache; + // FIXME: this could be made lock-free (at least for access) if we + // have a data structure where pointers to existing positions are + // never invalidated. + struct State + { + std::map origins; + }; + + SharedSync state_; + const Origin * resolve(PosIdx p) const { if (p.id == 0) return nullptr; + auto state(state_.readLock()); const auto idx = p.id - 1; - /* we want the last key <= idx, so we'll take prev(first key > idx). - this is guaranteed to never rewind origin.begin because the first - key is always 0. */ - const auto pastOrigin = origins.upper_bound(idx); + /* We want the last key <= idx, so we'll take prev(first key > + idx). 
This is guaranteed to never rewind origin.begin + because the first key is always 0. */ + const auto pastOrigin = state->origins.upper_bound(idx); return &std::prev(pastOrigin)->second; } @@ -74,15 +83,16 @@ public: Origin addOrigin(Pos::Origin origin, size_t size) { + auto state(state_.lock()); uint32_t offset = 0; - if (auto it = origins.rbegin(); it != origins.rend()) + if (auto it = state->origins.rbegin(); it != state->origins.rend()) offset = it->first + it->second.size; // +1 because all PosIdx are offset by 1 to begin with, and // another +1 to ensure that all origins can point to EOF, eg // on (invalid) empty inputs. if (2 + offset + size < offset) return Origin{origin, offset, 0}; - return origins.emplace(offset, Origin{origin, offset, size}).first->second; + return state->origins.emplace(offset, Origin{origin, offset, size}).first->second; } PosIdx add(const Origin & origin, size_t offset) @@ -119,7 +129,7 @@ public: { auto lines = linesCache.lock(); lines->clear(); - origins.clear(); + state_.lock()->origins.clear(); } }; diff --git a/src/libutil/include/nix/util/posix-source-accessor.hh b/src/libutil/include/nix/util/posix-source-accessor.hh index 29561a3daafe..006ba0e7e4d1 100644 --- a/src/libutil/include/nix/util/posix-source-accessor.hh +++ b/src/libutil/include/nix/util/posix-source-accessor.hh @@ -78,6 +78,8 @@ public: return trackLastModified ? 
std::optional{mtime} : std::nullopt; } + void invalidateCache(const CanonPath & path) override; + private: /** diff --git a/src/libutil/include/nix/util/provenance.hh b/src/libutil/include/nix/util/provenance.hh new file mode 100644 index 000000000000..da8005b31817 --- /dev/null +++ b/src/libutil/include/nix/util/provenance.hh @@ -0,0 +1,59 @@ +#pragma once + +#include "nix/util/ref.hh" +#include "nix/util/canon-path.hh" + +#include + +#include + +namespace nix { + +struct Provenance +{ + virtual ~Provenance() = default; + + static ref from_json_str(std::string_view); + + static std::shared_ptr from_json_str_optional(std::string_view); + + static ref from_json(const nlohmann::json & json); + + std::string to_json_str() const; + + virtual nlohmann::json to_json() const = 0; + +protected: + + using ProvenanceFactory = std::function(nlohmann::json)>; + + using RegisteredTypes = std::map; + + static RegisteredTypes & registeredTypes(); + +public: + + struct Register + { + Register(const std::string & type, ProvenanceFactory && factory) + { + registeredTypes().insert_or_assign(type, std::move(factory)); + } + }; +}; + +struct SubpathProvenance : public Provenance +{ + std::shared_ptr next; + CanonPath subpath; + + SubpathProvenance(std::shared_ptr next, const CanonPath & subpath) + : next(std::move(next)) + , subpath(subpath) + { + } + + nlohmann::json to_json() const override; +}; + +} // namespace nix diff --git a/src/libutil/include/nix/util/source-accessor.hh b/src/libutil/include/nix/util/source-accessor.hh index 1006895b33c0..b1948438f2e1 100644 --- a/src/libutil/include/nix/util/source-accessor.hh +++ b/src/libutil/include/nix/util/source-accessor.hh @@ -9,6 +9,7 @@ namespace nix { struct Sink; +struct Provenance; /** * Note there is a decent chance this type soon goes away because the problem is solved another way. 
@@ -209,6 +210,18 @@ struct SourceAccessor : std::enable_shared_from_this { return std::nullopt; } + + std::shared_ptr provenance; + + /** + * Return the provenance of the specified path, or `nullptr` if not available. + */ + virtual std::shared_ptr getProvenance(const CanonPath & path); + + /** + * Invalidate any cached value the accessor may have for the specified path. + */ + virtual void invalidateCache(const CanonPath & path) {} }; /** diff --git a/src/libutil/include/nix/util/source-path.hh b/src/libutil/include/nix/util/source-path.hh index 08f9fe580b03..2810d8c56d51 100644 --- a/src/libutil/include/nix/util/source-path.hh +++ b/src/libutil/include/nix/util/source-path.hh @@ -114,6 +114,16 @@ struct SourcePath return {accessor, accessor->resolveSymlinks(path, mode)}; } + std::shared_ptr getProvenance() const + { + return accessor->getProvenance(path); + } + + void invalidateCache() const + { + accessor->invalidateCache(path); + } + friend class std::hash; }; diff --git a/src/libutil/include/nix/util/table.hh b/src/libutil/include/nix/util/table.hh index 13e4506d5a30..0af33b66cc3b 100644 --- a/src/libutil/include/nix/util/table.hh +++ b/src/libutil/include/nix/util/table.hh @@ -2,10 +2,26 @@ #include "nix/util/types.hh" +#include + namespace nix { -typedef std::vector> Table; +struct TableCell +{ + std::string content; + + enum Alignment { Left, Right } alignment = Left; + + TableCell(std::string content, Alignment alignment = Left) + : content(std::move(content)) + , alignment(alignment) + { + } +}; + +using TableRow = std::vector; +using Table = std::vector; -void printTable(std::ostream & out, Table & table); +void printTable(std::ostream & out, Table & table, unsigned int width = std::numeric_limits::max()); } // namespace nix diff --git a/src/libutil/include/nix/util/terminal.hh b/src/libutil/include/nix/util/terminal.hh index 5e35cbb95408..c70006bc51e3 100644 --- a/src/libutil/include/nix/util/terminal.hh +++ b/src/libutil/include/nix/util/terminal.hh 
@@ -44,6 +44,11 @@ void updateWindowSize(); */ std::pair getWindowSize(); +/** + * @return The number of columns of the terminal, or std::numeric_limits::max() if unknown. + */ +unsigned int getWindowWidth(); + /** * Get the slave name of a pseudoterminal in a thread-safe manner. * diff --git a/src/libutil/include/nix/util/thread-pool.hh b/src/libutil/include/nix/util/thread-pool.hh index a07354146632..63f1141f6a53 100644 --- a/src/libutil/include/nix/util/thread-pool.hh +++ b/src/libutil/include/nix/util/thread-pool.hh @@ -52,6 +52,12 @@ public: */ void process(); + /** + * Shut down all worker threads and wait until they've exited. + * Active work items are finished, but any pending work items are discarded. + */ + void shutdown(); + private: size_t maxThreads; @@ -72,8 +78,6 @@ private: std::condition_variable work; void doWork(bool mainThread); - - void shutdown(); }; /** @@ -85,21 +89,24 @@ template void processGraph( const std::set & nodes, std::function(const T &)> getEdges, - std::function processNode) + std::function processNode, + bool discoverNodes = false, + size_t maxThreads = 0) { struct Graph { + std::set known; std::set left; std::map> refs, rrefs; }; - Sync graph_(Graph{nodes, {}, {}}); + Sync graph_(Graph{nodes, nodes, {}, {}}); std::function worker; - /* Create pool last to ensure threads are stopped before other destructors - * run */ - ThreadPool pool; + /* Create pool last to ensure threads are stopped before other + destructors run. 
*/ + ThreadPool pool(maxThreads); worker = [&](const T & node) { { @@ -116,11 +123,19 @@ void processGraph( { auto graph(graph_.lock()); - for (auto & ref : refs) + for (auto & ref : refs) { + if (discoverNodes) { + auto [i, inserted] = graph->known.insert(ref); + if (inserted) { + pool.enqueue(std::bind(worker, std::ref(*i))); + graph->left.insert(ref); + } + } if (graph->left.count(ref)) { graph->refs[node].insert(ref); graph->rrefs[ref].insert(node); } + } if (graph->refs[node].empty()) goto doWork; } diff --git a/src/libutil/include/nix/util/url.hh b/src/libutil/include/nix/util/url.hh index 55c475df651c..5f9eab4e9de9 100644 --- a/src/libutil/include/nix/util/url.hh +++ b/src/libutil/include/nix/util/url.hh @@ -228,6 +228,11 @@ struct ParsedURL */ std::string renderPath(bool encode = false) const; + /** + * Like to_string(), but removes query strings and passwords. + */ + std::string renderSanitized() const; + auto operator<=>(const ParsedURL & other) const noexcept = default; /** diff --git a/src/libutil/include/nix/util/util.hh b/src/libutil/include/nix/util/util.hh index 7556663cd1d2..8130c52ed27a 100644 --- a/src/libutil/include/nix/util/util.hh +++ b/src/libutil/include/nix/util/util.hh @@ -298,9 +298,15 @@ typename T::mapped_type * get(T & map, const K & key) template typename T::mapped_type * get(T && map, const K & key) = delete; -/** - * Look up a value in a `boost::concurrent_flat_map`. 
- */ +template +std::optional getOptional(const T & map, const typename T::key_type & key) +{ + auto i = map.find(key); + if (i == map.end()) + return std::nullopt; + return {i->second}; +} + template std::optional getConcurrent(const T & map, const typename T::key_type & key) { diff --git a/src/libutil/linux/cgroup.cc b/src/libutil/linux/cgroup.cc index 928b44d6c50d..802b56336d1e 100644 --- a/src/libutil/linux/cgroup.cc +++ b/src/libutil/linux/cgroup.cc @@ -174,4 +174,23 @@ std::string getRootCgroup() return rootCgroup; } +std::set getPidsInCgroup(const std::filesystem::path & cgroup) +{ + if (!pathExists(cgroup)) + return {}; + + auto procsFile = cgroup / "cgroup.procs"; + + std::set result; + + for (auto & pidStr : tokenizeString>(readFile(procsFile))) { + if (auto o = string2Int(pidStr)) + result.insert(*o); + else + throw Error("invalid PID '%s'", pidStr); + } + + return result; +} + } // namespace nix diff --git a/src/libutil/linux/include/nix/util/cgroup.hh b/src/libutil/linux/include/nix/util/cgroup.hh index a759bdd0852b..ad777347670c 100644 --- a/src/libutil/linux/include/nix/util/cgroup.hh +++ b/src/libutil/linux/include/nix/util/cgroup.hh @@ -40,4 +40,9 @@ std::string getCurrentCgroup(); */ std::string getRootCgroup(); +/** + * Get the PIDs of all processes in the given cgroup. 
+ */ +std::set getPidsInCgroup(const std::filesystem::path & cgroup); + } // namespace nix diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index 8f7ec2d294ed..842381acf66e 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -336,6 +336,16 @@ struct JSONLogger : Logger addFields(json, fields); write(json); } + + void result(ActivityId act, ResultType type, const nlohmann::json & j) override + { + nlohmann::json json; + json["action"] = "result"; + json["id"] = act; + json["type"] = type; + json["payload"] = j; + write(json); + } }; std::unique_ptr makeJSONLogger(Descriptor fd, bool includeNixPrefix) diff --git a/src/libutil/meson.build b/src/libutil/meson.build index 0b4a0841f906..283f4b96f55c 100644 --- a/src/libutil/meson.build +++ b/src/libutil/meson.build @@ -152,6 +152,7 @@ sources = [ config_priv_h ] + files( 'pos-table.cc', 'position.cc', 'posix-source-accessor.cc', + 'provenance.cc', 'serialise.cc', 'signature/local-keys.cc', 'signature/signer.cc', @@ -203,7 +204,7 @@ this_library = library( dependencies : deps_public + deps_private + deps_other, include_directories : include_dirs, link_args : linker_export_flags, - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, cpp_pch : do_pch ? 
[ 'pch/precompiled-headers.hh' ] : [], ) diff --git a/src/libutil/mounted-source-accessor.cc b/src/libutil/mounted-source-accessor.cc index d9398045cc56..22e5acf70b45 100644 --- a/src/libutil/mounted-source-accessor.cc +++ b/src/libutil/mounted-source-accessor.cc @@ -99,6 +99,18 @@ struct MountedSourceAccessorImpl : MountedSourceAccessor auto [accessor, subpath] = resolve(path); return accessor->getFingerprint(subpath); } + + std::shared_ptr getProvenance(const CanonPath & path) override + { + auto [accessor, subpath] = resolve(path); + return accessor->getProvenance(subpath); + } + + void invalidateCache(const CanonPath & path) override + { + auto [accessor, subpath] = resolve(path); + accessor->invalidateCache(subpath); + } }; ref makeMountedSourceAccessor(std::map> mounts) diff --git a/src/libutil/package.nix b/src/libutil/package.nix index 3deb7ba3ae3c..287e6c6a1139 100644 --- a/src/libutil/package.nix +++ b/src/libutil/package.nix @@ -22,7 +22,7 @@ let in mkMesonLibrary (finalAttrs: { - pname = "nix-util"; + pname = "determinate-nix-util"; inherit version; workDir = ./.; diff --git a/src/libutil/posix-source-accessor.cc b/src/libutil/posix-source-accessor.cc index abbab45db21c..632504e74a05 100644 --- a/src/libutil/posix-source-accessor.cc +++ b/src/libutil/posix-source-accessor.cc @@ -87,11 +87,11 @@ bool PosixSourceAccessor::pathExists(const CanonPath & path) return nix::pathExists(makeAbsPath(path).string()); } +using Cache = boost::concurrent_flat_map>; +static Cache cache; + std::optional PosixSourceAccessor::cachedLstat(const CanonPath & path) { - using Cache = boost::concurrent_flat_map>; - static Cache cache; - // Note: we convert std::filesystem::path to Path because the // former is not hashable on libc++. 
Path absPath = makeAbsPath(path).string(); @@ -108,6 +108,11 @@ std::optional PosixSourceAccessor::cachedLstat(const CanonPath & pa return st; } +void PosixSourceAccessor::invalidateCache(const CanonPath & path) +{ + cache.erase(makeAbsPath(path).string()); +} + std::optional PosixSourceAccessor::maybeLstat(const CanonPath & path) { if (auto parent = path.parent()) diff --git a/src/libutil/provenance.cc b/src/libutil/provenance.cc new file mode 100644 index 000000000000..3130e148f540 --- /dev/null +++ b/src/libutil/provenance.cc @@ -0,0 +1,74 @@ +#include "nix/util/provenance.hh" +#include "nix/util/json-utils.hh" + +namespace nix { + +struct UnknownProvenance : Provenance +{ + nlohmann::json payload; + + UnknownProvenance(nlohmann::json payload) + : payload(std::move(payload)) + { + } + + nlohmann::json to_json() const override + { + return payload; + } +}; + +Provenance::RegisteredTypes & Provenance::registeredTypes() +{ + static Provenance::RegisteredTypes types; + return types; +} + +ref Provenance::from_json_str(std::string_view s) +{ + return from_json(nlohmann::json::parse(s)); +} + +std::shared_ptr Provenance::from_json_str_optional(std::string_view s) +{ + if (s.empty()) + return nullptr; + return Provenance::from_json_str(s); +} + +ref Provenance::from_json(const nlohmann::json & json) +{ + auto & obj = getObject(json); + + auto type = getString(valueAt(obj, "type")); + + auto it = registeredTypes().find(type); + if (it == registeredTypes().end()) + return make_ref(obj); + + return it->second(obj); +} + +std::string Provenance::to_json_str() const +{ + return to_json().dump(); +} + +nlohmann::json SubpathProvenance::to_json() const +{ + return { + {"type", "subpath"}, + {"subpath", subpath.abs()}, + {"next", next ? 
next->to_json() : nlohmann::json(nullptr)}, + }; +} + +Provenance::Register registerSubpathProvenance("subpath", [](nlohmann::json json) { + auto & obj = getObject(json); + std::shared_ptr next; + if (auto p = optionalValueAt(obj, "next"); p && !p->is_null()) + next = Provenance::from_json(*p); + return make_ref(next, CanonPath(getString(valueAt(obj, "subpath")))); +}); + +} // namespace nix diff --git a/src/libutil/source-accessor.cc b/src/libutil/source-accessor.cc index 3c2d658290c0..76f3edc17a04 100644 --- a/src/libutil/source-accessor.cc +++ b/src/libutil/source-accessor.cc @@ -1,5 +1,7 @@ #include + #include "nix/util/source-accessor.hh" +#include "nix/util/provenance.hh" namespace nix { @@ -126,4 +128,9 @@ CanonPath SourceAccessor::resolveSymlinks(const CanonPath & path, SymlinkResolut return res; } +std::shared_ptr SourceAccessor::getProvenance(const CanonPath & path) +{ + return provenance && !path.isRoot() ? std::make_shared(provenance, path) : provenance; +} + } // namespace nix diff --git a/src/libutil/table.cc b/src/libutil/table.cc index fa1bf110d93f..215171dc02fc 100644 --- a/src/libutil/table.cc +++ b/src/libutil/table.cc @@ -1,4 +1,5 @@ #include "nix/util/table.hh" +#include "nix/util/terminal.hh" #include #include @@ -7,7 +8,7 @@ namespace nix { -void printTable(std::ostream & out, Table & table) +void printTable(std::ostream & out, Table & table, unsigned int width) { auto nrColumns = table.size() > 0 ? table.front().size() : 0; @@ -18,19 +19,31 @@ void printTable(std::ostream & out, Table & table) assert(i.size() == nrColumns); size_t column = 0; for (auto j = i.begin(); j != i.end(); ++j, ++column) - if (j->size() > widths[column]) - widths[column] = j->size(); + // TODO: take ANSI escapes into account when calculating width. 
+ widths[column] = std::max(widths[column], j->content.size()); } for (auto & i : table) { size_t column = 0; + std::string line; for (auto j = i.begin(); j != i.end(); ++j, ++column) { - std::string s = *j; + std::string s = j->content; replace(s.begin(), s.end(), '\n', ' '); - out << s; - if (column < nrColumns - 1) - out << std::string(widths[column] - s.size() + 2, ' '); + + auto padding = std::string(widths[column] - s.size(), ' '); + if (j->alignment == TableCell::Right) { + line += padding; + line += s; + } else { + line += s; + if (column + 1 < nrColumns) + line += padding; + } + + if (column + 1 < nrColumns) + line += " "; } + out << filterANSIEscapes(line, false, width); out << std::endl; } } diff --git a/src/libutil/tee-logger.cc b/src/libutil/tee-logger.cc index 8433168a5a82..889b82ca02b8 100644 --- a/src/libutil/tee-logger.cc +++ b/src/libutil/tee-logger.cc @@ -65,6 +65,12 @@ struct TeeLogger : Logger logger->result(act, type, fields); } + void result(ActivityId act, ResultType type, const nlohmann::json & json) override + { + for (auto & logger : loggers) + logger->result(act, type, json); + } + void writeToStdout(std::string_view s) override { for (auto & logger : loggers) { diff --git a/src/libutil/terminal.cc b/src/libutil/terminal.cc index 401ce16043d1..c52cc14975b2 100644 --- a/src/libutil/terminal.cc +++ b/src/libutil/terminal.cc @@ -189,6 +189,14 @@ std::pair getWindowSize() return *windowSize.lock(); } +unsigned int getWindowWidth() +{ + unsigned int width = getWindowSize().second; + if (width <= 0) + width = std::numeric_limits::max(); + return width; +} + #ifndef _WIN32 std::string getPtsName(int fd) { diff --git a/src/libutil/union-source-accessor.cc b/src/libutil/union-source-accessor.cc index ca239563c662..7be670400b05 100644 --- a/src/libutil/union-source-accessor.cc +++ b/src/libutil/union-source-accessor.cc @@ -88,6 +88,22 @@ struct UnionSourceAccessor : SourceAccessor } return {path, std::nullopt}; } + + std::shared_ptr 
getProvenance(const CanonPath & path) override + { + for (auto & accessor : accessors) { + auto prov = accessor->getProvenance(path); + if (prov) + return prov; + } + return nullptr; + } + + void invalidateCache(const CanonPath & path) override + { + for (auto & accessor : accessors) + accessor->invalidateCache(path); + } }; ref makeUnionSourceAccessor(std::vector> && accessors) diff --git a/src/libutil/url.cc b/src/libutil/url.cc index 0a8b64528140..d9f61078cc48 100644 --- a/src/libutil/url.cc +++ b/src/libutil/url.cc @@ -344,6 +344,15 @@ std::string ParsedURL::renderPath(bool encode) const return concatStringsSep("/", path); } +std::string ParsedURL::renderSanitized() const +{ + auto url = *this; + if (url.authority) + url.authority->password.reset(); + url.query.clear(); + return url.to_string(); +} + std::string ParsedURL::renderAuthorityAndPath() const { std::string res; diff --git a/src/nix/app.cc b/src/nix/app.cc index 634db04f3fe1..4a8ca815c0fa 100644 --- a/src/nix/app.cc +++ b/src/nix/app.cc @@ -74,6 +74,7 @@ UnresolvedApp InstallableValue::toApp(EvalState & state) std::visit( overloaded{ [&](const NixStringContextElem::DrvDeep & d) -> DerivedPath { + state.waitForPath(d.drvPath); /* We want all outputs of the drv */ return DerivedPath::Built{ .drvPath = makeConstantStorePathRef(d.drvPath), @@ -81,6 +82,7 @@ UnresolvedApp InstallableValue::toApp(EvalState & state) }; }, [&](const NixStringContextElem::Built & b) -> DerivedPath { + state.waitForPath(*b.drvPath); return DerivedPath::Built{ .drvPath = b.drvPath, .outputs = OutputsSpec::Names{b.output}, @@ -88,16 +90,19 @@ UnresolvedApp InstallableValue::toApp(EvalState & state) }, [&](const NixStringContextElem::Opaque & o) -> DerivedPath { return DerivedPath::Opaque{ - .path = o.path, + .path = state.devirtualize(o.path), }; }, + [&](const NixStringContextElem::Path & p) -> DerivedPath { + throw Error("'program' attribute of an 'app' output cannot have no context"); + }, }, c.raw)); } return 
UnresolvedApp{App{ .context = std::move(context2), - .program = program, + .program = state.devirtualize(program, context), }}; } diff --git a/src/nix/build.md b/src/nix/build.md index 5dfdd44a71f7..b5964e13dde6 100644 --- a/src/nix/build.md +++ b/src/nix/build.md @@ -21,15 +21,15 @@ R""( ```console # nix build nixpkgs#hello nixpkgs#cowsay # ls -l result* - lrwxrwxrwx 1 … result -> /nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10 - lrwxrwxrwx 1 … result-1 -> /nix/store/rkfrm0z6x6jmi7d3gsmma4j53h15mg33-cowsay-3.03+dfsg2 + lrwxrwxrwx 1 … result -> /nix/store/10l19qifk7hjjq47px8m2prqk1gv4isy-hello-2.10 + lrwxrwxrwx 1 … result-1 -> /nix/store/frzgk3v1ycnarpfc2rkynravng27a86d-cowsay-3.03+dfsg2 ``` * Build GNU Hello and print the resulting store path. ```console # nix build nixpkgs#hello --print-out-paths - /nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10 + /nix/store/10l19qifk7hjjq47px8m2prqk1gv4isy-hello-2.10 ``` * Build a specific output: @@ -37,19 +37,19 @@ R""( ```console # nix build nixpkgs#glibc.dev # ls -ld ./result-dev - lrwxrwxrwx 1 … ./result-dev -> /nix/store/dkm3gwl0xrx0wrw6zi5x3px3lpgjhlw4-glibc-2.32-dev + lrwxrwxrwx 1 … ./result-dev -> /nix/store/hb4lb9n3gv855llky72hrs4pglpxq70m-glibc-2.32-dev ``` * Build all outputs: ```console # nix build "nixpkgs#openssl^*" --print-out-paths - /nix/store/gvad6v0cmq1qccmc4wphsazqbj0xzjsl-openssl-3.0.13-bin - /nix/store/a07jqdrc8afnk8r6f3lnhh4gvab7chk4-openssl-3.0.13-debug - /nix/store/yg75achq89wgqn2fi3gglgsd77kjpi03-openssl-3.0.13-dev - /nix/store/bvdcihi8c88fw31cg6gzzmpnwglpn1jv-openssl-3.0.13-doc - /nix/store/gjqcvq47cmxazxga0cirspm3jywkmvfv-openssl-3.0.13-man - /nix/store/7nmrrad8skxr47f9hfl3xc0pfqmwq51b-openssl-3.0.13 + /nix/store/ah1slww3lfsj02w563wjf1xcz5fayj36-openssl-3.0.13-bin + /nix/store/vswlynn75s0bpba3vl6bi3wyzjym95yi-openssl-3.0.13-debug + /nix/store/z71nwwni9dcxdmd3v3a7j24v70c7v7z3-openssl-3.0.13-dev + /nix/store/iabzsa5c73p4f10zfmf5r2qsrn0hl4lk-openssl-3.0.13-doc + 
/nix/store/zqmfrpxvcll69a2lyawnpvp15zh421v2-openssl-3.0.13-man + /nix/store/l3nlzki957anyy7yb25qvwk6cqrnvb67-openssl-3.0.13 ``` * Build attribute `build.x86_64-linux` from (non-flake) Nix expression @@ -89,7 +89,7 @@ R""( already exist: ```console - # nix build /nix/store/rkfrm0z6x6jmi7d3gsmma4j53h15mg33-cowsay-3.03+dfsg2 + # nix build /nix/store/frzgk3v1ycnarpfc2rkynravng27a86d-cowsay-3.03+dfsg2 ``` # Description diff --git a/src/nix/bundle.cc b/src/nix/bundle.cc index e11f37b847ee..20bfd4d6df60 100644 --- a/src/nix/bundle.cc +++ b/src/nix/bundle.cc @@ -58,21 +58,9 @@ struct CmdBundle : InstallableValueCommand return catSecondary; } - // FIXME: cut&paste from CmdRun. - Strings getDefaultFlakeAttrPaths() override + StringSet getRoles() override { - Strings res{"apps." + settings.thisSystem.get() + ".default", "defaultApp." + settings.thisSystem.get()}; - for (auto & s : SourceExprCommand::getDefaultFlakeAttrPaths()) - res.push_back(s); - return res; - } - - Strings getDefaultFlakeAttrPathPrefixes() override - { - Strings res{"apps." + settings.thisSystem.get() + "."}; - for (auto & s : SourceExprCommand::getDefaultFlakeAttrPathPrefixes()) - res.push_back(s); - return res; + return {"nix-run"}; } void run(ref store, ref installable) override @@ -90,9 +78,9 @@ struct CmdBundle : InstallableValueCommand std::move(bundlerFlakeRef), bundlerName, std::move(extendedOutputsSpec), - {"bundlers." + settings.thisSystem.get() + ".default", "defaultBundler." + settings.thisSystem.get()}, - {"bundlers." 
+ settings.thisSystem.get() + "."}, - lockFlags}; + {"nix-bundler"}, + lockFlags, + getDefaultFlakeSchemas()}; auto vRes = evalState->allocValue(); evalState->callFunction(*bundler.toValue(*evalState).first, *val, *vRes, noPos); @@ -107,6 +95,8 @@ struct CmdBundle : InstallableValueCommand NixStringContext context2; auto drvPath = evalState->coerceToStorePath(attr1->pos, *attr1->value, context2, ""); + evalState->waitForAllPaths(); + drvPath.requireDerivation(); auto attr2 = vRes->attrs()->get(evalState->s.outPath); @@ -115,6 +105,8 @@ struct CmdBundle : InstallableValueCommand auto outPath = evalState->coerceToStorePath(attr2->pos, *attr2->value, context2, ""); + evalState->waitForAllPaths(); + store->buildPaths({ DerivedPath::Built{ .drvPath = makeConstantStorePathRef(drvPath), diff --git a/src/nix/crash-handler.cc b/src/nix/crash-handler.cc index 17c948dab143..29c4f2027ca4 100644 --- a/src/nix/crash-handler.cc +++ b/src/nix/crash-handler.cc @@ -34,7 +34,7 @@ void logFatal(std::string const & s) void onTerminate() { logFatal( - "Nix crashed. This is a bug. Please report this at https://github.com/NixOS/nix/issues with the following information included:\n"); + "Determinate Nix crashed. This is a bug. 
Please report this at https://github.com/DeterminateSystems/nix-src/issues with the following information included:\n"); try { std::exception_ptr eptr = std::current_exception(); if (eptr) { diff --git a/src/nix/develop.cc b/src/nix/develop.cc index 68ff3fcf9655..cfbb6ca1bba1 100644 --- a/src/nix/develop.cc +++ b/src/nix/develop.cc @@ -1,5 +1,6 @@ #include "nix/util/config-global.hh" #include "nix/expr/eval.hh" +#include "nix/fetchers/fetch-settings.hh" #include "nix/cmd/installable-flake.hh" #include "nix/cmd/command-installable-value.hh" #include "nix/main/common-args.hh" @@ -459,22 +460,9 @@ struct Common : InstallableCommand, MixProfile rewrites.insert({BuildEnvironment::getString(fileInBuilderEnv->second), targetFilePath.string()}); } - Strings getDefaultFlakeAttrPaths() override + StringSet getRoles() override { - Strings paths{ - "devShells." + settings.thisSystem.get() + ".default", - "devShell." + settings.thisSystem.get(), - }; - for (auto & p : SourceExprCommand::getDefaultFlakeAttrPaths()) - paths.push_back(p); - return paths; - } - - Strings getDefaultFlakeAttrPathPrefixes() override - { - auto res = SourceExprCommand::getDefaultFlakeAttrPathPrefixes(); - res.emplace_front("devShells." + settings.thisSystem.get() + "."); - return res; + return {"nix-develop"}; } StorePath getShellOutPath(ref store, ref installable) @@ -657,9 +645,9 @@ struct CmdDevelop : Common, MixEnvironment std::move(nixpkgs), "bashInteractive", ExtendedOutputsSpec::Default(), - Strings{}, - Strings{"legacyPackages." 
+ settings.thisSystem.get() + "."}, - nixpkgsLockFlags); + StringSet{"nix-build"}, + nixpkgsLockFlags, + std::nullopt); for (auto & path : Installable::toStorePathSet( getEvalStore(), store, Realise::Outputs, OperateOn::Output, {bashInstallable})) { diff --git a/src/nix/diff-closures.cc b/src/nix/diff-closures.cc index d36a21d746ff..a71efa042e84 100644 --- a/src/nix/diff-closures.cc +++ b/src/nix/diff-closures.cc @@ -54,10 +54,10 @@ GroupedPaths getClosureInfo(ref store, const StorePath & toplevel) std::string showVersions(const StringSet & versions) { if (versions.empty()) - return "∅"; + return "(absent)"; StringSet versions2; for (auto & version : versions) - versions2.insert(version.empty() ? "ε" : version); + versions2.insert(version.empty() ? "(no version)" : version); return concatStringsSep(", ", versions2); } @@ -104,8 +104,13 @@ void printClosureDiff( if (showDelta || !removed.empty() || !added.empty()) { std::vector items; - if (!removed.empty() || !added.empty()) + if (!removed.empty() && !added.empty()) { items.push_back(fmt("%s → %s", showVersions(removed), showVersions(added))); + } else if (!removed.empty()) { + items.push_back(fmt("%s removed", showVersions(removed))); + } else if (!added.empty()) { + items.push_back(fmt("%s added", showVersions(added))); + } if (showDelta) items.push_back(fmt("%s%s" ANSI_NORMAL, sizeDelta > 0 ? 
ANSI_RED : ANSI_GREEN, renderSize(sizeDelta))); logger->cout("%s%s: %s", indent, name, concatStringsSep(", ", items)); diff --git a/src/nix/diff-closures.md b/src/nix/diff-closures.md index 0294c0d8def7..6b07af28f958 100644 --- a/src/nix/diff-closures.md +++ b/src/nix/diff-closures.md @@ -11,8 +11,8 @@ R""( baloo-widgets: 20.08.1 → 20.08.2 bluez-qt: +12.6 KiB dolphin: 20.08.1 → 20.08.2, +13.9 KiB - kdeconnect: 20.08.2 → ∅, -6597.8 KiB - kdeconnect-kde: ∅ → 20.08.2, +6599.7 KiB + kdeconnect: 20.08.2 removed, -6597.8 KiB + kdeconnect-kde: 20.08.2 added, +6599.7 KiB … ``` @@ -34,9 +34,9 @@ dolphin: 20.08.1 → 20.08.2, +13.9 KiB No size change is shown if it's below the threshold. If the package does not exist in either the *before* or *after* closures, it is -represented using `∅` (empty set) on the appropriate side of the -arrow. If a package has an empty version string, the version is -rendered as `ε` (epsilon). +represented using `added` or `removed`. +If a package has an empty version string, the version is +rendered as `(no version)`. There may be multiple versions of a package in each closure. In that case, only the changed versions are shown. Thus, diff --git a/src/nix/eval.cc b/src/nix/eval.cc index 584b2122f09e..2f1ba63956fd 100644 --- a/src/nix/eval.cc +++ b/src/nix/eval.cc @@ -116,11 +116,14 @@ struct CmdEval : MixJSON, InstallableValueCommand, MixReadOnlyOption logger->stop(); writeFull( getStandardOutput(), - *state->coerceToString(noPos, *v, context, "while generating the eval command output")); + state->devirtualize( + *state->coerceToString(noPos, *v, context, "while generating the eval command output"), context)); } else if (json) { - printJSON(printValueAsJSON(*state, true, *v, pos, context, false)); + // FIXME: use printJSON + auto j = printValueAsJSON(*state, true, *v, pos, context, false); + logger->cout("%s", state->devirtualize(outputPretty ? 
j.dump(2) : j.dump(), context)); } else { diff --git a/src/nix/flake-check.md b/src/nix/flake-check.md index 007640c27c94..a700c5cbe6fe 100644 --- a/src/nix/flake-check.md +++ b/src/nix/flake-check.md @@ -18,66 +18,20 @@ R""( # Description This command verifies that the flake specified by flake reference -*flake-url* can be evaluated successfully (as detailed below), and -that the derivations specified by the flake's `checks` output can be -built successfully. +*flake-url* can be evaluated and built successfully according to its +`schemas` flake output. For every flake output that has a schema +definition, `nix flake check` uses the schema to extract the contents +of the output. Then, for every item in the contents: + +* It evaluates the elements of the `evalChecks` attribute set returned + by the schema for that item, printing an error or warning for every + check that fails to evaluate or that evaluates to `false`. + +* It builds the `derivation` attribute returned by the schema for that + item, if the item has the `isFlakeCheck` attribute. If the `keep-going` option is set to `true`, Nix will keep evaluating as much as it can and report the errors as it encounters them. Otherwise it will stop at the first error. 
-# Evaluation checks - -The following flake output attributes must be derivations: - -* `checks.`*system*`.`*name* -* `devShells.`*system*`.default` -* `devShells.`*system*`.`*name* -* `nixosConfigurations.`*name*`.config.system.build.toplevel` -* `packages.`*system*`.default` -* `packages.`*system*`.`*name* - -The following flake output attributes must be [app -definitions](./nix3-run.md): - -* `apps.`*system*`.default` -* `apps.`*system*`.`*name* - -The following flake output attributes must be [template -definitions](./nix3-flake-init.md): - -* `templates.default` -* `templates.`*name* - -The following flake output attributes must be *Nixpkgs overlays*: - -* `overlays.default` -* `overlays.`*name* - -The following flake output attributes must be *NixOS modules*: - -* `nixosModules.default` -* `nixosModules.`*name* - -The following flake output attributes must be -[bundlers](./nix3-bundle.md): - -* `bundlers.default` -* `bundlers.`*name* - -Old default attributes are renamed, they will work but will emit a warning: - -* `defaultPackage.` → `packages.`*system*`.default` -* `defaultApps.` → `apps.`*system*`.default` -* `defaultTemplate` → `templates.default` -* `defaultBundler.` → `bundlers.`*system*`.default` -* `overlay` → `overlays.default` -* `devShell.` → `devShells.`*system*`.default` -* `nixosModule` → `nixosModules.default` - -In addition, the `hydraJobs` output is evaluated in the same way as -Hydra's `hydra-eval-jobs` (i.e. as a arbitrarily deeply nested -attribute set of derivations). Similarly, the -`legacyPackages`.*system* output is evaluated like `nix-env --query --available `. 
- )"" diff --git a/src/nix/flake-metadata.md b/src/nix/flake-metadata.md index adfd3dc96bbf..b99770049950 100644 --- a/src/nix/flake-metadata.md +++ b/src/nix/flake-metadata.md @@ -9,7 +9,7 @@ R""( Resolved URL: github:edolstra/dwarffs Locked URL: github:edolstra/dwarffs/f691e2c991e75edb22836f1dbe632c40324215c5 Description: A filesystem that fetches DWARF debug info from the Internet on demand - Path: /nix/store/769s05vjydmc2lcf6b02az28wsa9ixh1-source + Path: /nix/store/vdyf2s1pygcl4y3dn3bm9wy7mnl8hxcv-source Revision: f691e2c991e75edb22836f1dbe632c40324215c5 Last modified: 2021-01-21 15:41:26 Inputs: @@ -40,7 +40,7 @@ R""( "type": "indirect" }, "originalUrl": "flake:dwarffs", - "path": "/nix/store/hang3792qwdmm2n0d9nsrs5n6bsws6kv-source", + "path": "/nix/store/l06r23gw4psl1f547il2hbnwnxaplbaz-source", "resolved": { "owner": "edolstra", "repo": "dwarffs", diff --git a/src/nix/flake-prefetch-inputs.cc b/src/nix/flake-prefetch-inputs.cc index 4ea6342c3695..19fbb0b574bb 100644 --- a/src/nix/flake-prefetch-inputs.cc +++ b/src/nix/flake-prefetch-inputs.cc @@ -43,11 +43,14 @@ struct CmdFlakePrefetchInputs : FlakeCommand return; if (auto lockedNode = dynamic_cast(&node)) { + if (lockedNode->buildTime) + return; try { Activity act(*logger, lvlInfo, actUnknown, fmt("fetching '%s'", lockedNode->lockedRef)); auto accessor = lockedNode->lockedRef.input.getAccessor(fetchSettings, *store).first; - fetchToStore( - fetchSettings, *store, accessor, FetchMode::Copy, lockedNode->lockedRef.input.getName()); + if (!evalSettings.lazyTrees) + fetchToStore( + fetchSettings, *store, accessor, FetchMode::Copy, lockedNode->lockedRef.input.getName()); } catch (Error & e) { printError("%s", e.what()); nrFailed++; diff --git a/src/nix/flake-prefetch-inputs.md b/src/nix/flake-prefetch-inputs.md index a69f7d367915..b571fa348370 100644 --- a/src/nix/flake-prefetch-inputs.md +++ b/src/nix/flake-prefetch-inputs.md @@ -12,6 +12,6 @@ R""( Fetch the inputs of a flake. 
This ensures that they are already available for any subsequent evaluation of the flake. -This operation is recursive: it will fetch not just the direct inputs of the top-level flake, but also transitive inputs. +This operation is recursive: it fetches not just the direct inputs of the top-level flake, but also transitive inputs. It skips build-time inputs, i.e. inputs that have the attribute `buildTime = true`. )"" diff --git a/src/nix/flake-prefetch.md b/src/nix/flake-prefetch.md index 4666aadc4df3..a634c502262a 100644 --- a/src/nix/flake-prefetch.md +++ b/src/nix/flake-prefetch.md @@ -20,7 +20,7 @@ R""( ```console # nix flake prefetch dwarffs --json {"hash":"sha256-VHg3MYVgQ12LeRSU2PSoDeKlSPD8PYYEFxxwkVVDRd0=" - ,"storePath":"/nix/store/hang3792qwdmm2n0d9nsrs5n6bsws6kv-source"} + ,"storePath":"/nix/store/l06r23gw4psl1f547il2hbnwnxaplbaz-source"} ``` # Description diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 5324e0121d5b..454e10d4ecf2 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -17,6 +17,10 @@ #include "nix/fetchers/fetch-to-store.hh" #include "nix/store/local-fs-store.hh" #include "nix/store/globals.hh" +#include "nix/expr/parallel-eval.hh" +#include "nix/util/exit.hh" +#include "nix/cmd/flake-schemas.hh" +#include "nix/store/names.hh" #include #include @@ -132,6 +136,7 @@ struct CmdFlakeUpdate : FlakeCommand lockFlags.recreateLockFile = updateAll; lockFlags.writeLockFile = true; lockFlags.applyNixConfig = true; + lockFlags.requireLockable = false; lockFlake(); } @@ -164,38 +169,12 @@ struct CmdFlakeLock : FlakeCommand lockFlags.writeLockFile = true; lockFlags.failOnUnlocked = true; lockFlags.applyNixConfig = true; + lockFlags.requireLockable = false; lockFlake(); } }; -static void enumerateOutputs( - EvalState & state, - Value & vFlake, - std::function callback) -{ - auto pos = vFlake.determinePos(noPos); - state.forceAttrs(vFlake, pos, "while evaluating a flake to get its outputs"); - - auto aOutputs = 
vFlake.attrs()->get(state.symbols.create("outputs")); - assert(aOutputs); - - state.forceAttrs(*aOutputs->value, pos, "while evaluating the outputs of a flake"); - - auto sHydraJobs = state.symbols.create("hydraJobs"); - - /* Hack: ensure that hydraJobs is evaluated before anything - else. This way we can disable IFD for hydraJobs and then enable - it for other outputs. */ - if (auto attr = aOutputs->value->attrs()->get(sHydraJobs)) - callback(state.symbols[attr->name], *attr->value, attr->pos); - - for (auto & attr : *aOutputs->value->attrs()) { - if (attr.name != sHydraJobs) - callback(state.symbols[attr.name], *attr.value, attr.pos); - } -} - struct CmdFlakeMetadata : FlakeCommand, MixJSON { std::string description() override @@ -212,11 +191,17 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON void run(nix::ref store) override { + lockFlags.requireLockable = false; auto lockedFlake = lockFlake(); auto & flake = lockedFlake.flake; - // Currently, all flakes are in the Nix store via the rootFS accessor. - auto storePath = store->printStorePath(store->toStorePath(flake.path.path.abs()).first); + /* Hack to show the store path if available. 
*/ + std::optional storePath; + if (store->isInStore(flake.path.path.abs())) { + auto path = store->toStorePath(flake.path.path.abs()).first; + if (store->isValidPath(path)) + storePath = path; + } if (json) { nlohmann::json j; @@ -238,7 +223,8 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON j["revCount"] = *revCount; if (auto lastModified = flake.lockedRef.input.getLastModified()) j["lastModified"] = *lastModified; - j["path"] = storePath; + if (storePath) + j["path"] = store->printStorePath(*storePath); j["locks"] = lockedFlake.lockFile.toJSON().first; if (auto fingerprint = lockedFlake.getFingerprint(*store, fetchSettings)) j["fingerprint"] = fingerprint->to_string(HashFormat::Base16, false); @@ -249,7 +235,8 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON logger->cout(ANSI_BOLD "Locked URL:" ANSI_NORMAL " %s", flake.lockedRef.to_string()); if (flake.description) logger->cout(ANSI_BOLD "Description:" ANSI_NORMAL " %s", *flake.description); - logger->cout(ANSI_BOLD "Path:" ANSI_NORMAL " %s", storePath); + if (storePath) + logger->cout(ANSI_BOLD "Path:" ANSI_NORMAL " %s", store->printStorePath(*storePath)); if (auto rev = flake.lockedRef.input.getRev()) logger->cout(ANSI_BOLD "Revision:" ANSI_NORMAL " %s", rev->to_string(HashFormat::Base16, false)); if (auto dirtyRev = fetchers::maybeGetStrAttr(flake.lockedRef.toAttrs(), "dirtyRev")) @@ -281,7 +268,7 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON "%s" ANSI_BOLD "%s" ANSI_NORMAL ": %s%s", prefix + (last ? treeLast : treeConn), input.first, - (*lockedNode)->lockedRef, + (*lockedNode)->lockedRef.to_string(true), lastModifiedStr); bool firstVisit = visited.insert(*lockedNode).second; @@ -310,9 +297,26 @@ struct CmdFlakeInfo : CmdFlakeMetadata } }; -struct CmdFlakeCheck : FlakeCommand +/** + * Log the current exception, after forcing cached evaluation errors. 
+ */ +static void logEvalError() +{ + try { + try { + throw; + } catch (eval_cache::CachedEvalError & e) { + e.force(); + } + } catch (Error & e) { + logError(e.info()); + } +} + +struct CmdFlakeCheck : FlakeCommand, MixFlakeSchemas { bool build = true; + bool buildAll = false; bool checkAllSystems = false; CmdFlakeCheck() @@ -322,6 +326,11 @@ struct CmdFlakeCheck : FlakeCommand .description = "Do not build checks.", .handler = {&build, false}, }); + addFlag({ + .longName = "build-all", + .description = "Build all derivations, not just checks.", + .handler = {&buildAll, true}, + }); addFlag({ .longName = "all-systems", .description = "Check the outputs for all systems.", @@ -351,441 +360,122 @@ struct CmdFlakeCheck : FlakeCommand auto state = getEvalState(); lockFlags.applyNixConfig = true; - auto flake = lockFlake(); + auto flake = std::make_shared(lockFlake()); auto localSystem = std::string(settings.thisSystem.get()); - bool hasErrors = false; - auto reportError = [&](const Error & e) { - try { - throw e; - } catch (Interrupted & e) { - throw; - } catch (Error & e) { - if (settings.keepGoing) { - logError(e.info()); - hasErrors = true; - } else - throw; - } - }; - - StringSet omittedSystems; - - // FIXME: rewrite to use EvalCache. - - auto resolve = [&](PosIdx p) { return state->positions[p]; }; - - auto argHasName = [&](Symbol arg, std::string_view expected) { - std::string_view name = state->symbols[arg]; - return name == expected || name == "_" || (hasPrefix(name, "_") && name.substr(1) == expected); - }; - - auto checkSystemName = [&](std::string_view system, const PosIdx pos) { - // FIXME: what's the format of "system"? 
- if (system.find('-') == std::string::npos) - reportError(Error("'%s' is not a valid system type, at %s", system, resolve(pos))); - }; - - auto checkSystemType = [&](std::string_view system, const PosIdx pos) { - if (!checkAllSystems && system != localSystem) { - omittedSystems.insert(std::string(system)); - return false; - } else { - return true; - } - }; - - auto checkDerivation = - [&](const std::string & attrPath, Value & v, const PosIdx pos) -> std::optional { - try { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking derivation %s", attrPath)); - auto packageInfo = getDerivation(*state, v, false); - if (!packageInfo) - throw Error("flake attribute '%s' is not a derivation", attrPath); - else { - // FIXME: check meta attributes - auto storePath = packageInfo->queryDrvPath(); - if (storePath) { - logger->log( - lvlInfo, fmt("derivation evaluated to %s", store->printStorePath(storePath.value()))); - } - return storePath; - } - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking the derivation '%s'", attrPath)); - reportError(e); - } - return std::nullopt; - }; - - std::map> attrPathsByDrv; - - auto checkApp = [&](const std::string & attrPath, Value & v, const PosIdx pos) { - try { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking app '%s'", attrPath)); - state->forceAttrs(v, pos, ""); - if (auto attr = v.attrs()->get(state->symbols.create("type"))) - state->forceStringNoCtx(*attr->value, attr->pos, ""); - else - throw Error("app '%s' lacks attribute 'type'", attrPath); - - if (auto attr = v.attrs()->get(state->symbols.create("program"))) { - if (attr->name == state->symbols.create("program")) { - NixStringContext context; - state->forceString(*attr->value, context, attr->pos, ""); - } - } else - throw Error("app '%s' lacks attribute 'program'", attrPath); - - if (auto attr = v.attrs()->get(state->symbols.create("meta"))) { - state->forceAttrs(*attr->value, attr->pos, ""); - if (auto dAttr = 
attr->value->attrs()->get(state->symbols.create("description"))) - state->forceStringNoCtx(*dAttr->value, dAttr->pos, ""); - else - logWarning({ - .msg = HintFmt("app '%s' lacks attribute 'meta.description'", attrPath), - }); - } else - logWarning({ - .msg = HintFmt("app '%s' lacks attribute 'meta'", attrPath), - }); - - for (auto & attr : *v.attrs()) { - std::string_view name(state->symbols[attr.name]); - if (name != "type" && name != "program" && name != "meta") - throw Error("app '%s' has unsupported attribute '%s'", attrPath, name); - } - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking the app definition '%s'", attrPath)); - reportError(e); - } - }; - - auto checkOverlay = [&](std::string_view attrPath, Value & v, const PosIdx pos) { - try { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking overlay '%s'", attrPath)); - state->forceValue(v, pos); - if (!v.isLambda()) { - throw Error("overlay is not a function, but %s instead", showType(v)); - } - if (v.lambda().fun->getFormals() || !argHasName(v.lambda().fun->arg, "final")) - throw Error("overlay does not take an argument named 'final'"); - // FIXME: if we have a 'nixpkgs' input, use it to - // evaluate the overlay. 
- } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking the overlay '%s'", attrPath)); - reportError(e); - } - }; - - auto checkModule = [&](std::string_view attrPath, Value & v, const PosIdx pos) { - try { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking NixOS module '%s'", attrPath)); - state->forceValue(v, pos); - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking the NixOS module '%s'", attrPath)); - reportError(e); - } - }; - - std::function checkHydraJobs; - - checkHydraJobs = [&](std::string_view attrPath, Value & v, const PosIdx pos) { - try { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking Hydra job '%s'", attrPath)); - state->forceAttrs(v, pos, ""); + auto cache = flake_schemas::call(*state, flake, getDefaultFlakeSchemas()); - if (state->isDerivation(v)) - throw Error("jobset should not be a derivation at top-level"); - - for (auto & attr : *v.attrs()) { - state->forceAttrs(*attr.value, attr.pos, ""); - auto attrPath2 = concatStrings(attrPath, ".", state->symbols[attr.name]); - if (state->isDerivation(*attr.value)) { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking Hydra job '%s'", attrPath2)); - checkDerivation(attrPath2, *attr.value, attr.pos); - } else - checkHydraJobs(attrPath2, *attr.value, attr.pos); - } + auto inventory = cache->getRoot()->getAttr("inventory"); + auto outputs = cache->getRoot()->getAttr("outputs"); - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking the Hydra jobset '%s'", attrPath)); - reportError(e); - } - }; + FutureVector futures(*state->executor); - auto checkNixOSConfiguration = [&](const std::string & attrPath, Value & v, const PosIdx pos) { - try { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking NixOS configuration '%s'", attrPath)); - Bindings & bindings = Bindings::emptyBindings; - auto vToplevel = findAlongAttrPath(*state, "config.system.build.toplevel", bindings, v).first; - state->forceValue(*vToplevel, pos); - if 
(!state->isDerivation(*vToplevel)) - throw Error("attribute 'config.system.build.toplevel' is not a derivation"); - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking the NixOS configuration '%s'", attrPath)); - reportError(e); - } - }; + Sync> drvPaths_; + Sync> uncheckedOutputs; + Sync> omittedSystems; + Sync>> derivedPathToAttrPaths_; - auto checkTemplate = [&](std::string_view attrPath, Value & v, const PosIdx pos) { - try { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking template '%s'", attrPath)); + std::function node)> visit; - state->forceAttrs(v, pos, ""); + std::atomic_bool hasErrors = false; - if (auto attr = v.attrs()->get(state->symbols.create("path"))) { - if (attr->name == state->symbols.create("path")) { - NixStringContext context; - auto path = state->coerceToPath(attr->pos, *attr->value, context, ""); - if (!path.pathExists()) - throw Error("template '%s' refers to a non-existent path '%s'", attrPath, path); - // TODO: recursively check the flake in 'path'. - } - } else - throw Error("template '%s' lacks attribute 'path'", attrPath); + visit = [&](ref node) { + flake_schemas::visit( + checkAllSystems ? 
std::optional() : localSystem, + node, - if (auto attr = v.attrs()->get(state->symbols.create("description"))) - state->forceStringNoCtx(*attr->value, attr->pos, ""); - else - throw Error("template '%s' lacks attribute 'description'", attrPath); - - for (auto & attr : *v.attrs()) { - std::string_view name(state->symbols[attr.name]); - if (name != "path" && name != "description" && name != "welcomeText") - throw Error("template '%s' has unsupported attribute '%s'", attrPath, name); - } - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking the template '%s'", attrPath)); - reportError(e); - } - }; - - auto checkBundler = [&](const std::string & attrPath, Value & v, const PosIdx pos) { - try { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking bundler '%s'", attrPath)); - state->forceValue(v, pos); - if (!v.isLambda()) - throw Error("bundler must be a function"); - // TODO: check types of inputs/outputs? - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking the template '%s'", attrPath)); - reportError(e); - } - }; - - { - Activity act(*logger, lvlInfo, actUnknown, "evaluating flake"); - - auto vFlake = state->allocValue(); - flake::callFlake(*state, flake, *vFlake); - - enumerateOutputs(*state, *vFlake, [&](std::string_view name, Value & vOutput, const PosIdx pos) { - Activity act(*logger, lvlInfo, actUnknown, fmt("checking flake output '%s'", name)); - - try { - evalSettings.enableImportFromDerivation.setDefault(name != "hydraJobs"); - - state->forceValue(vOutput, pos); - - std::string_view replacement = name == "defaultPackage" ? "packages..default" - : name == "defaultApp" ? "apps..default" - : name == "defaultTemplate" ? "templates.default" - : name == "defaultBundler" ? "bundlers..default" - : name == "overlay" ? "overlays.default" - : name == "devShell" ? "devShells..default" - : name == "nixosModule" ? 
"nixosModules.default" - : ""; - if (replacement != "") - warn("flake output attribute '%s' is deprecated; use '%s' instead", name, replacement); - - if (name == "checks") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - std::string_view attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - state->forceAttrs(*attr.value, attr.pos, ""); - for (auto & attr2 : *attr.value->attrs()) { - auto drvPath = checkDerivation( - fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), - *attr2.value, - attr2.pos); - if (drvPath && attr_name == settings.thisSystem.get()) { - auto path = DerivedPath::Built{ - .drvPath = makeConstantStorePathRef(*drvPath), - .outputs = OutputsSpec::All{}, - }; - - // Build and store the attribute path for error reporting - AttrPath attrPath{state->symbols.create(name), attr.name, attr2.name}; - attrPathsByDrv[path].push_back(std::move(attrPath)); - } - } + [&](const flake_schemas::Leaf & leaf) { + try { + bool done = true; + bool buildSkipped = false; + + if (auto evalChecks = leaf.node->maybeGetAttr("evalChecks")) { + auto checkNames = evalChecks->getAttrs(); + for (auto & checkName : checkNames) { + auto cursor = evalChecks->getAttr(checkName); + Activity act( + *logger, + lvlInfo, + actUnknown, + fmt("running flake check '%s'", cursor->getAttrPathStr())); + auto b = cursor->getBool(); + if (!b) + throw Error("Evaluation check '%s' failed.", cursor->getAttrPathStr()); } } - } - - else if (name == "formatter") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - checkDerivation(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); - }; - } - } - - else if (name == "packages" || name == "devShells") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : 
*vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - state->forceAttrs(*attr.value, attr.pos, ""); - for (auto & attr2 : *attr.value->attrs()) - checkDerivation( - fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), - *attr2.value, - attr2.pos); - }; - } - } - - else if (name == "apps") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - state->forceAttrs(*attr.value, attr.pos, ""); - for (auto & attr2 : *attr.value->attrs()) - checkApp( - fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), - *attr2.value, - attr2.pos); - }; - } - } - - else if (name == "defaultPackage" || name == "devShell") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - checkDerivation(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); - }; - } - } - else if (name == "defaultApp") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - checkApp(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); - }; + if (auto drv = leaf.derivation(outputs)) { + + /* Check whether this is a valid derivation. 
*/ + if (!drv->maybeGetAttr("drvPath") || drv->getAttr("type")->getString() != "derivation") + throw Error("Flake output '%s' is not a derivation.", drv->getAttrPathStr()); + + DrvName parsedDrvName(drv->getAttr("name")->getString()); + + if (buildAll || leaf.isFlakeCheck()) { + auto drvPath = drv->forceDerivation(); + auto derivedPath = DerivedPath::Built{ + .drvPath = makeConstantStorePathRef(drvPath), + .outputs = OutputsSpec::All{}, + }; + (*derivedPathToAttrPaths_.lock())[derivedPath].push_back(leaf.node->getAttrPath()); + drvPaths_.lock()->push_back(std::move(derivedPath)); + if (build) + done = false; + } else + buildSkipped = true; } - } - - else if (name == "legacyPackages") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - checkSystemName(state->symbols[attr.name], attr.pos); - checkSystemType(state->symbols[attr.name], attr.pos); - // FIXME: do getDerivations? - } - } - - else if (name == "overlay") - checkOverlay(name, vOutput, pos); - else if (name == "overlays") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) - checkOverlay(fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); - } - - else if (name == "nixosModule") - checkModule(name, vOutput, pos); - - else if (name == "nixosModules") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) - checkModule(fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); - } - - else if (name == "nixosConfigurations") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) - checkNixOSConfiguration( - fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); + if (done) + notice( + "✅ " ANSI_BOLD "%s" ANSI_NORMAL "%s", + leaf.node->getAttrPathStr(), + buildSkipped ? 
ANSI_ITALIC ANSI_FAINT " (build skipped)" : ""); + } catch (Interrupted & e) { + throw; + } catch (Error & e) { + printError("❌ " ANSI_RED "%s" ANSI_NORMAL, leaf.node->getAttrPathStr()); + if (settings.keepGoing) { + logEvalError(); + hasErrors = true; + } else + throw; } + }, - else if (name == "hydraJobs") - checkHydraJobs(name, vOutput, pos); - - else if (name == "defaultTemplate") - checkTemplate(name, vOutput, pos); - - else if (name == "templates") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) - checkTemplate(fmt("%s.%s", name, state->symbols[attr.name]), *attr.value, attr.pos); - } + [&](std::function forEachChild) { + forEachChild([&](Symbol attrName, ref node, bool isLast) { + state->spawn(futures, 2, [&visit, node]() { visit(node); }); + }); + }, - else if (name == "defaultBundler") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - checkBundler(fmt("%s.%s", name, attr_name), *attr.value, attr.pos); - }; - } - } + [&](ref node, const std::vector & systems) { + for (auto & s : systems) + omittedSystems.lock()->insert(s); + }); + }; - else if (name == "bundlers") { - state->forceAttrs(vOutput, pos, ""); - for (auto & attr : *vOutput.attrs()) { - const auto & attr_name = state->symbols[attr.name]; - checkSystemName(attr_name, attr.pos); - if (checkSystemType(attr_name, attr.pos)) { - state->forceAttrs(*attr.value, attr.pos, ""); - for (auto & attr2 : *attr.value->attrs()) { - checkBundler( - fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]), - *attr2.value, - attr2.pos); - } - }; - } - } + flake_schemas::forEachOutput( + inventory, + [&](Symbol outputName, + std::shared_ptr output, + const std::string & doc, + bool isLast) { + if (output) + state->spawn(futures, 1, [&visit, output(ref(output))]() { visit(output); }); + else + 
uncheckedOutputs.lock()->insert(std::string(state->symbols[outputName])); + }); - else if ( - name == "lib" || name == "darwinConfigurations" || name == "darwinModules" - || name == "flakeModule" || name == "flakeModules" || name == "herculesCI" - || name == "homeConfigurations" || name == "homeModule" || name == "homeModules" - || name == "nixopsConfigurations") - // Known but unchecked community attribute - ; + futures.finishAll(); - else - warn("unknown flake output '%s'", name); + if (!uncheckedOutputs.lock()->empty()) + warn("The following flake outputs are unchecked: %s.", concatStringsSep(", ", *uncheckedOutputs.lock())); - } catch (Error & e) { - e.addTrace(resolve(pos), HintFmt("while checking flake output '%s'", name)); - reportError(e); - } - }); - } + auto drvPaths(drvPaths_.lock()); + auto derivedPathToAttrPaths(derivedPathToAttrPaths_.lock()); - if (build && !attrPathsByDrv.empty()) { - auto keys = std::views::keys(attrPathsByDrv); - std::vector drvPaths(keys.begin(), keys.end()); + if (build && !drvPaths->empty()) { // TODO: This filtering of substitutable paths is a temporary workaround until // https://github.com/NixOS/nix/issues/5025 (union stores) is implemented. // @@ -797,61 +487,71 @@ struct CmdFlakeCheck : FlakeCommand // For now, we skip building derivations whose outputs are already available // via substitution, as `nix flake check` only needs to verify buildability, // not actually produce the outputs. 
- auto missing = store->queryMissing(drvPaths); + state->waitForAllPaths(); + auto missing = store->queryMissing(*drvPaths); std::vector toBuild; + std::set toBuildSet; for (auto & path : missing.willBuild) { - toBuild.emplace_back( - DerivedPath::Built{ - .drvPath = makeConstantStorePathRef(path), - .outputs = OutputsSpec::All{}, - }); + auto derivedPath = DerivedPath::Built{ + .drvPath = makeConstantStorePathRef(path), + .outputs = OutputsSpec::All{}, + }; + toBuild.emplace_back(derivedPath); + toBuildSet.insert(std::move(derivedPath)); } + for (auto & [derivedPath, attrPaths] : *derivedPathToAttrPaths) + if (!toBuildSet.contains(derivedPath)) + for (auto & attrPath : attrPaths) + notice( + "✅ " ANSI_BOLD "%s" ANSI_NORMAL ANSI_ITALIC ANSI_FAINT " (previously built)" ANSI_NORMAL, + attrPath.to_string(*state)); + + // FIXME: should start building while evaluating. Activity act(*logger, lvlInfo, actUnknown, fmt("running %d flake checks", toBuild.size())); - auto results = store->buildPathsWithResults(toBuild); - - // Report build failures with attribute paths - for (auto & result : results) { - if (auto * failure = result.tryGetFailure()) { - auto it = attrPathsByDrv.find(result.path); - if (it != attrPathsByDrv.end() && !it->second.empty()) { - for (auto & attrPath : it->second) { - reportError(Error( - "failed to build attribute '%s', build of '%s' failed: %s", - attrPath.to_string(*state), - result.path.to_string(*store), - failure->errorMsg)); - } - } else { - // Derivation has no attribute path (e.g., a build dependency) - reportError( - Error("build of '%s' failed: %s", result.path.to_string(*store), failure->errorMsg)); + + auto buildResults = store->buildPathsWithResults(toBuild); + assert(buildResults.size() == toBuild.size()); + + for (auto & buildResult : buildResults) { + if (auto failure = buildResult.tryGetFailure()) + try { + hasErrors = true; + for (auto & attrPath : (*derivedPathToAttrPaths)[buildResult.path]) + if (failure->status == 
BuildResult::Failure::Cancelled) + notice( + "❓ " ANSI_BOLD "%s" ANSI_NORMAL ANSI_FAINT " (cancelled)", + attrPath.to_string(*state)); + else + printError("❌ " ANSI_RED "%s" ANSI_NORMAL, attrPath.to_string(*state)); + if (failure->status != BuildResult::Failure::Cancelled) + failure->rethrow(); + } catch (Error & e) { + logError(e.info()); } - } + else + for (auto & attrPath : (*derivedPathToAttrPaths)[buildResult.path]) + notice("✅ " ANSI_BOLD "%s" ANSI_NORMAL, attrPath.to_string(*state)); } } - if (hasErrors) - throw Error("some errors were encountered during the evaluation"); - logger->log(lvlInfo, ANSI_GREEN "all checks passed!" ANSI_NORMAL); - - if (!omittedSystems.empty()) { + if (!omittedSystems.lock()->empty()) { // TODO: empty system is not visible; render all as nix strings? warn( "The check omitted these incompatible systems: %s\n" "Use '--all-systems' to check all.", - concatStringsSep(", ", omittedSystems)); - }; + concatStringsSep(", ", *omittedSystems.lock())); + } + + if (hasErrors) + throw Exit(1); }; }; -static Strings defaultTemplateAttrPathsPrefixes{"templates."}; -static Strings defaultTemplateAttrPaths = {"templates.default", "defaultTemplate"}; - -struct CmdFlakeInitCommon : virtual Args, EvalCommand +struct CmdFlakeInitCommon : virtual Args, EvalCommand, MixFlakeSchemas { - std::string templateUrl = "templates"; + std::string templateUrl = "https://flakehub.com/f/DeterminateSystems/flake-templates/0.1"; Path destDir; const LockFlags lockFlags{.writeLockFile = false}; @@ -865,13 +565,7 @@ struct CmdFlakeInitCommon : virtual Args, EvalCommand .labels = {"template"}, .handler = {&templateUrl}, .completer = {[&](AddCompletions & completions, size_t, std::string_view prefix) { - completeFlakeRefWithFragment( - completions, - getEvalState(), - lockFlags, - defaultTemplateAttrPathsPrefixes, - defaultTemplateAttrPaths, - prefix); + completeFlakeRefWithFragment(completions, getEvalState(), lockFlags, {"nix-template"}, prefix); }}, }); } @@ -891,9 
+585,9 @@ struct CmdFlakeInitCommon : virtual Args, EvalCommand std::move(templateFlakeRef), templateName, ExtendedOutputsSpec::Default(), - defaultTemplateAttrPaths, - defaultTemplateAttrPathsPrefixes, - lockFlags); + {"nix-template"}, + lockFlags, + {}); auto cursor = installable.getCursor(*evalState); @@ -1083,7 +777,8 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun, MixNoCheckSigs StorePathSet sources; - auto storePath = store->toStorePath(flake.flake.path.path.abs()).first; + auto storePath = dryRun ? flake.flake.lockedRef.input.computeStorePath(*store) + : std::get(flake.flake.lockedRef.input.fetchToStore(fetchSettings, *store)); sources.insert(storePath); @@ -1096,7 +791,8 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun, MixNoCheckSigs std::optional storePath; if (!(*inputNode)->lockedRef.input.isRelative()) { storePath = dryRun ? (*inputNode)->lockedRef.input.computeStorePath(*store) - : (*inputNode)->lockedRef.input.fetchToStore(fetchSettings, *store).first; + : std::get( + (*inputNode)->lockedRef.input.fetchToStore(fetchSettings, *store)); sources.insert(*storePath); } if (json) { @@ -1129,10 +825,13 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun, MixNoCheckSigs } }; -struct CmdFlakeShow : FlakeCommand, MixJSON +struct CmdFlakeShow : FlakeCommand, MixJSON, MixFlakeSchemas { bool showLegacy = false; bool showAllSystems = false; + bool showOutputPaths = false; + bool showDrvPaths = false; + bool showDrvNames = false; CmdFlakeShow() { @@ -1146,6 +845,21 @@ struct CmdFlakeShow : FlakeCommand, MixJSON .description = "Show the contents of outputs for all systems.", .handler = {&showAllSystems, true}, }); + addFlag({ + .longName = "output-paths", + .description = "Include the store paths of derivation outputs in the JSON output.", + .handler = {&showOutputPaths, true}, + }); + addFlag({ + .longName = "drv-paths", + .description = "Include the store paths of derivations in the JSON output.", + .handler = {&showDrvPaths, 
true}, + }); + addFlag({ + .longName = "drv-names", + .description = "Show the names and versions of derivations.", + .handler = {&showDrvNames, true}, + }); } std::string description() override @@ -1162,299 +876,176 @@ struct CmdFlakeShow : FlakeCommand, MixJSON void run(nix::ref store) override { - evalSettings.enableImportFromDerivation.setDefault(false); + if (showOutputPaths && !json) + throw UsageError("The '--output-paths' flag requires '--json'."); + + if (showDrvPaths && !json) + throw UsageError("The '--drv-paths' flag requires '--json'."); auto state = getEvalState(); auto flake = make_ref(lockFlake()); auto localSystem = std::string(settings.thisSystem.get()); - std::function - hasContent; - - // For frameworks it's important that structures are as lazy as possible - // to prevent infinite recursions, performance issues and errors that - // aren't related to the thing to evaluate. As a consequence, they have - // to emit more attributes than strictly (sic) necessary. - // However, these attributes with empty values are not useful to the user - // so we omit them. 
- hasContent = [&](eval_cache::AttrCursor & visitor, const AttrPath & attrPath, const Symbol & attr) -> bool { - auto attrPath2(attrPath); - attrPath2.push_back(attr); - auto attrPathS = attrPath2.resolve(*state); - const auto & attrName = state->symbols[attr]; - - auto visitor2 = visitor.getAttr(attrName); - - try { - if ((attrPathS[0] == "apps" || attrPathS[0] == "checks" || attrPathS[0] == "devShells" - || attrPathS[0] == "legacyPackages" || attrPathS[0] == "packages") - && (attrPathS.size() == 1 || attrPathS.size() == 2)) { - for (const auto & subAttr : visitor2->getAttrs()) { - if (hasContent(*visitor2, attrPath2, subAttr)) { - return true; - } - } - return false; - } + auto cache = flake_schemas::call(*state, flake, getDefaultFlakeSchemas()); - if ((attrPathS.size() == 1) - && (attrPathS[0] == "formatter" || attrPathS[0] == "nixosConfigurations" - || attrPathS[0] == "nixosModules" || attrPathS[0] == "overlays")) { - for (const auto & subAttr : visitor2->getAttrs()) { - if (hasContent(*visitor2, attrPath2, subAttr)) { - return true; - } - } - return false; - } + auto inventory = cache->getRoot()->getAttr("inventory"); + auto outputs = cache->getRoot()->getAttr("outputs"); - // If we don't recognize it, it's probably content - return true; - } catch (EvalError & e) { - // Some attrs may contain errors, e.g. legacyPackages of - // nixpkgs. We still want to recurse into it, instead of - // skipping it at all. 
- return true; - } - }; + FutureVector futures(*state->executor); - std::function - visit; - - visit = [&](eval_cache::AttrCursor & visitor, - const AttrPath & attrPath, - const std::string & headerPrefix, - const std::string & nextPrefix) -> nlohmann::json { - auto j = nlohmann::json::object(); - - auto attrPathS = attrPath.resolve(*state); - - Activity act(*logger, lvlInfo, actUnknown, fmt("evaluating '%s'", attrPath.to_string(*state))); - - try { - auto recurse = [&]() { - if (!json) - logger->cout("%s", headerPrefix); - std::vector attrs; - for (const auto & attr : visitor.getAttrs()) { - if (hasContent(visitor, attrPath, attr)) - attrs.push_back(attr); - } + std::function node, nlohmann::json & obj)> visit; - for (const auto & [i, attr] : enumerate(attrs)) { - const auto & attrName = state->symbols[attr]; - bool last = i + 1 == attrs.size(); - auto visitor2 = visitor.getAttr(attrName); - auto attrPath2(attrPath); - attrPath2.push_back(attr); - auto j2 = visit( - *visitor2, - attrPath2, - fmt(ANSI_GREEN "%s%s" ANSI_NORMAL ANSI_BOLD "%s" ANSI_NORMAL, - nextPrefix, - last ? treeLast : treeConn, - attrName), - nextPrefix + (last ? treeNull : treeLine)); - if (json) - j.emplace(attrName, std::move(j2)); - } - }; + visit = [&](ref node, nlohmann::json & obj) { + flake_schemas::visit( + showAllSystems ? std::optional() : localSystem, + node, - auto showDerivation = [&]() { - auto name = visitor.getAttr(state->s.name)->getString(); + [&](const flake_schemas::Leaf & leaf) { + if (auto what = leaf.what()) + obj.emplace("what", *what); - if (json) { - std::optional description; - if (auto aMeta = visitor.maybeGetAttr(state->s.meta)) { - if (auto aDescription = aMeta->maybeGetAttr(state->s.description)) - description = aDescription->getString(); - } - j.emplace("type", "derivation"); - j.emplace("name", name); - j.emplace("description", description ? 
*description : ""); - } else { - logger->cout( - "%s: %s '%s'", - headerPrefix, - attrPath.size() == 2 && attrPathS[0] == "devShell" ? "development environment" - : attrPath.size() >= 2 && attrPathS[0] == "devShells" ? "development environment" - : attrPath.size() == 3 && attrPathS[0] == "checks" ? "derivation" - : attrPath.size() >= 1 && attrPathS[0] == "hydraJobs" ? "derivation" - : "package", - name); - } - }; + if (auto shortDescription = leaf.shortDescription()) + obj.emplace("shortDescription", *shortDescription); - if (attrPath.size() == 0 - || (attrPath.size() == 1 - && (attrPathS[0] == "defaultPackage" || attrPathS[0] == "devShell" - || attrPathS[0] == "formatter" || attrPathS[0] == "nixosConfigurations" - || attrPathS[0] == "nixosModules" || attrPathS[0] == "defaultApp" - || attrPathS[0] == "templates" || attrPathS[0] == "overlays")) - || ((attrPath.size() == 1 || attrPath.size() == 2) - && (attrPathS[0] == "checks" || attrPathS[0] == "packages" || attrPathS[0] == "devShells" - || attrPathS[0] == "apps"))) { - recurse(); - } + if (auto drv = leaf.derivation(outputs)) { + auto drvObj = nlohmann::json::object(); + + if (json || showDrvNames) + drvObj.emplace("name", drv->getAttr(state->s.name)->getString()); - else if ( - (attrPath.size() == 2 - && (attrPathS[0] == "defaultPackage" || attrPathS[0] == "devShell" || attrPathS[0] == "formatter")) - || (attrPath.size() == 3 - && (attrPathS[0] == "checks" || attrPathS[0] == "packages" || attrPathS[0] == "devShells"))) { - if (!showAllSystems && std::string(attrPathS[1]) != localSystem) { - if (!json) - logger->cout( - fmt("%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--all-systems' to show)", - headerPrefix)); - else { - logger->warn(fmt("%s omitted (use '--all-systems' to show)", attrPath.to_string(*state))); + if (showDrvPaths) { + auto drvPath = drv->forceDerivation(); + drvObj.emplace("path", store->printStorePath(drvPath)); } - } else { - try { - if (visitor.isDerivation()) - showDerivation(); - else { - 
auto name = visitor.getAttrPathStr(state->s.name); - logger->warn(fmt("%s is not a derivation", name)); - } - } catch (IFDError & e) { - if (!json) { - logger->cout( - fmt("%s " ANSI_WARNING "omitted due to use of import from derivation" ANSI_NORMAL, - headerPrefix)); - } else { - logger->warn( - fmt("%s omitted due to use of import from derivation", attrPath.to_string(*state))); + + if (showOutputPaths) { + auto outputs = nlohmann::json::object(); + auto drvPath = drv->forceDerivation(); + auto drv = getEvalStore()->derivationFromPath(drvPath); + for (auto & i : drv.outputsAndOptPaths(*store)) { + if (auto outPath = i.second.second) + outputs.emplace(i.first, store->printStorePath(*outPath)); + else + outputs.emplace(i.first, nullptr); } + drvObj.emplace("outputs", std::move(outputs)); } - } - } - else if (attrPath.size() > 0 && attrPathS[0] == "hydraJobs") { - try { - if (visitor.isDerivation()) - showDerivation(); - else - recurse(); - } catch (IFDError & e) { - if (!json) { - logger->cout( - fmt("%s " ANSI_WARNING "omitted due to use of import from derivation" ANSI_NORMAL, - headerPrefix)); - } else { - logger->warn( - fmt("%s omitted due to use of import from derivation", attrPath.to_string(*state))); - } + obj.emplace("derivation", std::move(drvObj)); } - } - else if (attrPath.size() > 0 && attrPathS[0] == "legacyPackages") { - if (attrPath.size() == 1) - recurse(); - else if (!showLegacy) { - if (!json) - logger->cout(fmt( - "%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--legacy' to show)", headerPrefix)); - else { - logger->warn(fmt("%s omitted (use '--legacy' to show)", attrPath.to_string(*state))); - } - } else if (!showAllSystems && std::string(attrPathS[1]) != localSystem) { - if (!json) - logger->cout( - fmt("%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--all-systems' to show)", - headerPrefix)); - else { - logger->warn(fmt("%s omitted (use '--all-systems' to show)", attrPath.to_string(*state))); - } - } else { - try { - if 
(visitor.isDerivation()) - showDerivation(); - else if (attrPath.size() <= 2) - // FIXME: handle recurseIntoAttrs - recurse(); - } catch (IFDError & e) { - if (!json) { - logger->cout( - fmt("%s " ANSI_WARNING "omitted due to use of import from derivation" ANSI_NORMAL, - headerPrefix)); - } else { - logger->warn( - fmt("%s omitted due to use of import from derivation", attrPath.to_string(*state))); + if (auto forSystems = leaf.forSystems()) + obj.emplace("forSystems", *forSystems); + }, + + [&](std::function forEachChild) { + auto children = nlohmann::json::object(); + forEachChild([&](Symbol attrName, ref node, bool isLast) { + auto & j = children.emplace(state->symbols[attrName], nlohmann::json::object()).first.value(); + state->spawn(futures, 1, [&visit, &j, node]() { + try { + visit(node, j); + } catch (EvalError & e) { + // FIXME: make it a flake schema attribute whether to ignore evaluation errors. + if (node->root->state.symbols[node->getAttrPath()[0]] == "legacyPackages") + j.emplace("failed", true); + else + throw; } - } - } - } + }); + }); + obj.emplace("children", std::move(children)); + }, - else if ( - (attrPath.size() == 2 && attrPathS[0] == "defaultApp") - || (attrPath.size() == 3 && attrPathS[0] == "apps")) { - auto aType = visitor.maybeGetAttr("type"); - std::optional description; - if (auto aMeta = visitor.maybeGetAttr(state->s.meta)) { - if (auto aDescription = aMeta->maybeGetAttr(state->s.description)) - description = aDescription->getString(); - } - if (!aType || aType->getString() != "app") - state->error("not an app definition").debugThrow(); - if (json) { - j.emplace("type", "app"); - if (description) - j.emplace("description", *description); - } else { - logger->cout( - "%s: app: " ANSI_BOLD "%s" ANSI_NORMAL, - headerPrefix, - description ? 
*description : "no description"); - } - } + [&](ref node, const std::vector & systems) { + obj.emplace("filtered", true); + }); + }; - else if ( - (attrPath.size() == 1 && attrPathS[0] == "defaultTemplate") - || (attrPath.size() == 2 && attrPathS[0] == "templates")) { - auto description = visitor.getAttr("description")->getString(); - if (json) { - j.emplace("type", "template"); - j.emplace("description", description); - } else { - logger->cout("%s: template: " ANSI_BOLD "%s" ANSI_NORMAL, headerPrefix, description); - } - } + auto inv = nlohmann::json::object(); + + flake_schemas::forEachOutput( + inventory, + [&](Symbol outputName, + std::shared_ptr output, + const std::string & doc, + bool isLast) { + auto & j = inv.emplace(state->symbols[outputName], nlohmann::json::object()).first.value(); + + if (!showLegacy && state->symbols[outputName] == "legacyPackages") { + j.emplace("skipped", true); + } else if (output) { + j.emplace("doc", doc); + auto & j2 = j.emplace("output", nlohmann::json::object()).first.value(); + state->spawn(futures, 1, [&visit, output, &j2]() { visit(ref(output), j2); }); + } else + j.emplace("unknown", true); + }); - else { - auto [type, description] = (attrPath.size() == 1 && attrPathS[0] == "overlay") - || (attrPath.size() == 2 && attrPathS[0] == "overlays") - ? std::make_pair("nixpkgs-overlay", "Nixpkgs overlay") - : attrPath.size() == 2 && attrPathS[0] == "nixosConfigurations" - ? std::make_pair("nixos-configuration", "NixOS configuration") - : (attrPath.size() == 1 && attrPathS[0] == "nixosModule") - || (attrPath.size() == 2 && attrPathS[0] == "nixosModules") - ? 
std::make_pair("nixos-module", "NixOS module") - : std::make_pair("unknown", "unknown"); - if (json) { - j.emplace("type", type); - } else { - logger->cout("%s: " ANSI_WARNING "%s" ANSI_NORMAL, headerPrefix, description); - } + futures.finishAll(); + + if (json) { + auto res = nlohmann::json{{"version", 2}, {"inventory", std::move(inv)}}; + printJSON(res); + } else { + + // Render the JSON into a tree representation. + std::function + render; + + render = [&](nlohmann::json j, const std::string & headerPrefix, const std::string & nextPrefix) { + auto what = j.find("what"); + auto filtered = j.find("filtered"); + auto derivation = j.find("derivation"); + + auto s = headerPrefix; + + if (what != j.end()) + s += fmt(": %s", (std::string) *what); + + if (derivation != j.end()) { + auto name = derivation->find("name"); + if (name != derivation->end()) + s += fmt(ANSI_ITALIC " [%s]" ANSI_NORMAL, (std::string) *name); } - } catch (EvalError & e) { - if (!(attrPath.size() > 0 && attrPathS[0] == "legacyPackages")) - throw; - } - return j; - }; + if (filtered != j.end() && (bool) *filtered) + s += " " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--all-systems' to show)"; - auto cache = openEvalCache(*state, ref(flake)); + logger->cout(s); - auto j = visit(*cache->getRoot(), {}, fmt(ANSI_BOLD "%s" ANSI_NORMAL, flake->flake.lockedRef), ""); - if (json) - printJSON(j); + auto children = j.find("children"); + + if (children != j.end()) { + for (const auto & [i, child] : enumerate(children->items())) { + bool last = i + 1 == children->size(); + render( + child.value(), + fmt(ANSI_GREEN "%s%s" ANSI_NORMAL ANSI_BOLD "%s" ANSI_NORMAL, + nextPrefix, + last ? treeLast : treeConn, + child.key()), + nextPrefix + (last ? treeNull : treeLine)); + } + } + }; + + logger->cout("%s", fmt(ANSI_BOLD "%s" ANSI_NORMAL, flake->flake.lockedRef)); + + for (const auto & [i, child] : enumerate(inv.items())) { + bool last = i + 1 == inv.size(); + auto nextPrefix = last ? 
treeNull : treeLine; + auto output = child.value().find("output"); + auto headerPrefix = fmt( + ANSI_GREEN "%s" ANSI_NORMAL ANSI_BOLD "%s" ANSI_NORMAL, last ? treeLast : treeConn, child.key()); + if (output != child.value().end()) + render(*output, headerPrefix, nextPrefix); + else if (child.value().contains("unknown")) + logger->cout(headerPrefix + ANSI_WARNING " unknown flake output" ANSI_NORMAL); + else if (child.value().contains("skipped")) + logger->cout(headerPrefix + ANSI_WARNING " omitted" ANSI_NORMAL " (use '--legacy' to show)"); + } + } } }; @@ -1490,9 +1081,7 @@ struct CmdFlakePrefetch : FlakeCommand, MixJSON { auto originalRef = getFlakeRef(); auto resolvedRef = originalRef.resolve(fetchSettings, *store); - auto [accessor, lockedRef] = resolvedRef.lazyFetch(getEvalState()->fetchSettings, *store); - auto storePath = - fetchToStore(getEvalState()->fetchSettings, *store, accessor, FetchMode::Copy, lockedRef.input.getName()); + auto [storePath, accessor, lockedRef] = resolvedRef.input.fetchToStore(fetchSettings, *store); auto hash = store->queryPathInfo(storePath)->narHash; if (json) { @@ -1501,7 +1090,6 @@ struct CmdFlakePrefetch : FlakeCommand, MixJSON res["hash"] = hash.to_string(HashFormat::SRI, true); res["original"] = fetchers::attrsToJSON(resolvedRef.toAttrs()); res["locked"] = fetchers::attrsToJSON(lockedRef.toAttrs()); - res["locked"].erase("__final"); // internal for now printJSON(res); } else { notice( @@ -1538,12 +1126,6 @@ struct CmdFlake : NixMultiCommand #include "flake.md" ; } - - void run() override - { - experimentalFeatureSettings.require(Xp::Flakes); - NixMultiCommand::run(); - } }; static auto rCmdFlake = registerCommand("flake"); diff --git a/src/nix/formatter.cc b/src/nix/formatter.cc index 2c0b5c62b39c..dfb77d87b5f4 100644 --- a/src/nix/formatter.cc +++ b/src/nix/formatter.cc @@ -34,14 +34,9 @@ static auto rCmdFormatter = registerCommand("formatter"); /** Common implementation bits for the `nix formatter` subcommands. 
*/ struct MixFormatter : SourceExprCommand { - Strings getDefaultFlakeAttrPaths() override + StringSet getRoles() override { - return Strings{"formatter." + settings.thisSystem.get()}; - } - - Strings getDefaultFlakeAttrPathPrefixes() override - { - return Strings{}; + return {"nix-fmt"}; } }; diff --git a/src/nix/get-env.sh b/src/nix/get-env.sh index 39fa6f9ac8f5..5b6162b4ba26 100644 --- a/src/nix/get-env.sh +++ b/src/nix/get-env.sh @@ -17,6 +17,7 @@ __functions="$(declare -F)" __dumpEnv() { printf '{\n' + printf ' "version": 1,\n' printf ' "bashFunctions": {\n' local __first=1 diff --git a/src/nix/log.md b/src/nix/log.md index 01e9801df72b..9d526bb420ce 100644 --- a/src/nix/log.md +++ b/src/nix/log.md @@ -11,7 +11,7 @@ R""( * Get the build log of a specific store path: ```console - # nix log /nix/store/lmngj4wcm9rkv3w4dfhzhcyij3195hiq-thunderbird-52.2.1 + # nix log /nix/store/vaph2hfdmnipqr90v6g5mcdn8h5p5iss-thunderbird-52.2.1 ``` * Get a build log from a specific binary cache: diff --git a/src/nix/ls.cc b/src/nix/ls.cc index 66f52a18afc0..012850cc05db 100644 --- a/src/nix/ls.cc +++ b/src/nix/ls.cc @@ -4,12 +4,13 @@ #include "nix/main/common-args.hh" #include +#include "ls.hh" + using namespace nix; -struct MixLs : virtual Args, MixJSON +struct MixLs : virtual Args, MixJSON, MixLongListing { bool recursive = false; - bool verbose = false; bool showDirectory = false; MixLs() @@ -21,13 +22,6 @@ struct MixLs : virtual Args, MixJSON .handler = {&recursive, true}, }); - addFlag({ - .longName = "long", - .shortName = 'l', - .description = "Show detailed file information.", - .handler = {&verbose, true}, - }); - addFlag({ .longName = "directory", .shortName = 'd', @@ -41,13 +35,13 @@ struct MixLs : virtual Args, MixJSON std::function doPath; auto showFile = [&](const CanonPath & curPath, std::string_view relPath) { - if (verbose) { + if (longListing) { auto st = accessor->lstat(curPath); std::string tp = st.type == SourceAccessor::Type::tRegular ? (st.isExecutable ? 
"-r-xr-xr-x" : "-r--r--r--") : st.type == SourceAccessor::Type::tSymlink ? "lrwxrwxrwx" : "dr-xr-xr-x"; - auto line = fmt("%s %20d %s", tp, st.fileSize.value_or(0), relPath); + auto line = fmt("%s %9d %s", tp, st.fileSize.value_or(0), relPath); if (st.type == SourceAccessor::Type::tSymlink) line += " -> " + accessor->readLink(curPath); logger->cout(line); diff --git a/src/nix/ls.hh b/src/nix/ls.hh new file mode 100644 index 000000000000..36e61162035f --- /dev/null +++ b/src/nix/ls.hh @@ -0,0 +1,22 @@ +#pragma once + +#include "nix/util/args.hh" + +namespace nix { + +struct MixLongListing : virtual Args +{ + bool longListing = false; + + MixLongListing() + { + addFlag({ + .longName = "long", + .shortName = 'l', + .description = "Show detailed file information.", + .handler = {&longListing, true}, + }); + } +}; + +} // namespace nix diff --git a/src/nix/main.cc b/src/nix/main.cc index 93c1dc42a381..0711804a5447 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -86,6 +86,22 @@ static bool haveInternet() #endif } +static void disableNet() +{ + // FIXME: should check for command line overrides only. + if (!settings.useSubstitutes.overridden) + // FIXME: should not disable local substituters (like file:///). 
+ settings.useSubstitutes = false; + if (!settings.tarballTtl.overridden) + settings.tarballTtl = std::numeric_limits::max(); + if (!settings.ttlNarInfoCacheMeta.overridden) + settings.ttlNarInfoCacheMeta = std::numeric_limits::max(); + if (!fileTransferSettings.tries.overridden) + fileTransferSettings.tries = 0; + if (!fileTransferSettings.connectTimeout.overridden) + fileTransferSettings.connectTimeout = 1; +} + std::string programPath; struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs @@ -119,7 +135,6 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs .description = "Print full build logs on standard error.", .category = loggingCategory, .handler = {[&]() { logger->setPrintBuildLogs(true); }}, - .experimentalFeature = Xp::NixCommand, }); addFlag({ @@ -135,7 +150,6 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs .description = "Disable substituters and consider all previously downloaded files up-to-date.", .category = miscCategory, .handler = {[&]() { useNet = false; }}, - .experimentalFeature = Xp::NixCommand, }); addFlag({ @@ -143,7 +157,6 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs .description = "Consider all previously downloaded files out-of-date.", .category = miscCategory, .handler = {[&]() { refresh = true; }}, - .experimentalFeature = Xp::NixCommand, }); aliases = { @@ -441,7 +454,6 @@ void mainWrapped(int argc, char ** argv) if (argc == 2 && std::string(argv[1]) == "__dump-language") { experimentalFeatureSettings.experimentalFeatures = { - Xp::Flakes, Xp::FetchClosure, Xp::DynamicDerivations, Xp::FetchTree, @@ -500,6 +512,12 @@ void mainWrapped(int argc, char ** argv) } }); + if (getEnv("NIX_GET_COMPLETIONS")) + /* Avoid fetching stuff during tab completion. We have to this + early because we haven't checked `haveInternet()` yet + (below). 
*/ + disableNet(); + try { auto isNixCommand = std::regex_search(programName, std::regex("nix$")); auto allowShebang = isNixCommand && argc > 1; @@ -511,6 +529,8 @@ void mainWrapped(int argc, char ** argv) applyJSONLogger(); + printTalkative("Nix %s", version()); + if (args.helpRequested) { std::vector subcommand; MultiCommand * command = &args; @@ -543,22 +563,14 @@ void mainWrapped(int argc, char ** argv) args.useNet = false; } - if (!args.useNet) { - // FIXME: should check for command line overrides only. - if (!settings.useSubstitutes.overridden) - settings.useSubstitutes = false; - if (!settings.tarballTtl.overridden) - settings.tarballTtl = std::numeric_limits::max(); - if (!fileTransferSettings.tries.overridden) - fileTransferSettings.tries = 0; - if (!fileTransferSettings.connectTimeout.overridden) - fileTransferSettings.connectTimeout = 1; - } + if (!args.useNet) + disableNet(); if (args.refresh) { settings.tarballTtl = 0; settings.ttlNegativeNarInfoCache = 0; settings.ttlPositiveNarInfoCache = 0; + settings.ttlNarInfoCacheMeta = 0; } if (args.command->second->forceImpureByDefault() && !evalSettings.pureEval.overridden) { @@ -579,15 +591,15 @@ void mainWrapped(int argc, char ** argv) int main(int argc, char ** argv) { + using namespace nix; + // The CLI has a more detailed version than the libraries; see nixVersion. - nix::nixVersion = NIX_CLI_VERSION; + nixVersion = NIX_CLI_VERSION; #ifndef _WIN32 // Increase the default stack size for the evaluator and for // libstdc++'s std::regex. - // This used to be 64 MiB, but macOS as deployed on GitHub Actions has a - // hard limit slightly under that, so we round it down a bit. 
- nix::setStackSize(60 * 1024 * 1024); + setStackSize(evalStackSize); #endif - return nix::handleExceptions(argv[0], [&]() { nix::mainWrapped(argc, argv); }); + return handleExceptions(argv[0], [&]() { mainWrapped(argc, argv); }); } diff --git a/src/nix/make-content-addressed.md b/src/nix/make-content-addressed.md index e6a51c83ada2..4acbba6f4fb0 100644 --- a/src/nix/make-content-addressed.md +++ b/src/nix/make-content-addressed.md @@ -7,7 +7,7 @@ R""( ```console # nix store make-content-addressed nixpkgs#hello … - rewrote '/nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10' to '/nix/store/5skmmcb9svys5lj3kbsrjg7vf2irid63-hello-2.10' + rewrote '/nix/store/10l19qifk7hjjq47px8m2prqk1gv4isy-hello-2.10' to '/nix/store/5skmmcb9svys5lj3kbsrjg7vf2irid63-hello-2.10' ``` Since the resulting paths are content-addressed, they are always @@ -22,7 +22,7 @@ R""( ```console # nix copy --to /tmp/nix --trusted-public-keys '' nixpkgs#hello - cannot add path '/nix/store/zy9wbxwcygrwnh8n2w9qbbcr6zk87m26-libunistring-0.9.10' because it lacks a signature by a trusted key + cannot add path '/nix/store/gs7mh6q22l1ivxazxja2mjlsdwhw8zg9-libunistring-0.9.10' because it lacks a signature by a trusted key ``` * Create a content-addressed representation of the current NixOS diff --git a/src/nix/meson.build b/src/nix/meson.build index 61aac6b4d8fa..3b343614e421 100644 --- a/src/nix/meson.build +++ b/src/nix/meson.build @@ -78,6 +78,7 @@ nix_sources = [ config_priv_h ] + files( 'env.cc', 'eval.cc', 'flake-prefetch-inputs.cc', + 'flake-prefetch-inputs.cc', 'flake.cc', 'formatter.cc', 'hash.cc', @@ -87,11 +88,14 @@ nix_sources = [ config_priv_h ] + files( 'make-content-addressed.cc', 'man-pages.cc', 'nar.cc', + 'nario.cc', 'optimise-store.cc', 'path-from-hash-part.cc', 'path-info.cc', 'prefetch.cc', 'profile.cc', + 'provenance.cc', + 'ps.cc', 'realisation.cc', 'registry.cc', 'repl.cc', diff --git a/src/nix/nario-export.md b/src/nix/nario-export.md new file mode 100644 index 
000000000000..2480733c1cae --- /dev/null +++ b/src/nix/nario-export.md @@ -0,0 +1,29 @@ +R""( + +# Examples + +* Export the closure of the build of `nixpkgs#hello`: + + ```console + # nix nario export --format 2 -r nixpkgs#hello > dump.nario + ``` + + It can be imported into another store: + + ```console + # nix nario import --no-check-sigs < dump.nario + ``` + +# Description + +This command prints to standard output a serialization of the specified store paths in `nario` format. This serialization can be imported into another store using `nix nario import`. + +References of a path are not exported by default; use `-r` to export a complete closure. +Paths are exported in topologically sorted order (i.e. if path `X` refers to `Y`, then `Y` appears before `X`). +You must specify the desired `nario` version. Currently the following versions are supported: + +* `1`: This version is compatible with the legacy `nix-store --export` and `nix-store --import` commands. It should be avoided because it is not memory-efficient on import. It does not support signatures, so you have to use `--no-check-sigs` on import. + +* `2`: The latest version. Recommended. + +)"" diff --git a/src/nix/nario-import.md b/src/nix/nario-import.md new file mode 100644 index 000000000000..9cba60c62203 --- /dev/null +++ b/src/nix/nario-import.md @@ -0,0 +1,15 @@ +R""( + +# Examples + +* Import store paths from the file named `dump`: + + ```console + # nix nario import < dump.nario + ``` + +# Description + +This command reads from standard input a serialization of store paths produced by `nix nario export` and adds them to the Nix store. 
+ +)"" diff --git a/src/nix/nario-list.md b/src/nix/nario-list.md new file mode 100644 index 000000000000..c050457b3657 --- /dev/null +++ b/src/nix/nario-list.md @@ -0,0 +1,43 @@ +R""( + +# Examples + +* List the contents of a nario file: + + ```console + # nix nario list < dump.nario + /nix/store/f671jqvjcz37fsprzqn5jjsmyjj69p9b-xgcc-14.2.1.20250322-libgcc: 201856 bytes + /nix/store/n7iwblclbrz20xinvy4cxrvippdhvqll-libunistring-1.3: 2070240 bytes + … + ``` + +* Use `--json` to get detailed information in JSON format: + + ```console + # nix nario list --json < dump.nario + { + "paths": { + "/nix/store/m1r53pnn…-hello-2.12.1": { + "ca": null, + "deriver": "/nix/store/qa8is0vm…-hello-2.12.1.drv", + "narHash": "sha256-KSCYs4J7tFa+oX7W5M4D7ZYNvrWtdcWTdTL5fQk+za8=", + "narSize": 234672, + "references": [ + "/nix/store/g8zyryr9…-glibc-2.40-66", + "/nix/store/m1r53pnn…-hello-2.12.1" + ], + "registrationTime": 1756900709, + "signatures": [ "cache.nixos.org-1:QbG7A…" ], + "ultimate": false + }, + … + }, + "version": 1 + } + ``` + +# Description + +This command lists the contents of a nario file read from standard input. 
+ +)"" diff --git a/src/nix/nario.cc b/src/nix/nario.cc new file mode 100644 index 000000000000..452c8c9ffaa8 --- /dev/null +++ b/src/nix/nario.cc @@ -0,0 +1,346 @@ +#include "nix/cmd/command.hh" +#include "nix/main/shared.hh" +#include "nix/store/store-api.hh" +#include "nix/store/export-import.hh" +#include "nix/util/callback.hh" +#include "nix/util/fs-sink.hh" +#include "nix/util/archive.hh" + +#include "ls.hh" + +#include + +using namespace nix; + +struct CmdNario : NixMultiCommand +{ + CmdNario() + : NixMultiCommand("nario", RegisterCommand::getCommandsFor({"nario"})) + { + } + + std::string description() override + { + return "operations for manipulating nario files"; + } + + Category category() override + { + return catUtility; + } +}; + +static auto rCmdNario = registerCommand("nario"); + +struct CmdNarioExport : StorePathsCommand +{ + unsigned int version = 0; + + CmdNarioExport() + { + addFlag({ + .longName = "format", + .description = "Version of the nario format to use. Must be `1` or `2`.", + .labels = {"nario-format"}, + .handler = {&version}, + .required = true, + }); + } + + std::string description() override + { + return "serialize store paths to standard output in nario format"; + } + + std::string doc() override + { + return +#include "nario-export.md" + ; + } + + void run(ref store, StorePaths && storePaths) override + { + auto fd = getStandardOutput(); + if (isatty(fd)) + throw UsageError("refusing to write nario to a terminal"); + FdSink sink(std::move(fd)); + exportPaths(*store, StorePathSet(storePaths.begin(), storePaths.end()), sink, version); + } +}; + +static auto rCmdNarioExport = registerCommand2({"nario", "export"}); + +static FdSource getNarioSource() +{ + auto fd = getStandardInput(); + if (isatty(fd)) + throw UsageError("refusing to read nario from a terminal"); + return FdSource(std::move(fd)); +} + +struct CmdNarioImport : StoreCommand, MixNoCheckSigs +{ + std::string description() override + { + return "import store paths from a 
nario file on standard input"; + } + + std::string doc() override + { + return +#include "nario-import.md" + ; + } + + void run(ref store) override + { + auto source{getNarioSource()}; + importPaths(*store, source, checkSigs); + } +}; + +static auto rCmdNarioImport = registerCommand2({"nario", "import"}); + +nlohmann::json listNar(Source & source) +{ + struct : FileSystemObjectSink + { + nlohmann::json root = nlohmann::json::object(); + + nlohmann::json & makeObject(const CanonPath & path, std::string_view type) + { + auto * cur = &root; + for (auto & c : path) { + assert((*cur)["type"] == "directory"); + auto i = (*cur)["entries"].emplace(c, nlohmann::json::object()).first; + cur = &i.value(); + } + auto inserted = cur->emplace("type", type).second; + assert(inserted); + return *cur; + } + + void createDirectory(const CanonPath & path) override + { + auto & j = makeObject(path, "directory"); + j["entries"] = nlohmann::json::object(); + } + + void createRegularFile(const CanonPath & path, std::function func) override + { + struct : CreateRegularFileSink + { + bool executable = false; + std::optional size; + + void operator()(std::string_view data) override {} + + void preallocateContents(uint64_t s) override + { + size = s; + } + + void isExecutable() override + { + executable = true; + } + } crf; + + crf.skipContents = true; + + func(crf); + + auto & j = makeObject(path, "regular"); + j.emplace("size", crf.size.value()); + if (crf.executable) + j.emplace("executable", true); + } + + void createSymlink(const CanonPath & path, const std::string & target) override + { + auto & j = makeObject(path, "symlink"); + j.emplace("target", target); + } + + } parseSink; + + parseDump(parseSink, source); + + return parseSink.root; +} + +void renderNarListing(const CanonPath & prefix, const nlohmann::json & root, bool longListing) +{ + std::function recurse; + recurse = [&](const nlohmann::json & json, const CanonPath & path) { + auto type = json["type"]; + + if (longListing) { 
+ auto tp = type == "regular" ? (json.find("executable") != json.end() ? "-r-xr-xr-x" : "-r--r--r--") + : type == "symlink" ? "lrwxrwxrwx" + : "dr-xr-xr-x"; + auto line = fmt("%s %9d %s", tp, type == "regular" ? (uint64_t) json["size"] : 0, prefix / path); + if (type == "symlink") + line += " -> " + (std::string) json["target"]; + logger->cout(line); + } else + logger->cout(fmt("%s", prefix / path)); + + if (type == "directory") { + for (auto & entry : json["entries"].items()) { + recurse(entry.value(), path / entry.key()); + } + } + }; + + recurse(root, CanonPath::root); +} + +struct CmdNarioList : Command, MixJSON, MixLongListing +{ + bool listContents = false; + + CmdNarioList() + { + addFlag({ + .longName = "recursive", + .shortName = 'R', + .description = "List the contents of NARs inside the nario.", + .handler = {&listContents, true}, + }); + } + + std::string description() override + { + return "list the contents of a nario file"; + } + + std::string doc() override + { + return +#include "nario-list.md" + ; + } + + void run() override + { + struct Config : StoreConfig + { + Config(const Params & params) + : StoreConfig(params) + { + } + + ref openStore() const override + { + abort(); + } + }; + + struct ListingStore : Store + { + std::optional json; + CmdNarioList & cmd; + + ListingStore(ref config, CmdNarioList & cmd) + : Store{*config} + , cmd(cmd) + { + } + + void queryPathInfoUncached( + const StorePath & path, Callback> callback) noexcept override + { + callback(nullptr); + } + + std::optional isTrustedClient() override + { + return Trusted; + } + + std::optional queryPathFromHashPart(const std::string & hashPart) override + { + return std::nullopt; + } + + void + addToStore(const ValidPathInfo & info, Source & source, RepairFlag repair, CheckSigsFlag checkSigs) override + { + std::optional contents; + if (cmd.listContents) + contents = listNar(source); + else + source.skip(info.narSize); + + if (json) { + // FIXME: make the JSON format configurable. 
+ auto obj = info.toJSON(this, true, PathInfoJsonFormat::V1); + if (contents) + obj.emplace("contents", *contents); + json->emplace(printStorePath(info.path), std::move(obj)); + } else { + if (contents) + renderNarListing(CanonPath(printStorePath(info.path)), *contents, cmd.longListing); + else + logger->cout(fmt("%s: %d bytes", printStorePath(info.path), info.narSize)); + } + } + + StorePath addToStoreFromDump( + Source & dump, + std::string_view name, + FileSerialisationMethod dumpMethod, + ContentAddressMethod hashMethod, + HashAlgorithm hashAlgo, + const StorePathSet & references, + RepairFlag repair, + std::shared_ptr provenance) override + { + unsupported("addToStoreFromDump"); + } + + void narFromPath(const StorePath & path, Sink & sink) override + { + unsupported("narFromPath"); + } + + void queryRealisationUncached( + const DrvOutput &, Callback> callback) noexcept override + { + callback(nullptr); + } + + ref getFSAccessor(bool requireValidPath) override + { + return makeEmptySourceAccessor(); + } + + std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath) override + { + unsupported("getFSAccessor"); + } + + void registerDrvOutput(const Realisation & output) override + { + unsupported("registerDrvOutput"); + } + }; + + auto source{getNarioSource()}; + auto config = make_ref(StoreConfig::Params()); + ListingStore lister(config, *this); + if (json) + lister.json = nlohmann::json::object(); + importPaths(lister, source, NoCheckSigs); + if (json) { + auto j = nlohmann::json::object(); + j["version"] = 1; + j["paths"] = std::move(*lister.json); + printJSON(j); + } + } +}; + +static auto rCmdNarioList = registerCommand2({"nario", "list"}); diff --git a/src/nix/nix-build/nix-build.cc b/src/nix/nix-build/nix-build.cc index a21d1a56549b..217382ef8ee2 100644 --- a/src/nix/nix-build/nix-build.cc +++ b/src/nix/nix-build/nix-build.cc @@ -452,7 +452,9 @@ static void main_nix_build(int argc, char ** argv) throw UsageError("nix-shell requires a 
single derivation"); auto & packageInfo = drvs.front(); - auto drv = evalStore->derivationFromPath(packageInfo.requireDrvPath()); + auto drvPath = packageInfo.requireDrvPath(); + state->waitForPath(drvPath); + auto drv = evalStore->derivationFromPath(drvPath); std::vector pathsToBuild; RealisedPath::Set pathsToCopy; @@ -476,6 +478,7 @@ static void main_nix_build(int argc, char ** argv) throw Error("the 'bashInteractive' attribute in did not evaluate to a derivation"); auto bashDrv = drv->requireDrvPath(); + state->waitForPath(bashDrv); pathsToBuild.push_back( DerivedPath::Built{ .drvPath = makeConstantStorePathRef(bashDrv), @@ -682,6 +685,7 @@ static void main_nix_build(int argc, char ** argv) for (auto & packageInfo : drvs) { auto drvPath = packageInfo.requireDrvPath(); + state->waitForPath(drvPath); auto outputName = packageInfo.queryOutputName(); if (outputName == "") diff --git a/src/nix/nix-channel/nix-channel.cc b/src/nix/nix-channel/nix-channel.cc index 6d9a0ea58986..00723ba2b09b 100644 --- a/src/nix/nix-channel/nix-channel.cc +++ b/src/nix/nix-channel/nix-channel.cc @@ -179,6 +179,11 @@ static void update(const StringSet & channelNames) static int main_nix_channel(int argc, char ** argv) { + warn( + "nix-channel is deprecated in favor of flakes in Determinate Nix. \ +See https://zero-to-nix.com for a guide to Nix flakes. \ +For details and to offer feedback on the deprecation process, see: https://github.com/DeterminateSystems/nix-src/issues/34."); + { // Figure out the name of the `.nix-channels' file to use auto home = getHome(); diff --git a/src/nix/nix-env/nix-env.cc b/src/nix/nix-env/nix-env.cc index 31aa2b3f2cd4..f3e6d2acd810 100644 --- a/src/nix/nix-env/nix-env.cc +++ b/src/nix/nix-env/nix-env.cc @@ -747,6 +747,8 @@ static void opSet(Globals & globals, Strings opFlags, Strings opArgs) drv.setName(globals.forceName); auto drvPath = drv.queryDrvPath(); + if (drvPath) + globals.state->waitForPath(*drvPath); std::vector paths{ drvPath ? 
(DerivedPath) (DerivedPath::Built{ .drvPath = makeConstantStorePathRef(*drvPath), @@ -1062,7 +1064,7 @@ static void opQuery(Globals & globals, Strings opFlags, Strings opArgs) continue; /* For table output. */ - std::vector columns; + TableRow columns; /* For XML output. */ XMLAttrs attrs; diff --git a/src/nix/nix-env/user-env.cc b/src/nix/nix-env/user-env.cc index 5beed78f7247..ac36bf97011d 100644 --- a/src/nix/nix-env/user-env.cc +++ b/src/nix/nix-env/user-env.cc @@ -38,8 +38,10 @@ bool createUserEnv( exist already. */ std::vector drvsToBuild; for (auto & i : elems) - if (auto drvPath = i.queryDrvPath()) + if (auto drvPath = i.queryDrvPath()) { + state.waitForPath(*drvPath); drvsToBuild.push_back({*drvPath}); + } debug("building user environment dependencies"); state.store->buildPaths(toDerivedPaths(drvsToBuild), state.repair ? bmRepair : bmNormal); @@ -108,7 +110,7 @@ bool createUserEnv( environment. */ auto manifestFile = ({ std::ostringstream str; - printAmbiguous(manifest, state.symbols, str, nullptr, std::numeric_limits::max()); + printAmbiguous(state, manifest, str, nullptr, std::numeric_limits::max()); StringSource source{str.view()}; state.store->addToStoreFromDump( source, @@ -152,6 +154,7 @@ bool createUserEnv( debug("building user environment"); std::vector topLevelDrvs; topLevelDrvs.push_back({topLevelDrv}); + state.waitForPath(topLevelDrv); state.store->buildPaths(toDerivedPaths(topLevelDrvs), state.repair ? bmRepair : bmNormal); /* Switch the current user environment to the output path. 
*/ diff --git a/src/nix/nix-instantiate/nix-instantiate.cc b/src/nix/nix-instantiate/nix-instantiate.cc index 3d5c3e26a46e..f09b4078a245 100644 --- a/src/nix/nix-instantiate/nix-instantiate.cc +++ b/src/nix/nix-instantiate/nix-instantiate.cc @@ -17,6 +17,8 @@ #include #include +#include + using namespace nix; static Path gcRoot; @@ -56,19 +58,23 @@ void processExpr( else state.autoCallFunction(autoArgs, v, vRes); if (output == okRaw) - std::cout << *state.coerceToString(noPos, vRes, context, "while generating the nix-instantiate output"); + std::cout << state.devirtualize( + *state.coerceToString(noPos, vRes, context, "while generating the nix-instantiate output"), + context); // We intentionally don't output a newline here. The default PS1 for Bash in NixOS starts with a newline // and other interactive shells like Zsh are smart enough to print a missing newline before the prompt. - else if (output == okXML) - printValueAsXML(state, strict, location, vRes, std::cout, context, noPos); - else if (output == okJSON) { - printValueAsJSON(state, strict, vRes, v.determinePos(noPos), std::cout, context); - std::cout << std::endl; + else if (output == okXML) { + std::ostringstream s; + printValueAsXML(state, strict, location, vRes, s, context, noPos); + std::cout << state.devirtualize(s.str(), context); + } else if (output == okJSON) { + auto j = printValueAsJSON(state, strict, vRes, v.determinePos(noPos), context); + std::cout << state.devirtualize(j.dump(), context) << std::endl; } else { if (strict) state.forceValueDeep(vRes); std::set seen; - printAmbiguous(vRes, state.symbols, std::cout, &seen, std::numeric_limits::max()); + printAmbiguous(state, vRes, std::cout, &seen, std::numeric_limits::max()); std::cout << std::endl; } } else { diff --git a/src/nix/nix-store/nix-store.cc b/src/nix/nix-store/nix-store.cc index a2c0aaf3ff8e..74697ade110f 100644 --- a/src/nix/nix-store/nix-store.cc +++ b/src/nix/nix-store/nix-store.cc @@ -775,7 +775,7 @@ static void opExport(Strings 
opFlags, Strings opArgs) paths.insert(store->followLinksToStorePath(i)); FdSink sink(getStandardOutput()); - exportPaths(*store, paths, sink); + exportPaths(*store, paths, sink, 1); sink.flush(); } diff --git a/src/nix/nix.md b/src/nix/nix.md index 10a2aaee88ce..cc31dabbab41 100644 --- a/src/nix/nix.md +++ b/src/nix/nix.md @@ -48,11 +48,6 @@ manual](https://nix.dev/manual/nix/stable/). # Installables -> **Warning** \ -> Installables are part of the unstable -> [`nix-command` experimental feature](@docroot@/development/experimental-features.md#xp-feature-nix-command), -> and subject to change without notice. - Many `nix` subcommands operate on one or more *installables*. These are command line arguments that represent something that can be realised in the Nix store. @@ -72,13 +67,6 @@ That is, Nix will operate on the default flake output attribute of the flake in ### Flake output attribute -> **Warning** \ -> Flake output attribute installables depend on both the -> [`flakes`](@docroot@/development/experimental-features.md#xp-feature-flakes) -> and -> [`nix-command`](@docroot@/development/experimental-features.md#xp-feature-nix-command) -> experimental features, and subject to change without notice. - Example: `nixpkgs#hello` These have the form *flakeref*[`#`*attrpath*], where *flakeref* is a @@ -140,7 +128,7 @@ If *attrpath* begins with `.` then no prefixes or defaults are attempted. This a ### Store path -Example: `/nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10` +Example: `/nix/store/10l19qifk7hjjq47px8m2prqk1gv4isy-hello-2.10` These are paths inside the Nix store, or symlinks that resolve to a path in the Nix store. 
@@ -196,7 +184,7 @@ operate are determined as follows: and likewise, using a store path to a "drv" file to specify the derivation: ```console - # nix build '/nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^dev,static' + # nix build '/nix/store/fpq78s2h8ffh66v2iy0q1838mhff06y8-glibc-2.33-78.drv^dev,static' … ``` @@ -219,17 +207,17 @@ operate are determined as follows: ```console # nix path-info --closure-size --eval-store auto --store https://cache.nixos.org 'nixpkgs#glibc^*' - /nix/store/g02b1lpbddhymmcjb923kf0l7s9nww58-glibc-2.33-123 33208200 - /nix/store/851dp95qqiisjifi639r0zzg5l465ny4-glibc-2.33-123-bin 36142896 - /nix/store/kdgs3q6r7xdff1p7a9hnjr43xw2404z7-glibc-2.33-123-debug 155787312 - /nix/store/n4xa8h6pbmqmwnq0mmsz08l38abb06zc-glibc-2.33-123-static 42488328 - /nix/store/q6580lr01jpcsqs4r5arlh4ki2c1m9rv-glibc-2.33-123-dev 44200560 + /nix/store/i2fn2mjgihz960bwa7ldab5ra5fhxznh-glibc-2.33-123 33208200 + /nix/store/n2wnn3i47w6dbylh64hdjzgd5rrprdn8-glibc-2.33-123-bin 36142896 + /nix/store/v7dyz518sbkzl8x2a1sgk1lwsfd3d6gm-glibc-2.33-123-debug 155787312 + /nix/store/z4hv6ybyinqw9a3dwyl5k66a91aggylj-glibc-2.33-123-static 42488328 + /nix/store/lrjirf0j1rjnvif6amyp9pfcqr2km385-glibc-2.33-123-dev 44200560 ``` and likewise, using a store path to a "drv" file to specify the derivation: ```console - # nix path-info --closure-size '/nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^*' + # nix path-info --closure-size '/nix/store/fpq78s2h8ffh66v2iy0q1838mhff06y8-glibc-2.33-78.drv^*' … ``` * If you didn't specify the desired outputs, but the derivation has an diff --git a/src/nix/package.nix b/src/nix/package.nix index 8195e6c6ff5a..9c223bceb2ba 100644 --- a/src/nix/package.nix +++ b/src/nix/package.nix @@ -2,6 +2,7 @@ stdenv, lib, mkMesonExecutable, + llvmPackages, nix-store, nix-expr, @@ -18,7 +19,7 @@ let in mkMesonExecutable (finalAttrs: { - pname = "nix"; + pname = "determinate-nix"; inherit version; workDir = ./.; @@ -69,7 +70,13 @@ 
mkMesonExecutable (finalAttrs: { nix-expr nix-main nix-cmd - ]; + ] + ++ lib.optional ( + stdenv.cc.isClang + && stdenv.hostPlatform.isStatic + && stdenv.cc.libcxx != null + && stdenv.cc.libcxx.isLLVM + ) llvmPackages.libunwind; mesonFlags = [ ]; @@ -79,6 +86,23 @@ mkMesonExecutable (finalAttrs: { echo "file binary-dist $out/bin/nix" >> $out/nix-support/hydra-build-products ''; + # Fixes a problem with the "nix-cli-libcxxStdenv-static" package output. + # For some reason that is not clear, it is wanting to use libgcc_eh which is not available. + # Force this to be built with compiler-rt & libunwind over libgcc_eh works. + # Issue: https://github.com/NixOS/nixpkgs/issues/177129 + NIX_CFLAGS_COMPILE = + lib.optionals + ( + stdenv.cc.isClang + && stdenv.hostPlatform.isStatic + && stdenv.cc.libcxx != null + && stdenv.cc.libcxx.isLLVM + ) + [ + "-rtlib=compiler-rt" + "-unwindlib=libunwind" + ]; + meta = { mainProgram = "nix"; platforms = lib.platforms.unix ++ lib.platforms.windows; diff --git a/src/nix/path-from-hash-part.md b/src/nix/path-from-hash-part.md index 788e13ab6d48..b646aa57dd1a 100644 --- a/src/nix/path-from-hash-part.md +++ b/src/nix/path-from-hash-part.md @@ -5,8 +5,8 @@ R""( * Return the full store path with the given hash part: ```console - # nix store path-from-hash-part --store https://cache.nixos.org/ 0i2jd68mp5g6h2sa5k9c85rb80sn8hi9 - /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10 + # nix store path-from-hash-part --store https://cache.nixos.org/ qbhyj3blxpw2i6pb7c6grc9185nbnpvy + /nix/store/qbhyj3blxpw2i6pb7c6grc9185nbnpvy-hello-2.10 ``` # Description @@ -15,6 +15,6 @@ Given the hash part of a store path (that is, the 32 characters following `/nix/store/`), return the full store path. This is primarily useful in the implementation of binary caches, where a request for a `.narinfo` file only supplies the hash part -(e.g. `https://cache.nixos.org/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9.narinfo`). +(e.g. 
`https://cache.nixos.org/qbhyj3blxpw2i6pb7c6grc9185nbnpvy.narinfo`). )"" diff --git a/src/nix/path-info.md b/src/nix/path-info.md index 2e39225b8656..bd4a9311cf91 100644 --- a/src/nix/path-info.md +++ b/src/nix/path-info.md @@ -6,7 +6,7 @@ R""( ```console # nix path-info nixpkgs#hello - /nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10 + /nix/store/10l19qifk7hjjq47px8m2prqk1gv4isy-hello-2.10 ``` * Show the closure sizes of every path in the current NixOS system @@ -14,8 +14,8 @@ R""( ```console # nix path-info --recursive --closure-size /run/current-system | sort -nk2 - /nix/store/hl5xwp9kdrd1zkm0idm3kkby9q66z404-empty 96 - /nix/store/27324qvqhnxj3rncazmxc4mwy79kz8ha-nameservers 112 + /nix/store/zlnmjjbpv5pwwv911qp0grqi25y80wbs-empty 96 + /nix/store/v40fjpq45135avrmnfm8klbvdhf0dcp7-nameservers 112 … /nix/store/539jkw9a8dyry7clcv60gk6na816j7y8-etc 5783255504 /nix/store/zqamz3cz4dbzfihki2mk7a63mbkxz9xq-nixos-system-machine-20.09.20201112.3090c65 5887562256 @@ -26,8 +26,8 @@ R""( ```console # nix path-info --recursive --size --closure-size --human-readable nixpkgs#rustc - /nix/store/01rrgsg5zk3cds0xgdsq40zpk6g51dz9-ncurses-6.2-dev 386.7 KiB 69.1 MiB - /nix/store/0q783wnvixpqz6dxjp16nw296avgczam-libpfm-4.11.0 5.9 MiB 37.4 MiB + /nix/store/klarszqikbvf6n70581w0381zb7rlzri-ncurses-6.2-dev 386.7 KiB 69.1 MiB + /nix/store/30rva1kafnr6fyf8y5xxlpnwixvdpv4w-libpfm-4.11.0 5.9 MiB 37.4 MiB … ``` diff --git a/src/nix/prefetch.cc b/src/nix/prefetch.cc index d494b0986864..781677cb4fe5 100644 --- a/src/nix/prefetch.cc +++ b/src/nix/prefetch.cc @@ -15,6 +15,8 @@ #include "nix/util/environment-variables.hh" #include "nix/util/url.hh" #include "nix/store/path.hh" +#include "nix/util/override-provenance-source-accessor.hh" +#include "nix/fetchers/provenance.hh" #include "man-pages.hh" @@ -143,7 +145,15 @@ std::tuple prefetchFile( Activity act(*logger, lvlChatty, actUnknown, fmt("adding '%s' to the store", url.to_string())); - auto info = store->addToStoreSlow(name, 
makeFSSourceAccessor(tmpFile), method, hashAlgo, {}, expectedHash); + auto info = store->addToStoreSlow( + name, + {make_ref( + makeFSSourceAccessor(tmpFile), + unpack ? nullptr : std::make_shared(url.to_string()))}, + method, + hashAlgo, + {}, + expectedHash); storePath = info.path; assert(info.ca); hash = info.ca->hash; diff --git a/src/nix/print-dev-env.md b/src/nix/print-dev-env.md index a8ce9d36ae78..fd84b8afe862 100644 --- a/src/nix/print-dev-env.md +++ b/src/nix/print-dev-env.md @@ -25,7 +25,7 @@ R""( "variables": { "src": { "type": "exported", - "value": "/nix/store/3x7dwzq014bblazs7kq20p9hyzz0qh8g-hello-2.10.tar.gz" + "value": "/nix/store/8alrpdaasjd1x6g1fczchmzbpqm936a3-hello-2.10.tar.gz" }, "postUnpackHooks": { "type": "array", diff --git a/src/nix/profile-history.md b/src/nix/profile-history.md index f0bfe5037912..0c9a340ddf0d 100644 --- a/src/nix/profile-history.md +++ b/src/nix/profile-history.md @@ -7,7 +7,7 @@ R""( ```console # nix profile history Version 508 (2020-04-10): - flake:nixpkgs#legacyPackages.x86_64-linux.awscli: ∅ -> 1.17.13 + flake:nixpkgs#legacyPackages.x86_64-linux.awscli: 1.17.13 added Version 509 (2020-05-16) <- 508: flake:nixpkgs#legacyPackages.x86_64-linux.awscli: 1.17.13 -> 1.18.211 @@ -20,7 +20,7 @@ between subsequent versions of a profile. It only shows top-level packages, not dependencies; for that, use [`nix profile diff-closures`](./nix3-profile-diff-closures.md). -The addition of a package to a profile is denoted by the string `∅ ->` -*version*, whereas the removal is denoted by *version* `-> ∅`. +The addition of a package to a profile is denoted by the string +*version* `added`, whereas the removal is denoted by *version* ` removed`. 
)"" diff --git a/src/nix/profile-list.md b/src/nix/profile-list.md index 9811b9ec920b..89ac228a393c 100644 --- a/src/nix/profile-list.md +++ b/src/nix/profile-list.md @@ -10,13 +10,13 @@ R""( Flake attribute: legacyPackages.x86_64-linux.gdb Original flake URL: flake:nixpkgs Locked flake URL: github:NixOS/nixpkgs/7b38b03d76ab71bdc8dc325e3f6338d984cc35ca - Store paths: /nix/store/indzcw5wvlhx6vwk7k4iq29q15chvr3d-gdb-11.1 + Store paths: /nix/store/i6i08pl20rh0lm46g38wk3bfnvhdl43d-gdb-11.1 Name: blender-bin Flake attribute: packages.x86_64-linux.default Original flake URL: flake:blender-bin Locked flake URL: github:edolstra/nix-warez/91f2ffee657bf834e4475865ae336e2379282d34?dir=blender - Store paths: /nix/store/i798sxl3j40wpdi1rgf391id1b5klw7g-blender-bin-3.1.2 + Store paths: /nix/store/rlgr8vjhcv6v2rv7ljgl0pr6g74r0cg9-blender-bin-3.1.2 ``` Note that you can unambiguously rebuild a package from a profile diff --git a/src/nix/profile-remove.md b/src/nix/profile-remove.md index e7e5e0dfb94b..2d32447d49c0 100644 --- a/src/nix/profile-remove.md +++ b/src/nix/profile-remove.md @@ -24,7 +24,7 @@ R""( * Remove a package by store path: ```console - # nix profile remove /nix/store/rr3y0c6zyk7kjjl8y19s4lsrhn4aiq1z-hello-2.10 + # nix profile remove /nix/store/xwjlac5ay8hw3djdm5llhjz79isgngbl-hello-2.10 ``` # Description diff --git a/src/nix/profile.cc b/src/nix/profile.cc index 822c8046eb8a..53fac7d24545 100644 --- a/src/nix/profile.cc +++ b/src/nix/profile.cc @@ -288,11 +288,11 @@ struct ProfileManifest while (i != prev.elements.end() || j != cur.elements.end()) { if (j != cur.elements.end() && (i == prev.elements.end() || i->first > j->first)) { - logger->cout("%s%s: ∅ -> %s", indent, j->second.identifier(), j->second.versions()); + logger->cout("%s%s: %s added", indent, j->second.identifier(), j->second.versions()); changes = true; ++j; } else if (i != prev.elements.end() && (j == cur.elements.end() || i->first < j->first)) { - logger->cout("%s%s: %s -> ∅", indent, 
i->second.identifier(), i->second.versions()); + logger->cout("%s%s: %s removed", indent, i->second.identifier(), i->second.versions()); changes = true; ++i; } else { @@ -313,11 +313,11 @@ struct ProfileManifest }; static std::map>> -builtPathsPerInstallable(const std::vector, BuiltPathWithResult>> & builtPaths) +builtPathsPerInstallable(const std::vector & builtPaths) { std::map>> res; - for (auto & [installable, builtPath] : builtPaths) { - auto & r = res.insert({&*installable, + for (auto & b : builtPaths) { + auto & r = res.insert({&*b.installable, { {}, make_ref(), @@ -327,6 +327,7 @@ builtPathsPerInstallable(const std::vector, BuiltPath (e.g. meta.priority fields) if the installable returned multiple derivations. So pick one arbitrarily. FIXME: print a warning? */ + auto builtPath = b.getSuccess(); r.first.push_back(builtPath.path); r.second = builtPath.info; } @@ -363,8 +364,10 @@ struct CmdProfileAdd : InstallablesCommand, MixDefaultProfile { ProfileManifest manifest(*getEvalState(), *profile); - auto builtPaths = builtPathsPerInstallable( - Installable::build2(getEvalStore(), store, Realise::Outputs, installables, bmNormal)); + auto buildResults = Installable::build2(getEvalStore(), store, Realise::Outputs, installables, bmNormal); + Installable::throwBuildErrors(buildResults, *store); + + auto builtPaths = builtPathsPerInstallable(buildResults); for (auto & installable : installables) { ProfileElement element; @@ -726,11 +729,11 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf this, getEvalState(), FlakeRef(element.source->originalRef), - "", + "." 
+ element.source->attrPath, // absolute lookup element.source->outputs, - Strings{element.source->attrPath}, - Strings{}, - lockFlags); + StringSet{}, + lockFlags, + getDefaultFlakeSchemas()); auto derivedPaths = installable->toDerivedPaths(); if (derivedPaths.empty()) @@ -766,8 +769,10 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf return; } - auto builtPaths = builtPathsPerInstallable( - Installable::build2(getEvalStore(), store, Realise::Outputs, installables, bmNormal)); + auto buildResults = Installable::build2(getEvalStore(), store, Realise::Outputs, installables, bmNormal); + Installable::throwBuildErrors(buildResults, *store); + + auto builtPaths = builtPathsPerInstallable(buildResults); for (size_t i = 0; i < installables.size(); ++i) { auto & installable = installables.at(i); diff --git a/src/nix/provenance-show.md b/src/nix/provenance-show.md new file mode 100644 index 000000000000..526fbd54c8ea --- /dev/null +++ b/src/nix/provenance-show.md @@ -0,0 +1,29 @@ +R""( + +# Examples + +* Show the provenance of a store path: + + ```console + # nix provenance show /run/current-system + /nix/store/k145bdxhdb89i4fkvgdisdz1yh2wiymm-nixos-system-machine-25.05.20251210.d2b1213 + ← copied from cache.flakehub.com + ← built from derivation /nix/store/w3p3xkminq61hs00kihd34w1dglpj5s9-nixos-system-machine-25.05.20251210.d2b1213.drv (output out) on build-machine for x86_64-linux + ← instantiated from flake output github:my-org/my-repo/6b03eb949597fe96d536e956a2c14da9901dbd21?dir=machine#nixosConfigurations.machine.config.system.build.toplevel + ``` + +# Description + +Show the provenance chain of one or more store paths. For each store path, this displays where it came from: what binary cache it was copied from, what flake it was built from, and so on. + +The provenance chain shows the history of how the store path came to exist, including: + +- **Copied**: The path was copied from another Nix store, typically a binary cache. 
+- **Built**: The path was built from a derivation. +- **Flake evaluation**: The derivation was instantiated during the evaluation of a flake output. +- **Fetched**: The path was obtained by fetching a source tree. +- **Meta**: Metadata associated with the derivation. + +Note: if you want provenance in JSON format, use the `provenance` field returned by `nix path-info --json`. + +)"" diff --git a/src/nix/provenance-verify.md b/src/nix/provenance-verify.md new file mode 100644 index 000000000000..e05cb95bf249 --- /dev/null +++ b/src/nix/provenance-verify.md @@ -0,0 +1,21 @@ +R""( + +# Examples + +* Verify the provenance of a store path: + + ```console + # nix provenance verify /run/current-system + ``` + +# Description + +Verify the provenance of one or more store paths. This checks whether the store paths can be rebuilt from source. Specifically, it verifies the following: + +* That source trees can be fetched. +* That flake evaluations result in the instantiation of the desired store paths (most commonly, store derivations). +* That derivations can be successfully rebuilt, producing identical outputs. + +A non-zero exit code is returned if any of the verifications fail. 
+ +)"" diff --git a/src/nix/provenance.cc b/src/nix/provenance.cc new file mode 100644 index 000000000000..2df68128f458 --- /dev/null +++ b/src/nix/provenance.cc @@ -0,0 +1,578 @@ +#include "nix/cmd/command.hh" +#include "nix/store/store-api.hh" +#include "nix/store/store-open.hh" +#include "nix/expr/provenance.hh" +#include "nix/store/provenance.hh" +#include "nix/flake/provenance.hh" +#include "nix/fetchers/provenance.hh" +#include "nix/util/provenance.hh" +#include "nix/util/json-utils.hh" +#include "nix/fetchers/fetch-to-store.hh" +#include "nix/util/exit.hh" +#include "nix/cmd/installable-flake.hh" +#include "nix/store/derivations.hh" +#include "nix/store/filetransfer.hh" +#include "nix/util/callback.hh" + +#include +#include +#include +#include +#include + +#define TAB " " + +using namespace nix; + +struct CmdProvenance : NixMultiCommand +{ + CmdProvenance() + : NixMultiCommand("provenance", RegisterCommand::getCommandsFor({"provenance"})) + { + } + + std::string description() override + { + return "query and check the provenance of store paths"; + } + + std::optional experimentalFeature() override + { + return Xp::Provenance; + } + + Category category() override + { + return catUtility; + } +}; + +static auto rCmdProvenance = registerCommand("provenance"); + +struct CmdProvenanceShow : StorePathsCommand +{ + std::string description() override + { + return "show the provenance chain of store paths"; + } + + std::string doc() override + { + return +#include "provenance-show.md" + ; + } + + void displayProvenance(Store & store, const StorePath & path, std::shared_ptr provenance) + { + while (provenance) { + if (auto copied = std::dynamic_pointer_cast(provenance)) { + logger->cout("← copied from " ANSI_BOLD "%s" ANSI_NORMAL, copied->from); + provenance = copied->next; + } + + else if (auto build = std::dynamic_pointer_cast(provenance)) { + logger->cout( + "← built from derivation " ANSI_BOLD "%s" ANSI_NORMAL " (output " ANSI_BOLD "%s" ANSI_NORMAL + ") on " 
ANSI_BOLD "%s" ANSI_NORMAL " for " ANSI_BOLD "%s" ANSI_NORMAL, + store.printStorePath(build->drvPath), + build->output, + build->buildHost.value_or("unknown host").c_str(), + build->system); + provenance = build->next; + } + + else if (auto flake = std::dynamic_pointer_cast(provenance)) { + // Collapse subpath/tree provenance into the flake provenance for legibility. + auto next = flake->next; + CanonPath flakePath("/flake.nix"); + if (auto subpath = std::dynamic_pointer_cast(next)) { + next = subpath->next; + flakePath = subpath->subpath; + } + if (auto tree = std::dynamic_pointer_cast(next)) { + FlakeRef flakeRef( + fetchers::Input::fromAttrs(fetchSettings, fetchers::jsonToAttrs(*tree->attrs)), + Path(flakePath.parent().value_or(CanonPath::root).rel())); + logger->cout( + "← %sinstantiated from %sflake output " ANSI_BOLD "%s#%s" ANSI_NORMAL, + flake->pure ? "" : ANSI_RED "impurely" ANSI_NORMAL " ", + flakeRef.input.isLocked(fetchSettings) ? "" : ANSI_RED "unlocked" ANSI_NORMAL " ", + flakeRef.to_string(), + flake->flakeOutput); + break; + } else { + logger->cout("← instantiated from flake output " ANSI_BOLD "%s" ANSI_NORMAL, flake->flakeOutput); + provenance = flake->next; + } + } + + else if (auto tree = std::dynamic_pointer_cast(provenance)) { + auto input = fetchers::Input::fromAttrs(fetchSettings, fetchers::jsonToAttrs(*tree->attrs)); + logger->cout( + "← from %stree " ANSI_BOLD "%s" ANSI_NORMAL, + input.isLocked(fetchSettings) ? 
"" : ANSI_RED "unlocked" ANSI_NORMAL " ", + input.to_string()); + break; + } + + else if (auto subpath = std::dynamic_pointer_cast(provenance)) { + logger->cout("← from file " ANSI_BOLD "%s" ANSI_NORMAL, subpath->subpath.abs()); + provenance = subpath->next; + } + + else if (auto drv = std::dynamic_pointer_cast(provenance)) { + logger->cout("← with derivation metadata"); + std::istringstream stream((*drv->meta).dump(2)); + for (std::string line; std::getline(stream, line);) { + logger->cout(" %s", line); + } + provenance = drv->next; + } + + else if (auto fetchurl = std::dynamic_pointer_cast(provenance)) { + logger->cout("← fetched from URL " ANSI_BOLD "%s" ANSI_NORMAL, fetchurl->url); + break; + } + + else { + // Unknown or unhandled provenance type + auto json = provenance->to_json(); + auto typeIt = json.find("type"); + if (typeIt != json.end() && typeIt->is_string()) + logger->cout("← " ANSI_RED "unknown provenance type '%s'" ANSI_NORMAL, typeIt->get()); + else + logger->cout("← " ANSI_RED "unknown provenance type" ANSI_NORMAL); + break; + } + } + } + + void run(ref store, StorePaths && storePaths) override + { + bool first = true; + + for (auto & storePath : storePaths) { + auto info = store->queryPathInfo(storePath); + if (!first) + logger->cout(""); + first = false; + logger->cout(ANSI_BOLD "%s" ANSI_NORMAL, store->printStorePath(info->path)); + + if (info->provenance) + displayProvenance(*store, storePath, info->provenance); + else + logger->cout(ANSI_RED " (no provenance information available)" ANSI_NORMAL); + } + } +}; + +static auto rCmdProvenanceShow = registerCommand2({"provenance", "show"}); + +/** + * A wrapper around an arbitrary store that intercepts `addToStore()` + * and `addToStoreFromDump()` calls to keep track of added paths. 
+ */ +struct TrackingStore : public Store +{ + ref next; + boost::unordered_flat_set instantiatedPaths; + + TrackingStore(ref next) + : Store(next->config) + , next(next) + { + } + + void addToStore(const ValidPathInfo & info, Source & narSource, RepairFlag repair, CheckSigsFlag checkSigs) override + { + next->addToStore(info, narSource, repair, checkSigs); + instantiatedPaths.insert(info.path); + // FIXME: we should really just disable the path info cache, since the underlying store already does caching. + invalidatePathInfoCacheFor(info.path); + } + + StorePath addToStore( + std::string_view name, + const SourcePath & path, + ContentAddressMethod method, + HashAlgorithm hashAlgo, + const StorePathSet & references, + PathFilter & filter, + RepairFlag repair) override + { + auto storePath = next->addToStore(name, path, method, hashAlgo, references, filter, repair); + instantiatedPaths.insert(storePath); + invalidatePathInfoCacheFor(storePath); + return storePath; + } + + StorePath addToStoreFromDump( + Source & dump, + std::string_view name, + FileSerialisationMethod dumpMethod, + ContentAddressMethod hashMethod, + HashAlgorithm hashAlgo, + const StorePathSet & references, + RepairFlag repair, + std::shared_ptr provenance) override + { + auto storePath = + next->addToStoreFromDump(dump, name, dumpMethod, hashMethod, hashAlgo, references, repair, provenance); + instantiatedPaths.insert(storePath); + invalidatePathInfoCacheFor(storePath); + return storePath; + } + + void queryPathInfoUncached( + const StorePath & path, Callback> callback) noexcept override + { + try { + callback(std::make_shared(*next->queryPathInfo(path))); + } catch (InvalidPath &) { + callback(nullptr); + } catch (...) 
{ + callback.rethrow(); + } + } + + void queryRealisationUncached( + const DrvOutput & output, Callback> callback) noexcept override + { + next->queryRealisation(output, std::move(callback)); + } + + std::optional queryPathFromHashPart(const std::string & hashPart) override + { + return next->queryPathFromHashPart(hashPart); + } + + void registerDrvOutput(const Realisation & output) override + { + next->registerDrvOutput(output); + } + + ref getFSAccessor(bool requireValidPath) override + { + return next->getFSAccessor(requireValidPath); + } + + std::shared_ptr getFSAccessor(const StorePath & path, bool requireValidPath) override + { + return next->getFSAccessor(path, requireValidPath); + } + + std::optional isTrustedClient() override + { + return next->isTrustedClient(); + } +}; + +struct CmdProvenanceVerify : StorePathsCommand +{ + bool noRebuild = false; + + CmdProvenanceVerify() + { + addFlag({ + .longName = "no-rebuild", + .description = "Skip rebuilding derivations to verify reproducibility.", + .handler = {&noRebuild, true}, + }); + } + + std::string description() override + { + return "verify the provenance of store paths"; + } + + std::string doc() override + { + return +#include "provenance-verify.md" + ; + } + + bool verifySourcePath(Store & store, const StorePath & expectedPath, const SourcePath & sourcePath) + { + auto computedPath = fetchToStore2(fetchSettings, store, sourcePath, FetchMode::Copy, expectedPath.name()).first; + if (computedPath != expectedPath) { + logger->cout( + "❌ " ANSI_RED "store path mismatch for source '%s': expected '%s' but got '%s'" ANSI_NORMAL, + sourcePath.to_string(), + store.printStorePath(expectedPath), + store.printStorePath(computedPath)); + return false; + } else { + logger->cout("✅ verified store path for source '%s'", sourcePath.to_string()); + return true; + } + } + + using CheckResult = std::variant< + std::pair>, + std::pair, + std::monostate>; + + std::pair + verify(Store & store, std::optional path, 
std::shared_ptr provenance) + { + if (auto copied = std::dynamic_pointer_cast(provenance)) { + if (!path) { + logger->cout("❌ " ANSI_RED "cannot verify copied provenance without a store path" ANSI_NORMAL); + return {false, std::monostate{}}; + } + bool success = true; + auto fromStore = openStore(copied->from); + auto localInfo = store.queryPathInfo(*path); + auto fromInfo = fromStore->queryPathInfo(*path); + if (localInfo->narHash != fromInfo->narHash) { + logger->cout( + "❌ " ANSI_RED "NAR hash mismatch in origin store '%s': should be '%s' but is '%s'" ANSI_NORMAL, + copied->from, + localInfo->narHash.to_string(HashFormat::SRI, true), + fromInfo->narHash.to_string(HashFormat::SRI, true)); + success = false; + } else + logger->cout("✅ verified NAR hash in origin store '%s'", copied->from); + auto [nextSuccess, result] = verify(store, path, copied->next); + return {success && nextSuccess, std::move(result)}; + } + + else if (auto build = std::dynamic_pointer_cast(provenance)) { + auto success = verify(store, build->drvPath, build->next).first; + + // Verify that `path` is the expected output of the derivation. + auto outputMap = store.queryPartialDerivationOutputMap(build->drvPath); + auto it = outputMap.find(build->output); + if (it == outputMap.end()) { + logger->cout( + "❌ " ANSI_RED "derivation '%s' does not have expected output '%s'" ANSI_NORMAL, + store.printStorePath(build->drvPath), + build->output); + return {false, std::monostate{}}; + } else if (!it->second) { + // Note: this is not an error, should we even print a message? 
+ logger->cout( + "❓ output '%s' of derivation '%s' is not statically known", + build->output, + store.printStorePath(build->drvPath)); + } else if (*it->second != path) { + logger->cout( + "❌ " ANSI_RED "output '%s' of derivation '%s' is '%s', expected '%s'" ANSI_NORMAL, + build->output, + store.printStorePath(build->drvPath), + store.printStorePath(*it->second), + store.printStorePath(*path)); + return {false, std::monostate{}}; + } + + // Do a check rebuild to verify that the derivation + // produces the same output. + if (noRebuild) { + logger->cout( + "⏭️ skipped rebuild of derivation '%s^%s'", store.printStorePath(build->drvPath), build->output); + } else { + try { + store.buildPaths( + {DerivedPath::Built{ + .drvPath = make_ref(SingleDerivedPath::Opaque{build->drvPath}), + .outputs = OutputsSpec::Names{build->output}, + }}, + bmCheck); + logger->cout("✅ rebuilt derivation '%s^%s'", store.printStorePath(build->drvPath), build->output); + } catch (Error & e) { + logger->cout( + "❌ " ANSI_RED "rebuild of derivation '%s^%s' failed: %s" ANSI_NORMAL, + store.printStorePath(build->drvPath), + build->output, + e.what()); + success = false; + } + } + + return {success, std::monostate{}}; + } + + else if (auto flake = std::dynamic_pointer_cast(provenance)) { + // Fetch the flake source. + auto [success, _res] = verify(store, {}, flake->next); + + auto res = std::get_if>(&_res); + if (!res) + return {false, std::monostate{}}; + + // Evaluate the flake output. 
+ flake::LockFlags lockFlags{ + .updateLockFile = false, + .failOnUnlocked = true, + .useRegistries = false, + .allowUnlocked = false, + }; + + if (res->second.path.baseName() != "flake.nix") { + logger->cout( + "❌ " ANSI_RED "expected flake source to be a 'flake.nix' file, but got '%s'" ANSI_NORMAL, + res->second.path.abs()); + return {false, std::monostate{}}; + } + + auto trackingStore = make_ref(getEvalStore()); + + auto evalState = + ref(std::allocate_shared( + traceable_allocator(), + LookupPath{}, + ref(trackingStore), + fetchSettings, + evalSettings, + getStore())); + + InstallableFlake installable{ + nullptr, + evalState, + FlakeRef{std::move(res->first), std::string(res->second.path.parent().value().rel())}, + "." + flake->flakeOutput, + ExtendedOutputsSpec::Default{}, // FIXME: record this in the provenance? + {}, + lockFlags, + {}}; + + // We have to disable the eval cache to ensure that we see which store paths get instantiated. + installable.useEvalCache = false; + + installable.toDerivedPaths(); + + evalState->waitForAllPaths(); + + logger->cout("✅ evaluated '%s#%s'", installable.flakeRef.to_string(true), flake->flakeOutput); + + if (path) { + if (!trackingStore->instantiatedPaths.contains(*path)) { + logger->cout( + "❌ " ANSI_RED "evaluation did not re-instantiate path '%s'" ANSI_NORMAL, + store.printStorePath(*path)); + return {false, std::monostate{}}; + } + + logger->cout("✅ re-instantiated path '%s'", store.printStorePath(*path)); + } + + return {success, std::monostate{}}; + } + + else if (auto tree = std::dynamic_pointer_cast(provenance)) { + auto input = fetchers::Input::fromAttrs(fetchSettings, fetchers::jsonToAttrs(*tree->attrs)); + try { + auto [accessor, final] = input.getAccessor(fetchSettings, store); + if (!input.isLocked(fetchSettings)) + logger->cout("❓ fetched tree '%s', but it's unlocked", input.to_string()); + else + // FIXME: check NAR hash? 
+ logger->cout("✅ fetched tree '%s'", input.to_string()); + + bool success = !path || verifySourcePath(store, *path, SourcePath(accessor, CanonPath::root)); + + return {success, std::make_pair(std::move(final), accessor)}; + } catch (Error & e) { + logger->cout("❌ " ANSI_RED "failed to fetch tree '%s': %s" ANSI_NORMAL, input.to_string(), e.what()); + return {false, std::monostate{}}; + } + } + + else if (auto subpath = std::dynamic_pointer_cast(provenance)) { + auto [success, result] = verify(store, {}, subpath->next); + if (auto p = std::get_if>>(&result)) { + + auto sourcePath = SourcePath(p->second, subpath->subpath); + + if (path && !verifySourcePath(store, *path, sourcePath)) + success = false; + + return {success, std::make_pair(std::move(p->first), std::move(sourcePath))}; + } else + return {false, std::monostate{}}; + } + + else if (auto drv = std::dynamic_pointer_cast(provenance)) + return verify(store, path, drv->next); + + else if (auto fetchurl = std::dynamic_pointer_cast(provenance)) { + if (!path) + return {false, std::monostate{}}; + + auto info = store.queryPathInfo(*path); + + if (!info->ca) { + logger->cout( + "❌ " ANSI_RED "cannot verify URL '%s' without a content address for path '%s'" ANSI_NORMAL, + fetchurl->url, + store.printStorePath(*path)); + return {false, std::monostate{}}; + } + + if (info->ca->method != ContentAddressMethod::Raw::Flat) { + logger->cout( + "❌ " ANSI_RED + "cannot verify URL '%s' with unsupported content address method for path '%s'" ANSI_NORMAL, + fetchurl->url, + store.printStorePath(*path)); + return {false, std::monostate{}}; + } + + HashSink hashSink{info->ca->hash.algo}; + FileTransferRequest req(fetchurl->url); + req.decompress = false; + getFileTransfer()->download(std::move(req), hashSink); + auto hash = hashSink.finish().hash; + + if (hash != info->ca->hash) { + logger->cout( + "❌ " ANSI_RED "hash mismatch for URL '%s': expected '%s' but got '%s'" ANSI_NORMAL, + fetchurl->url, + 
info->ca->hash.to_string(HashFormat::SRI, true), + hash.to_string(HashFormat::SRI, true)); + return {false, std::monostate{}}; + } + + logger->cout("✅ verified hash of URL '%s'", fetchurl->url); + return {true, std::monostate{}}; + } + + else if (!provenance) { + logger->cout("❓ " ANSI_RED "missing further provenance" ANSI_NORMAL); + return {false, std::monostate{}}; + } + + else { + logger->cout("❓ " ANSI_RED "unknown provenance type" ANSI_NORMAL); + return {false, std::monostate{}}; + } + } + + void run(ref store, StorePaths && storePaths) override + { + bool first = true; + bool success = true; + + for (auto & storePath : storePaths) { + auto info = store->queryPathInfo(storePath); + if (!first) + logger->cout(""); + first = false; + logger->cout(ANSI_BOLD "%s" ANSI_NORMAL, store->printStorePath(info->path)); + + if (info->provenance) + success &= verify(*store, storePath, info->provenance).first; + else { + logger->cout(ANSI_RED " (no provenance information available)" ANSI_NORMAL); + success = false; + } + } + + if (!success) + throw Exit(1); + } +}; + +static auto rCmdProvenanceVerify = registerCommand2({"provenance", "verify"}); diff --git a/src/nix/ps.cc b/src/nix/ps.cc new file mode 100644 index 000000000000..9ae9d97bf98c --- /dev/null +++ b/src/nix/ps.cc @@ -0,0 +1,146 @@ +#include "nix/cmd/command.hh" +#include "nix/main/common-args.hh" +#include "nix/main/shared.hh" +#include "nix/store/store-api.hh" +#include "nix/store/store-cast.hh" +#include "nix/store/active-builds.hh" +#include "nix/util/table.hh" +#include "nix/util/terminal.hh" + +#include + +using namespace nix; + +struct CmdPs : MixJSON, StoreCommand +{ + std::string description() override + { + return "list active builds"; + } + + Category category() override + { + return catUtility; + } + + std::string doc() override + { + return +#include "ps.md" + ; + } + + void run(ref store) override + { + auto & tracker = require(*store); + + auto builds = tracker.queryActiveBuilds(); + + if (json) { + 
printJSON(nlohmann::json(builds)); + return; + } + + if (builds.empty()) { + notice("No active builds."); + return; + } + + /* Helper to format user info: show name if available, else UID */ + auto formatUser = [](const UserInfo & user) -> std::string { + return user.name ? *user.name : std::to_string(user.uid); + }; + + Table table; + + /* Add column headers. */ + table.push_back({{"USER"}, {"PID"}, {"CPU", TableCell::Alignment::Right}, {"DERIVATION/COMMAND"}}); + + for (const auto & build : builds) { + /* Calculate CPU time - use cgroup stats if available, otherwise sum process times. */ + std::chrono::microseconds cpuTime = build.utime && build.stime ? *build.utime + *build.stime : [&]() { + std::chrono::microseconds total{0}; + for (const auto & process : build.processes) + total += process.utime.value_or(std::chrono::microseconds(0)) + + process.stime.value_or(std::chrono::microseconds(0)) + + process.cutime.value_or(std::chrono::microseconds(0)) + + process.cstime.value_or(std::chrono::microseconds(0)); + return total; + }(); + + /* Add build summary row. */ + table.push_back( + {formatUser(build.mainUser), + std::to_string(build.mainPid), + {fmt("%.1fs", + std::chrono::duration_cast>(cpuTime) + .count()), + TableCell::Alignment::Right}, + fmt(ANSI_BOLD "%s" ANSI_NORMAL " (wall=%ds)", + store->printStorePath(build.derivation), + time(nullptr) - build.startTime)}); + + if (build.processes.empty()) { + table.push_back( + {formatUser(build.mainUser), + std::to_string(build.mainPid), + {"", TableCell::Alignment::Right}, + fmt("%s" ANSI_ITALIC "(no process info)" ANSI_NORMAL, treeLast)}); + } else { + /* Recover the tree structure of the processes. 
*/ + std::set pids; + for (auto & process : build.processes) + pids.insert(process.pid); + + using Processes = std::set; + std::map children; + Processes rootProcesses; + for (auto & process : build.processes) { + if (pids.contains(process.parentPid)) + children[process.parentPid].insert(&process); + else + rootProcesses.insert(&process); + } + + /* Render the process tree. */ + [&](this auto const & visit, const Processes & processes, std::string_view prefix) -> void { + for (const auto & [n, process] : enumerate(processes)) { + bool last = n + 1 == processes.size(); + + // Format CPU time if available + std::string cpuInfo; + if (process->utime || process->stime || process->cutime || process->cstime) { + auto totalCpu = process->utime.value_or(std::chrono::microseconds(0)) + + process->stime.value_or(std::chrono::microseconds(0)) + + process->cutime.value_or(std::chrono::microseconds(0)) + + process->cstime.value_or(std::chrono::microseconds(0)); + auto totalSecs = + std::chrono::duration_cast>( + totalCpu) + .count(); + cpuInfo = fmt("%.1fs", totalSecs); + } + + // Format argv with tree structure + auto argv = concatStringsSep( + " ", tokenizeString>(concatStringsSep(" ", process->argv))); + + table.push_back( + {formatUser(process->user), + std::to_string(process->pid), + {cpuInfo, TableCell::Alignment::Right}, + fmt("%s%s%s", prefix, last ? treeLast : treeConn, argv)}); + + visit(children[process->pid], last ? prefix + treeNull : prefix + treeLine); + } + }(rootProcesses, ""); + } + } + + auto width = isTTY() && isatty(STDOUT_FILENO) ? 
getWindowWidth() : std::numeric_limits::max(); + + printTable(std::cout, table, width); + } +}; + +static auto rCmdPs = registerCommand2({"ps"}); diff --git a/src/nix/ps.md b/src/nix/ps.md new file mode 100644 index 000000000000..e48a308e6965 --- /dev/null +++ b/src/nix/ps.md @@ -0,0 +1,27 @@ +R"( + +# Examples + +* Show all active builds: + + ```console + # nix ps + USER PID CPU DERIVATION/COMMAND + nixbld11 3534394 110.2s /nix/store/lzvdxlbr6xjd9w8py4nd2y2nnqb9gz7p-nix-util-tests-3.13.2.drv (wall=8s) + nixbld11 3534394 0.8s └───bash -e /nix/store/jwqf79v5p51x9mv8vx20fv9mzm2x7kig-source-stdenv.sh /nix/store/shkw4qm9qcw5sc5n1k5jznc83ny02 + nixbld11 3534751 36.3s └───ninja -j24 + nixbld11 3535637 0.0s ├───/nix/store/0v2jfvx71l1zn14l97pznvbqnhiq3pyd-gcc-14.3.0/bin/g++ -fPIC -fstack-clash-protection -O2 -U_ + nixbld11 3535639 0.1s │ └───/nix/store/0v2jfvx71l1zn14l97pznvbqnhiq3pyd-gcc-14.3.0/libexec/gcc/x86_64-unknown-linux-gnu/14.3. + nixbld11 3535658 0.0s └───/nix/store/0v2jfvx71l1zn14l97pznvbqnhiq3pyd-gcc-14.3.0/bin/g++ -fPIC -fstack-clash-protection -O2 -U_ + nixbld1 3534377 1.8s /nix/store/nh2dx9cqcy9lw4d4rvd0dbsflwdsbzdy-patchelf-0.18.0.drv (wall=5s) + nixbld1 3534377 1.8s └───bash -e /nix/store/xk05lkk4ij6pc7anhdbr81appiqbcb01-default-builder.sh + nixbld1 3535074 0.0s └───/nix/store/21ymxxap3y8hb9ijcfah8ani9cjpv8m6-bash-5.2p37/bin/bash ./configure --disable-dependency-trackin + ``` + +# Description + +This command lists all currently running Nix builds. +For each build, it shows the derivation path and the main process ID. +On Linux and macOS, it also shows the child processes of each build. 
+ +)" diff --git a/src/nix/registry-list.md b/src/nix/registry-list.md index 30b6e29d8aa5..a3eb65c89f5f 100644 --- a/src/nix/registry-list.md +++ b/src/nix/registry-list.md @@ -7,7 +7,7 @@ R""( ```console # nix registry list user flake:dwarffs github:edolstra/dwarffs/d181d714fd36eb06f4992a1997cd5601e26db8f5 - system flake:nixpkgs path:/nix/store/fxl9mrm5xvzam0lxi9ygdmksskx4qq8s-source?lastModified=1605220118&narHash=sha256-Und10ixH1WuW0XHYMxxuHRohKYb45R%2fT8CwZuLd2D2Q=&rev=3090c65041104931adda7625d37fa874b2b5c124 + system flake:nixpkgs path:/nix/store/jschy88crdk7jqqbk1p2b4l1c9gljl9b-source?lastModified=1605220118&narHash=sha256-Und10ixH1WuW0XHYMxxuHRohKYb45R%2fT8CwZuLd2D2Q=&rev=3090c65041104931adda7625d37fa874b2b5c124 global flake:blender-bin github:edolstra/nix-warez?dir=blender global flake:dwarffs github:edolstra/dwarffs … diff --git a/src/nix/repl.cc b/src/nix/repl.cc index 19f02e759c5f..c5787166ab64 100644 --- a/src/nix/repl.cc +++ b/src/nix/repl.cc @@ -45,9 +45,9 @@ struct CmdRepl : RawInstallablesCommand std::vector files; - Strings getDefaultFlakeAttrPaths() override + StringSet getRoles() override { - return {""}; + return {"nix-repl"}; } bool forceImpureByDefault() override diff --git a/src/nix/repl.md b/src/nix/repl.md index 32c08e24b240..e608dabf6f9c 100644 --- a/src/nix/repl.md +++ b/src/nix/repl.md @@ -36,7 +36,7 @@ R""( Loading Installable ''... Added 1 variables. - # nix repl --extra-experimental-features 'flakes' nixpkgs + # nix repl nixpkgs Loading Installable 'flake:nixpkgs#'... Added 5 variables. diff --git a/src/nix/run.cc b/src/nix/run.cc index 324b736a6a5b..8b7a518c9a69 100644 --- a/src/nix/run.cc +++ b/src/nix/run.cc @@ -134,23 +134,9 @@ struct CmdRun : InstallableValueCommand, MixEnvironment ; } - Strings getDefaultFlakeAttrPaths() override + StringSet getRoles() override { - Strings res{ - "apps." + settings.thisSystem.get() + ".default", - "defaultApp." 
+ settings.thisSystem.get(), - }; - for (auto & s : SourceExprCommand::getDefaultFlakeAttrPaths()) - res.push_back(s); - return res; - } - - Strings getDefaultFlakeAttrPathPrefixes() override - { - Strings res{"apps." + settings.thisSystem.get() + "."}; - for (auto & s : SourceExprCommand::getDefaultFlakeAttrPathPrefixes()) - res.push_back(s); - return res; + return {"nix-run"}; } void run(ref store, ref installable) override diff --git a/src/nix/search.cc b/src/nix/search.cc index dac60ceba573..509287b80206 100644 --- a/src/nix/search.cc +++ b/src/nix/search.cc @@ -11,6 +11,7 @@ #include "nix/expr/attr-path.hh" #include "nix/util/hilite.hh" #include "nix/util/strings-inline.hh" +#include "nix/expr/parallel-eval.hh" #include #include @@ -56,9 +57,9 @@ struct CmdSearch : InstallableValueCommand, MixJSON ; } - Strings getDefaultFlakeAttrPaths() override + StringSet getRoles() override { - return {"packages." + settings.thisSystem.get(), "legacyPackages." + settings.thisSystem.get()}; + return {"nix-search"}; } void run(ref store, ref installable) override @@ -84,11 +85,13 @@ struct CmdSearch : InstallableValueCommand, MixJSON auto state = getEvalState(); - std::optional jsonOut; + std::optional> jsonOut; if (json) - jsonOut = json::object(); + jsonOut.emplace(json::object()); - uint64_t results = 0; + std::atomic results = 0; + + FutureVector futures(*state->executor); std::function visit; @@ -96,15 +99,22 @@ struct CmdSearch : InstallableValueCommand, MixJSON auto attrPathS = state->symbols.resolve({attrPath}); auto attrPathStr = attrPath.to_string(*state); + /* Activity act(*logger, lvlInfo, actUnknown, fmt("evaluating '%s'", attrPathStr)); + */ try { auto recurse = [&]() { + Executor::WorkItems work; for (const auto & attr : cursor.getAttrs()) { auto cursor2 = cursor.getAttr(state->symbols[attr]); auto attrPath2(attrPath); attrPath2.push_back(attr); - visit(*cursor2, attrPath2, false); + state->addWork( + work, + 
std::string_view(state->symbols[attr]).find("Packages") != std::string_view::npos ? 0 : 2, + [cursor2, attrPath2, visit]() { visit(*cursor2, attrPath2, false); }); } + futures.spawn(std::move(work)); }; if (cursor.isDerivation()) { @@ -147,21 +157,21 @@ struct CmdSearch : InstallableValueCommand, MixJSON if (found) { results++; if (json) { - (*jsonOut)[attrPathStr] = { + (*jsonOut->lock())[attrPathStr] = { {"pname", name.name}, {"version", name.version}, {"description", description}, }; } else { - if (results > 1) - logger->cout(""); - logger->cout( - "* %s%s", - wrap("\e[0;1m", hiliteMatches(attrPathStr, attrPathMatches, ANSI_GREEN, "\e[0;1m")), - optionalBracket(" (", name.version, ")")); + auto out = + fmt("%s* %s%s", + results > 1 ? "\n" : "", + wrap("\e[0;1m", hiliteMatches(attrPathStr, attrPathMatches, ANSI_GREEN, "\e[0;1m")), + optionalBracket(" (", name.version, ")")); if (description != "") - logger->cout( - " %s", hiliteMatches(description, descriptionMatches, ANSI_GREEN, ANSI_NORMAL)); + out += fmt( + "\n %s", hiliteMatches(description, descriptionMatches, ANSI_GREEN, ANSI_NORMAL)); + logger->cout(out); } } } @@ -186,14 +196,20 @@ struct CmdSearch : InstallableValueCommand, MixJSON } }; - for (auto & cursor : installable->getCursors(*state)) - visit(*cursor, cursor->getAttrPath(), true); + Executor::WorkItems work; + for (auto & cursor : installable->getCursors(*state, false)) + state->addWork(work, 1, [cursor, visit]() { visit(*cursor, cursor->getAttrPath(), true); }); + + futures.spawn(std::move(work)); + futures.finishAll(); if (json) - printJSON(*jsonOut); + printJSON(*(jsonOut->lock())); if (!json && !results) throw Error("no results for the given search term(s)!"); + + notice("Found %d matching packages.", results); } }; diff --git a/src/nix/store-cat.md b/src/nix/store-cat.md index da2073473fda..6638be2d54fe 100644 --- a/src/nix/store-cat.md +++ b/src/nix/store-cat.md @@ -6,7 +6,7 @@ R""( ```console # nix store cat --store https://cache.nixos.org/ 
\ - /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10/bin/hello | hexdump -C | head -n1 + /nix/store/qbhyj3blxpw2i6pb7c6grc9185nbnpvy-hello-2.10/bin/hello | hexdump -C | head -n1 00000000 7f 45 4c 46 02 01 01 00 00 00 00 00 00 00 00 00 |.ELF............| ``` diff --git a/src/nix/store-copy-log.md b/src/nix/store-copy-log.md index 61daa75c1780..9d10174bba37 100644 --- a/src/nix/store-copy-log.md +++ b/src/nix/store-copy-log.md @@ -23,7 +23,7 @@ R""( [store derivation]: @docroot@/glossary.md#gloss-store-derivation ```console - # nix store copy-log --to ssh-ng://machine /nix/store/ilgm50plpmcgjhcp33z6n4qbnpqfhxym-glibc-2.33-59.drv + # nix store copy-log --to ssh-ng://machine /nix/store/yaxvykk956vdrwrx9cxyw44mpqr1ml7i-glibc-2.33-59.drv ``` # Description diff --git a/src/nix/store-copy-sigs.md b/src/nix/store-copy-sigs.md index 678756221566..25c60966e000 100644 --- a/src/nix/store-copy-sigs.md +++ b/src/nix/store-copy-sigs.md @@ -6,7 +6,7 @@ R""( ```console # nix store copy-sigs --substituter https://cache.nixos.org \ - --recursive /nix/store/y1x7ng5bmc9s8lqrf98brcpk1a7lbcl5-hello-2.12.1 + --recursive /nix/store/q833p12cmm9qknyp1walqih941msnb9z-hello-2.12.1 ``` * To copy signatures from one binary cache to another: @@ -15,7 +15,7 @@ R""( # nix store copy-sigs --substituter https://cache.nixos.org \ --store file:///tmp/binary-cache \ --recursive -v \ - /nix/store/y1x7ng5bmc9s8lqrf98brcpk1a7lbcl5-hello-2.12.1 + /nix/store/q833p12cmm9qknyp1walqih941msnb9z-hello-2.12.1 imported 2 signatures ``` diff --git a/src/nix/store-delete.md b/src/nix/store-delete.md index 431bc5f5e3f4..026dccd0f19a 100644 --- a/src/nix/store-delete.md +++ b/src/nix/store-delete.md @@ -5,7 +5,7 @@ R""( * Delete a specific store path: ```console - # nix store delete /nix/store/yb5q57zxv6hgqql42d5r8b5k5mcq6kay-hello-2.10 + # nix store delete /nix/store/fdhrijyv3670djsgprx596nn89iwlj2s-hello-2.10 ``` # Description diff --git a/src/nix/store-dump-path.md b/src/nix/store-dump-path.md index 
21467ff329ec..4e5c6aeddbe9 100644 --- a/src/nix/store-dump-path.md +++ b/src/nix/store-dump-path.md @@ -12,7 +12,7 @@ R""( ```console # nix store dump-path --store https://cache.nixos.org/ \ - /nix/store/7crrmih8c52r8fbnqb933dxrsp44md93-glibc-2.25 > glibc.nar + /nix/store/vyrnv99qi410q82qp7nw7lcl37zmzaxd-glibc-2.25 > glibc.nar ``` # Description diff --git a/src/nix/store-ls.md b/src/nix/store-ls.md index 14c4627c97a3..62f6cd0709d0 100644 --- a/src/nix/store-ls.md +++ b/src/nix/store-ls.md @@ -5,7 +5,7 @@ R""( * To list the contents of a store path in a binary cache: ```console - # nix store ls --store https://cache.nixos.org/ --long --recursive /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10 + # nix store ls --store https://cache.nixos.org/ --long --recursive /nix/store/qbhyj3blxpw2i6pb7c6grc9185nbnpvy-hello-2.10 dr-xr-xr-x 0 ./bin -r-xr-xr-x 38184 ./bin/hello dr-xr-xr-x 0 ./share @@ -15,7 +15,7 @@ R""( * To show information about a specific file in a binary cache: ```console - # nix store ls --store https://cache.nixos.org/ --long /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10/bin/hello + # nix store ls --store https://cache.nixos.org/ --long /nix/store/qbhyj3blxpw2i6pb7c6grc9185nbnpvy-hello-2.10/bin/hello -r-xr-xr-x 38184 hello ``` diff --git a/src/nix/store-repair.md b/src/nix/store-repair.md index 180c577acaef..a03952714cbd 100644 --- a/src/nix/store-repair.md +++ b/src/nix/store-repair.md @@ -5,13 +5,13 @@ R""( * Repair a store path, after determining that it is corrupt: ```console - # nix store verify /nix/store/yb5q57zxv6hgqql42d5r8b5k5mcq6kay-hello-2.10 - path '/nix/store/yb5q57zxv6hgqql42d5r8b5k5mcq6kay-hello-2.10' was + # nix store verify /nix/store/fdhrijyv3670djsgprx596nn89iwlj2s-hello-2.10 + path '/nix/store/fdhrijyv3670djsgprx596nn89iwlj2s-hello-2.10' was modified! 
expected hash 'sha256:1hd5vnh6xjk388gdk841vflicy8qv7qzj2hb7xlyh8lpb43j921l', got 'sha256:1a25lf78x5wi6pfkrxalf0n13kdaca0bqmjqnp7wfjza2qz5ssgl' - # nix store repair /nix/store/yb5q57zxv6hgqql42d5r8b5k5mcq6kay-hello-2.10 + # nix store repair /nix/store/fdhrijyv3670djsgprx596nn89iwlj2s-hello-2.10 ``` # Description diff --git a/src/nix/upgrade-nix.cc b/src/nix/upgrade-nix.cc index f26613bf899e..f5ca094c6af7 100644 --- a/src/nix/upgrade-nix.cc +++ b/src/nix/upgrade-nix.cc @@ -15,26 +15,6 @@ using namespace nix; struct CmdUpgradeNix : MixDryRun, StoreCommand { - std::filesystem::path profileDir; - - CmdUpgradeNix() - { - addFlag({ - .longName = "profile", - .shortName = 'p', - .description = "The path to the Nix profile to upgrade.", - .labels = {"profile-dir"}, - .handler = {&profileDir}, - }); - - addFlag({ - .longName = "nix-store-paths-url", - .description = "The URL of the file that contains the store paths of the latest Nix release.", - .labels = {"url"}, - .handler = {&(std::string &) settings.upgradeNixStorePathUrl}, - }); - } - /** * This command is stable before the others */ @@ -45,7 +25,7 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand std::string description() override { - return "upgrade Nix to the latest stable version"; + return "deprecated in favor of determinate-nixd upgrade"; } std::string doc() override @@ -62,111 +42,9 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand void run(ref store) override { - evalSettings.pureEval = true; - - if (profileDir == "") - profileDir = getProfileDir(store); - - printInfo("upgrading Nix in profile %s", profileDir); - - auto storePath = getLatestNix(store); - - auto version = DrvName(storePath.name()).version; - - if (dryRun) { - logger->stop(); - warn("would upgrade to version %s", version); - return; - } - - { - Activity act(*logger, lvlInfo, actUnknown, fmt("downloading '%s'...", store->printStorePath(storePath))); - store->ensurePath(storePath); - } - - { - Activity act( - *logger, lvlInfo, actUnknown, 
fmt("verifying that '%s' works...", store->printStorePath(storePath))); - auto program = store->printStorePath(storePath) + "/bin/nix-env"; - auto s = runProgram(program, false, {"--version"}); - if (s.find("Nix") == std::string::npos) - throw Error("could not verify that '%s' works", program); - } - - logger->stop(); - - { - Activity act( - *logger, - lvlInfo, - actUnknown, - fmt("installing '%s' into profile %s...", store->printStorePath(storePath), profileDir)); - - // FIXME: don't call an external process. - runProgram( - getNixBin("nix-env").string(), - false, - {"--profile", profileDir.string(), "-i", store->printStorePath(storePath), "--no-sandbox"}); - } - - printInfo(ANSI_GREEN "upgrade to version %s done" ANSI_NORMAL, version); - } - - /* Return the profile in which Nix is installed. */ - std::filesystem::path getProfileDir(ref store) - { - auto whereOpt = ExecutablePath::load().findName(OS_STR("nix-env")); - if (!whereOpt) - throw Error("couldn't figure out how Nix is installed, so I can't upgrade it"); - const auto & where = whereOpt->parent_path(); - - printInfo("found Nix in %s", where); - - if (hasPrefix(where.string(), "/run/current-system")) - throw Error("Nix on NixOS must be upgraded via 'nixos-rebuild'"); - - auto profileDir = where.parent_path(); - - // Resolve profile to /nix/var/nix/profiles/ link. 
- while (canonPath(profileDir.string()).find("/profiles/") == std::string::npos - && std::filesystem::is_symlink(profileDir)) - profileDir = readLink(profileDir.string()); - - printInfo("found profile %s", profileDir); - - Path userEnv = canonPath(profileDir.string(), true); - - if (std::filesystem::exists(profileDir / "manifest.json")) - throw Error( - "directory %s is managed by 'nix profile' and currently cannot be upgraded by 'nix upgrade-nix'", - profileDir); - - if (!std::filesystem::exists(profileDir / "manifest.nix")) - throw Error("directory %s does not appear to be part of a Nix profile", profileDir); - - if (!store->isValidPath(store->parseStorePath(userEnv))) - throw Error("directory '%s' is not in the Nix store", userEnv); - - return profileDir; - } - - /* Return the store path of the latest stable Nix. */ - StorePath getLatestNix(ref store) - { - Activity act(*logger, lvlInfo, actUnknown, "querying latest Nix version"); - - // FIXME: use nixos.org? - auto req = FileTransferRequest(parseURL(settings.upgradeNixStorePathUrl.get())); - auto res = getFileTransfer()->download(req); - - auto state = std::make_unique(LookupPath{}, store, fetchSettings, evalSettings); - auto v = state->allocValue(); - state->eval(state->parseExprFromString(res.data, state->rootPath(CanonPath("/no-such-path"))), *v); - Bindings & bindings = Bindings::emptyBindings; - auto v2 = findAlongAttrPath(*state, settings.thisSystem, bindings, *v).first; - - return store->parseStorePath( - state->forceString(*v2, noPos, "while evaluating the path tho latest nix version")); + throw Error( + "The upgrade-nix command isn't available in Determinate Nix; use %s instead", + "sudo determinate-nixd upgrade"); } }; diff --git a/src/nix/upgrade-nix.md b/src/nix/upgrade-nix.md index 3a3bf61b9b05..bb5157175826 100644 --- a/src/nix/upgrade-nix.md +++ b/src/nix/upgrade-nix.md @@ -1,33 +1,11 @@ R""( -# Examples - -* Upgrade Nix to the stable version declared in Nixpkgs: - - ```console - # nix 
upgrade-nix - ``` - -* Upgrade Nix in a specific profile: - - ```console - # nix upgrade-nix --profile ~alice/.local/state/nix/profiles/profile - ``` - # Description -This command upgrades Nix to the stable version. - -By default, the latest stable version is defined by Nixpkgs, in -[nix-fallback-paths.nix](https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/tools/nix-fallback-paths.nix) -and updated manually. It may not always be the latest tagged release. - -By default, it locates the directory containing the `nix` binary in the `$PATH` -environment variable. If that directory is a Nix profile, it will -upgrade the `nix` package in that profile to the latest stable binary -release. +This command isn't available in Determinate Nix but is present in order to guide +users to the new upgrade path. -You cannot use this command to upgrade Nix in the system profile of a -NixOS system (that is, if `nix` is found in `/run/current-system`). +Use `sudo determinate-nixd upgrade` to upgrade Determinate Nix on systems that manage it imperatively. +In practice, this is any system that isn't running NixOS. 
)"" diff --git a/src/nix/verify.md b/src/nix/verify.md index ae0b0acd68a2..0c18449e2c04 100644 --- a/src/nix/verify.md +++ b/src/nix/verify.md @@ -19,7 +19,7 @@ R""( ```console # nix store verify --store https://cache.nixos.org/ \ - /nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10 + /nix/store/10l19qifk7hjjq47px8m2prqk1gv4isy-hello-2.10 ``` # Description diff --git a/src/nix/why-depends.md b/src/nix/why-depends.md index dc13619e13ac..ac8adeb7e858 100644 --- a/src/nix/why-depends.md +++ b/src/nix/why-depends.md @@ -7,9 +7,9 @@ R""( ```console # nix why-depends nixpkgs#hello nixpkgs#glibc - /nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10 - └───bin/hello: …...................../nix/store/9l06v7fc38c1x3r2iydl15ksgz0ysb82-glibc-2.32/lib/ld-linux-x86-64.… - → /nix/store/9l06v7fc38c1x3r2iydl15ksgz0ysb82-glibc-2.32 + /nix/store/10l19qifk7hjjq47px8m2prqk1gv4isy-hello-2.10 + └───bin/hello: …...................../nix/store/kmmr0ggkywxvnad4z1chqb6lsxi6pqgc-glibc-2.32/lib/ld-linux-x86-64.… + → /nix/store/kmmr0ggkywxvnad4z1chqb6lsxi6pqgc-glibc-2.32 ``` * Show all files and paths in the dependency graph leading from @@ -17,13 +17,13 @@ R""( ```console # nix why-depends --all nixpkgs#thunderbird nixpkgs#xorg.libX11 - /nix/store/qfc8729nzpdln1h0hvi1ziclsl3m84sr-thunderbird-78.5.1 - ├───lib/thunderbird/libxul.so: …6wrw-libxcb-1.14/lib:/nix/store/adzfjjh8w25vdr0xdx9x16ah4f5rqrw5-libX11-1.7.0/lib:/nix/store/ssf… - │ → /nix/store/adzfjjh8w25vdr0xdx9x16ah4f5rqrw5-libX11-1.7.0 - ├───lib/thunderbird/libxul.so: …pxyc-libXt-1.2.0/lib:/nix/store/1qj29ipxl2fyi2b13l39hdircq17gnk0-libXdamage-1.1.5/lib:/nix/store… - │ → /nix/store/1qj29ipxl2fyi2b13l39hdircq17gnk0-libXdamage-1.1.5 - │ ├───lib/libXdamage.so.1.1.0: …-libXfixes-5.0.3/lib:/nix/store/adzfjjh8w25vdr0xdx9x16ah4f5rqrw5-libX11-1.7.0/lib:/nix/store/9l0… - │ │ → /nix/store/adzfjjh8w25vdr0xdx9x16ah4f5rqrw5-libX11-1.7.0 + /nix/store/0my2p7psgdzqc5pq6dyl4ld9w6g0np58-thunderbird-78.5.1 + ├───lib/thunderbird/libxul.so: 
…6wrw-libxcb-1.14/lib:/nix/store/jmwiq1bb3n47a0css8b1q7lhgf7416k5-libX11-1.7.0/lib:/nix/store/ssf… + │ → /nix/store/jmwiq1bb3n47a0css8b1q7lhgf7416k5-libX11-1.7.0 + ├───lib/thunderbird/libxul.so: …pxyc-libXt-1.2.0/lib:/nix/store/l1sv43bafhkf2iikmdw9y62aybjdhcmm-libXdamage-1.1.5/lib:/nix/store… + │ → /nix/store/l1sv43bafhkf2iikmdw9y62aybjdhcmm-libXdamage-1.1.5 + │ ├───lib/libXdamage.so.1.1.0: …-libXfixes-5.0.3/lib:/nix/store/jmwiq1bb3n47a0css8b1q7lhgf7416k5-libX11-1.7.0/lib:/nix/store/9l0… + │ │ → /nix/store/jmwiq1bb3n47a0css8b1q7lhgf7416k5-libX11-1.7.0 … ``` @@ -31,9 +31,9 @@ R""( ```console # nix why-depends nixpkgs#glibc nixpkgs#glibc - /nix/store/9df65igwjmf2wbw0gbrrgair6piqjgmi-glibc-2.31 - └───lib/ld-2.31.so: …che Do not use /nix/store/9df65igwjmf2wbw0gbrrgair6piqjgmi-glibc-2.31/etc/ld.so.cache. --… - → /nix/store/9df65igwjmf2wbw0gbrrgair6piqjgmi-glibc-2.31 + /nix/store/q9mknq836i0kblq8g1hm9f3cv9qda0r9-glibc-2.31 + └───lib/ld-2.31.so: …che Do not use /nix/store/q9mknq836i0kblq8g1hm9f3cv9qda0r9-glibc-2.31/etc/ld.so.cache. --… + → /nix/store/q9mknq836i0kblq8g1hm9f3cv9qda0r9-glibc-2.31 ``` * Show why Geeqie has a build-time dependency on `systemd`: @@ -54,7 +54,7 @@ R""( Nix automatically determines potential runtime dependencies between store paths by scanning for the *hash parts* of store paths. For instance, if there exists a store path -`/nix/store/9df65igwjmf2wbw0gbrrgair6piqjgmi-glibc-2.31`, and a file +`/nix/store/q9mknq836i0kblq8g1hm9f3cv9qda0r9-glibc-2.31`, and a file inside another store path contains the string `9df65igw…`, then the latter store path *refers* to the former, and thus might need it at runtime. Nix always maintains the existence of the transitive closure diff --git a/src/perl/lib/Nix/Store.xs b/src/perl/lib/Nix/Store.xs index 93e9f0f95417..6de26f0d2840 100644 --- a/src/perl/lib/Nix/Store.xs +++ b/src/perl/lib/Nix/Store.xs @@ -234,7 +234,7 @@ StoreWrapper::exportPaths(int fd, ...) 
StorePathSet paths; for (int n = 2; n < items; ++n) paths.insert(THIS->store->parseStorePath(SvPV_nolen(ST(n)))); FdSink sink(fd); - exportPaths(*THIS->store, paths, sink); + exportPaths(*THIS->store, paths, sink, 1); } catch (Error & e) { croak("%s", e.what()); } diff --git a/src/perl/lib/Nix/meson.build b/src/perl/lib/Nix/meson.build index dd5560e21cc5..675d33b63710 100644 --- a/src/perl/lib/Nix/meson.build +++ b/src/perl/lib/Nix/meson.build @@ -38,13 +38,19 @@ nix_perl_store_cc = custom_target( command : [ xsubpp, '@INPUT@', '-output', '@OUTPUT@' ], ) +if host_machine.system() == 'darwin' and get_option('default_library') == 'static' + prelink = false +else + prelink = true +endif + # Build Nix::Store Library #------------------------------------------------- nix_perl_store_lib = library( 'Store', sources : nix_perl_store_cc, name_prefix : '', - prelink : true, # For C++ static initializers + prelink : prelink, # For C++ static initializers install : true, install_mode : 'rwxr-xr-x', install_dir : join_paths(nix_perl_install_dir, 'auto', 'Nix', 'Store'), diff --git a/src/perl/package.nix b/src/perl/package.nix index 864558ec855e..b2a1f6975836 100644 --- a/src/perl/package.nix +++ b/src/perl/package.nix @@ -18,7 +18,7 @@ in perl.pkgs.toPerlModule ( mkMesonDerivation (finalAttrs: { - pname = "nix-perl"; + pname = "determinate-nix-perl"; inherit version; workDir = ./.; diff --git a/tests/functional/binary-cache.sh b/tests/functional/binary-cache.sh index 445845bba2a3..066fa026ed9b 100755 --- a/tests/functional/binary-cache.sh +++ b/tests/functional/binary-cache.sh @@ -312,3 +312,24 @@ nix-store --delete "$outPath" "$docPath" # -vvv is the level that logs during the loop timeout 60 nix-build --no-out-link -E "$expr" --option substituters "file://$cacheDir" \ --option trusted-binary-caches "file://$cacheDir" --no-require-sigs + + +# Test that the narinfo-cache-meta-ttl causes nix-cache-info to be cached, +# and that --refresh overrides it. 
+ +# Populate the metadata cache by querying store info over HTTP. +_NIX_FORCE_HTTP=1 nix store info --store "file://$cacheDir" + +# Remove nix-cache-info from the binary cache. +rm "$cacheDir/nix-cache-info" + +# nix store info should still work because the metadata is cached +# (narinfo-cache-meta-ttl defaults to 7 days). +_NIX_FORCE_HTTP=1 nix store info --store "file://$cacheDir" + +# But with --refresh, it should fail because nix-cache-info is gone +# and the cached metadata TTL is overridden to 0. +_NIX_FORCE_HTTP=1 expectStderr 1 nix store info --store "file://$cacheDir" --refresh | grepQuiet "uploading.*is not supported" + +# Remove --refresh and it should work again. +_NIX_FORCE_HTTP=1 nix store info --store "file://$cacheDir" diff --git a/tests/functional/build.sh b/tests/functional/build.sh index dbc72d991352..6a4c90a43781 100755 --- a/tests/functional/build.sh +++ b/tests/functional/build.sh @@ -158,33 +158,22 @@ printf "" | nix build --no-link --stdin --json | jq --exit-status '. == []' printf "%s\n" "$drv^*" | nix build --no-link --stdin --json | jq --exit-status '.[0]|has("drvPath")' # --keep-going and FOD -if isDaemonNewer "2.34pre"; then - # With the fix, cancelled goals are not reported as failures. - # Use -j1 so only x1 starts and fails; x2, x3, x4 are cancelled. - out="$(nix build -f fod-failing.nix -j1 -L 2>&1)" && status=0 || status=$? - test "$status" = 1 - # Only the hash mismatch error for x1. Cancelled goals not reported. - test "$(<<<"$out" grep -cE '^error:')" = 1 - # Regression test: error messages should not be empty (end with just "failed:") - <<<"$out" grepQuietInverse -E "^error:.*failed: *$" -else - out="$(nix build -f fod-failing.nix -L 2>&1)" && status=0 || status=$? - test "$status" = 1 - # At minimum, check that x1 is reported as failing - <<<"$out" grepQuiet -E "error:.*-x1" -fi +out="$(nix build -f fod-failing.nix -j1 -L 2>&1)" && status=0 || status=$? 
+test "$status" = 1 +# Only the hash mismatch error for the first failing goal (x1). +# The other goals (x2, x3, x4) are cancelled and not reported as failures. +test "$(<<<"$out" grep -cE '^error:')" = 1 <<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x1\\.drv'" <<<"$out" grepQuiet -vE "hash mismatch in fixed-output derivation '.*-x3\\.drv'" <<<"$out" grepQuiet -vE "hash mismatch in fixed-output derivation '.*-x2\\.drv'" out="$(nix build -f fod-failing.nix -L x1 x2 x3 --keep-going 2>&1)" && status=0 || status=$? test "$status" = 1 -# three "hash mismatch" errors - for each failing fod, one "build of ... failed" -test "$(<<<"$out" grep -cE '^error:')" = 4 +# three "hash mismatch" errors - for each failing fod +test "$(<<<"$out" grep -cE '^error:')" = 3 <<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x1\\.drv'" <<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x3\\.drv'" <<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x2\\.drv'" -<<<"$out" grepQuiet -E "error: build of '.*-x[1-3]\\.drv\\^out', '.*-x[1-3]\\.drv\\^out', '.*-x[1-3]\\.drv\\^out' failed" out="$(nix build -f fod-failing.nix -L x4 2>&1)" && status=0 || status=$? test "$status" = 1 @@ -213,69 +202,3 @@ else fi <<<"$out" grepQuiet -vE "hash mismatch in fixed-output derivation '.*-x3\\.drv'" <<<"$out" grepQuiet -vE "hash mismatch in fixed-output derivation '.*-x2\\.drv'" - -# Regression test: cancelled builds should not be reported as failures -# When fast-fail fails, slow and depends-on-slow are cancelled (not failed). -# Only fast-fail should be reported as a failure. -# Uses fifo for synchronization to ensure deterministic behavior. -# Requires -j2 so slow and fast-fail run concurrently (fifo deadlocks if serialized). 
-if isDaemonNewer "2.34pre" && canUseSandbox; then - fifoDir="$TEST_ROOT/cancelled-builds-fifo" - mkdir -p "$fifoDir" - mkfifo "$fifoDir/fifo" - chmod a+rw "$fifoDir/fifo" - # When using a separate test store, we need sandbox-paths to access - # the system store (where bash/coreutils live). On NixOS, the test - # uses the system store directly, so this isn't needed (and would - # conflict with input paths). - sandboxPathsArg=() - if ! isTestOnNixOS; then - sandboxPathsArg=(--option sandbox-paths "/nix/store") - fi - out="$(nix flake check ./cancelled-builds --impure -L -j2 \ - --option sandbox true \ - "${sandboxPathsArg[@]}" \ - --option sandbox-build-dir /build-tmp \ - --option extra-sandbox-paths "/cancelled-builds-fifo=$fifoDir" \ - 2>&1)" && status=0 || status=$? - rm -rf "$fifoDir" - test "$status" = 1 - # The error should be for fast-fail, not for cancelled goals - <<<"$out" grepQuiet -E "Cannot build.*fast-fail" - # Cancelled goals should NOT appear in error messages (but may appear in "will be built" list) - <<<"$out" grepQuietInverse -E "^error:.*slow" - <<<"$out" grepQuietInverse -E "^error:.*depends-on-slow" - <<<"$out" grepQuietInverse -E "^error:.*depends-on-fail" - # Error messages should not be empty (end with just "failed:") - <<<"$out" grepQuietInverse -E "^error:.*failed: *$" - - # Test that nix build follows the same rules (uses a slightly different code path) - mkdir -p "$fifoDir" - mkfifo "$fifoDir/fifo" - chmod a+rw "$fifoDir/fifo" - sandboxPathsArg=() - if ! 
isTestOnNixOS; then - sandboxPathsArg=(--option sandbox-paths "/nix/store") - fi - system=$(nix eval --raw --impure --expr builtins.currentSystem) - out="$(nix build --impure -L -j2 \ - --option sandbox true \ - "${sandboxPathsArg[@]}" \ - --option sandbox-build-dir /build-tmp \ - --option extra-sandbox-paths "/cancelled-builds-fifo=$fifoDir" \ - "./cancelled-builds#checks.$system.slow" \ - "./cancelled-builds#checks.$system.depends-on-slow" \ - "./cancelled-builds#checks.$system.fast-fail" \ - "./cancelled-builds#checks.$system.depends-on-fail" \ - 2>&1)" && status=0 || status=$? - rm -rf "$fifoDir" - test "$status" = 1 - # The error should be for fast-fail, not for cancelled goals - <<<"$out" grepQuiet -E "Cannot build.*fast-fail" - # Cancelled goals should NOT appear in error messages - <<<"$out" grepQuietInverse -E "^error:.*slow" - <<<"$out" grepQuietInverse -E "^error:.*depends-on-slow" - <<<"$out" grepQuietInverse -E "^error:.*depends-on-fail" - # Error messages should not be empty (end with just "failed:") - <<<"$out" grepQuietInverse -E "^error:.*failed: *$" -fi diff --git a/tests/functional/ca/build-with-garbage-path.sh b/tests/functional/ca/build-with-garbage-path.sh index 298cd469a928..b610f8e2b1c8 100755 --- a/tests/functional/ca/build-with-garbage-path.sh +++ b/tests/functional/ca/build-with-garbage-path.sh @@ -9,7 +9,7 @@ requireDaemonNewerThan "2.4pre20210621" # Get the output path of `rootCA`, and put some garbage instead outPath="$(nix-build ./content-addressed.nix -A rootCA --no-out-link)" # shellcheck disable=SC2046 # Multiple store paths need to become individual args -nix-store --delete $(nix-store -q --referrers-closure "$outPath") +nix-store --delete $(nix-store -q --referrers-closure "$outPath") --ignore-liveness touch "$outPath" # The build should correctly remove the garbage and put the expected path instead diff --git a/tests/functional/ca/derivation-json.sh b/tests/functional/ca/derivation-json.sh index f94c5a72c3f0..eb1d949676a7 100644 
--- a/tests/functional/ca/derivation-json.sh +++ b/tests/functional/ca/derivation-json.sh @@ -19,7 +19,7 @@ drvPath3=$(nix derivation add --dry-run < "$TEST_HOME"/foo.json) [[ ! -e "$drvPath3" ]] # But the JSON is rejected without the experimental feature -expectStderr 1 nix derivation add < "$TEST_HOME"/foo.json --experimental-features nix-command | grepQuiet "experimental Nix feature 'ca-derivations' is disabled" +expectStderr 1 nix derivation add < "$TEST_HOME"/foo.json --experimental-features '' | grepQuiet "experimental Nix feature 'ca-derivations' is disabled" # Without --dry-run it is actually written drvPath4=$(nix derivation add < "$TEST_HOME"/foo.json) diff --git a/tests/functional/ca/selfref-gc.sh b/tests/functional/ca/selfref-gc.sh index 7ac9ec9f78d0..fdee6b07ab09 100755 --- a/tests/functional/ca/selfref-gc.sh +++ b/tests/functional/ca/selfref-gc.sh @@ -4,7 +4,7 @@ source common.sh requireDaemonNewerThan "2.4pre20210626" -enableFeatures "ca-derivations nix-command flakes" +enableFeatures "ca-derivations" export NIX_TESTS_CA_BY_DEFAULT=1 cd .. diff --git a/tests/functional/cancelled-builds/flake.nix b/tests/functional/cancelled-builds/flake.nix deleted file mode 100644 index 0b8bf1ca5d86..000000000000 --- a/tests/functional/cancelled-builds/flake.nix +++ /dev/null @@ -1,64 +0,0 @@ -# Regression test for cancelled builds not being reported as failures. -# -# Scenario: When a build fails while other builds are running, those other -# builds (and their dependents) get cancelled. Previously, cancelled builds -# were incorrectly reported as failures with empty error messages. -# -# Uses a fifo for synchronization: fast-fail waits for slow to start before -# failing, ensuring slow is actually running when it gets cancelled. 
-# -# See: tests/functional/build.sh (search for "cancelled-builds") -{ - outputs = - { self }: - let - config = import "${builtins.getEnv "_NIX_TEST_BUILD_DIR"}/config.nix"; - in - with config; - { - checks.${system} = { - # A derivation that signals it started via fifo, then waits - slow = mkDerivation { - name = "slow"; - buildCommand = '' - echo "slow: started, signaling via fifo" - echo started > /cancelled-builds-fifo/fifo - echo "slow: waiting..." - sleep 10 - touch $out - ''; - }; - - # Depends on slow - will be cancelled when fast-fail fails - depends-on-slow = mkDerivation { - name = "depends-on-slow"; - slow = self.checks.${system}.slow; - buildCommand = '' - echo "depends-on-slow: slow finished at $slow" - touch $out - ''; - }; - - # Waits for slow to start via fifo, then fails - fast-fail = mkDerivation { - name = "fast-fail"; - buildCommand = '' - echo "fast-fail: waiting for slow to start..." - read line < /cancelled-builds-fifo/fifo - echo "fast-fail: slow started, now failing" >&2 - exit 1 - ''; - }; - - # Depends on fast-fail - will fail with DependencyFailed - depends-on-fail = mkDerivation { - name = "depends-on-fail"; - fast-fail = self.checks.${system}.fast-fail; - buildCommand = '' - echo "depends-on-fail: fast-fail finished (should never get here)" - touch $out - ''; - }; - }; - }; -} diff --git a/tests/functional/common/functions.sh b/tests/functional/common/functions.sh index 1b2ec8fe0e8f..3e3aeef3ddcf 100644 --- a/tests/functional/common/functions.sh +++ b/tests/functional/common/functions.sh @@ -73,6 +73,7 @@ startDaemon() { fi # Start the daemon, wait for the socket to appear. rm -f "$NIX_DAEMON_SOCKET_PATH" + # TODO: remove the nix-command feature when we're no longer testing against old daemons. PATH=$DAEMON_PATH nix --extra-experimental-features 'nix-command' daemon & _NIX_TEST_DAEMON_PID=$! 
export _NIX_TEST_DAEMON_PID @@ -132,11 +133,11 @@ restartDaemon() { } isDaemonNewer () { - [[ -n "${NIX_DAEMON_PACKAGE:-}" ]] || return 0 - local requiredVersion="$1" - local daemonVersion - daemonVersion=$("$NIX_DAEMON_PACKAGE/bin/nix" daemon --version | cut -d' ' -f3) - [[ $(nix eval --expr "builtins.compareVersions ''$daemonVersion'' ''$requiredVersion''") -ge 0 ]] + [[ -n "${NIX_DAEMON_PACKAGE:-}" ]] || return 0 + local requiredVersion="$1" + local daemonVersion + daemonVersion=$("$NIX_DAEMON_PACKAGE/bin/nix" daemon --version | sed 's/.*) //') + [[ $(nix eval --expr "builtins.compareVersions ''$daemonVersion'' ''$requiredVersion''") -ge 0 ]] } skipTest () { @@ -360,4 +361,25 @@ execUnshare () { exec unshare --mount --map-root-user "$SHELL" "$@" } +initGitRepo() { + local repo="$1" + local extraArgs="${2-}" + + # shellcheck disable=SC2086 # word splitting of extraArgs is intended + git -C "$repo" init $extraArgs + git -C "$repo" config user.email "foobar@example.com" + git -C "$repo" config user.name "Foobar" +} + +createGitRepo() { + local repo="$1" + local extraArgs="${2-}" + + rm -rf "$repo" "$repo".tmp + mkdir -p "$repo" + + # shellcheck disable=SC2086 # word splitting of extraArgs is intended + initGitRepo "$repo" $extraArgs +} + fi # COMMON_FUNCTIONS_SH_SOURCED diff --git a/tests/functional/common/init.sh b/tests/functional/common/init.sh index 66b44c76f696..41e1851160a7 100755 --- a/tests/functional/common/init.sh +++ b/tests/functional/common/init.sh @@ -12,7 +12,7 @@ if isTestOnNixOS; then ! test -e "$test_nix_conf" cat > "$test_nix_conf" < "$NIX_CONF_DIR"/nix.conf < "$NIX_CONF_DIR"/nix.conf.extra <"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr +xpFeature=auto-allocate-uids +gatedSetting=auto-allocate-uids + +# Experimental feature is disabled before, ignore and warn. 
+NIX_CONFIG=" + experimental-features = + $gatedSetting = true +" expect 1 nix config show $gatedSetting 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr [[ $(cat "$TEST_ROOT/stdout") = '' ]] -grepQuiet "Ignoring setting 'accept-flake-config' because experimental feature 'flakes' is not enabled" "$TEST_ROOT/stderr" -grepQuiet "error: could not find setting 'accept-flake-config'" "$TEST_ROOT/stderr" +grepQuiet "error: could not find setting '$gatedSetting'" "$TEST_ROOT/stderr" -# 'flakes' experimental-feature is disabled after, ignore and warn -NIX_CONFIG=' - accept-flake-config = true - experimental-features = nix-command -' expect 1 nix config show accept-flake-config 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr +# Experimental feature is disabled after, ignore and warn. +NIX_CONFIG=" + $gatedSetting = true + experimental-features = +" expect 1 nix config show $gatedSetting 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr [[ $(cat "$TEST_ROOT/stdout") = '' ]] -grepQuiet "Ignoring setting 'accept-flake-config' because experimental feature 'flakes' is not enabled" "$TEST_ROOT/stderr" -grepQuiet "error: could not find setting 'accept-flake-config'" "$TEST_ROOT/stderr" +grepQuiet "error: could not find setting '$gatedSetting'" "$TEST_ROOT/stderr" -# 'flakes' experimental-feature is enabled before, process -NIX_CONFIG=' - experimental-features = nix-command flakes - accept-flake-config = true -' nix config show accept-flake-config 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr +# Experimental feature is enabled before, process. 
+NIX_CONFIG=" + experimental-features = $xpFeature + $gatedSetting = true +" nix config show $gatedSetting 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr grepQuiet "true" "$TEST_ROOT/stdout" -grepQuietInverse "Ignoring setting 'accept-flake-config'" "$TEST_ROOT/stderr" -# 'flakes' experimental-feature is enabled after, process -NIX_CONFIG=' - accept-flake-config = true - experimental-features = nix-command flakes -' nix config show accept-flake-config 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr +# Experimental feature is enabled after, process. +NIX_CONFIG=" + $gatedSetting = true + experimental-features = $xpFeature +" nix config show $gatedSetting 1>"$TEST_ROOT"/stdout 2>"$TEST_ROOT"/stderr grepQuiet "true" "$TEST_ROOT/stdout" -grepQuietInverse "Ignoring setting 'accept-flake-config'" "$TEST_ROOT/stderr" +grepQuietInverse "Ignoring setting '$gatedSetting'" "$TEST_ROOT/stderr" function exit_code_both_ways { - expect 1 nix --experimental-features 'nix-command' "$@" 1>/dev/null - nix --experimental-features 'nix-command flakes' "$@" 1>/dev/null + expect 1 nix --experimental-features '' "$@" 1>/dev/null + nix --experimental-features "$xpFeature" "$@" 1>/dev/null # Also, the order should not matter - expect 1 nix "$@" --experimental-features 'nix-command' 1>/dev/null - nix "$@" --experimental-features 'nix-command flakes' 1>/dev/null + expect 1 nix "$@" --experimental-features '' 1>/dev/null + nix "$@" --experimental-features "$xpFeature" 1>/dev/null } -exit_code_both_ways show-config --flake-registry 'https://no' +exit_code_both_ways config show --auto-allocate-uids # Double check these are stable nix --experimental-features '' --help 1>/dev/null nix --experimental-features '' doctor --help 1>/dev/null nix --experimental-features '' repl --help 1>/dev/null nix --experimental-features '' upgrade-nix --help 1>/dev/null - -# These 3 arguments are currently given to all commands, which is wrong (as not -# all care). 
To deal with fixing later, we simply make them require the -# nix-command experimental features --- it so happens that the commands we wish -# stabilizing to do not need them anyways. -for arg in '--print-build-logs' '--offline' '--refresh'; do - nix --experimental-features 'nix-command' "$arg" --help 1>/dev/null - expect 1 nix --experimental-features '' "$arg" --help 1>/dev/null -done diff --git a/tests/functional/export.sh b/tests/functional/export.sh index 53bbdd9ac39e..a74efa91d806 100755 --- a/tests/functional/export.sh +++ b/tests/functional/export.sh @@ -7,18 +7,24 @@ TODO_NixOS clearStore outPath=$(nix-build dependencies.nix --no-out-link) +drvPath=$(nix path-info --json "$outPath" | jq -r .\""$outPath"\".deriver) nix-store --export "$outPath" > "$TEST_ROOT"/exp +expectStderr 1 nix nario export "$outPath" | grepQuiet "required argument.*missing" +nix nario export --format 1 "$outPath" > "$TEST_ROOT/exp2" +cmp "$TEST_ROOT/exp" "$TEST_ROOT/exp2" # shellcheck disable=SC2046 nix-store --export $(nix-store -qR "$outPath") > "$TEST_ROOT"/exp_all +nix nario export --format 1 -r "$outPath" > "$TEST_ROOT"/exp_all2 +cmp "$TEST_ROOT/exp_all" "$TEST_ROOT/exp_all2" + if nix-store --export "$outPath" >/dev/full ; then echo "exporting to a bad file descriptor should fail" exit 1 fi - clearStore if nix-store --import < "$TEST_ROOT"/exp; then @@ -26,7 +32,6 @@ if nix-store --import < "$TEST_ROOT"/exp; then exit 1 fi - clearStore nix-store --import < "$TEST_ROOT"/exp_all @@ -34,9 +39,42 @@ nix-store --import < "$TEST_ROOT"/exp_all # shellcheck disable=SC2046 nix-store --export $(nix-store -qR "$outPath") > "$TEST_ROOT"/exp_all2 - clearStore # Regression test: the derivers in exp_all2 are empty, which shouldn't # cause a failure. nix-store --import < "$TEST_ROOT"/exp_all2 + +# Test `nix nario import` on files created by `nix-store --export`. 
+clearStore +expectStderr 1 nix nario import < "$TEST_ROOT"/exp_all | grepQuiet "lacks a signature" +nix nario import --no-check-sigs < "$TEST_ROOT"/exp_all +nix path-info "$outPath" + +# Test `nix nario list`. +nix nario list < "$TEST_ROOT"/exp_all +nix nario list < "$TEST_ROOT"/exp_all | grepQuiet ".*dependencies-input-0.*bytes" +nix nario list -lR < "$TEST_ROOT"/exp_all | grepQuiet "dr-xr-xr-x .*0 $outPath" +nix nario list -lR < "$TEST_ROOT"/exp_all | grepQuiet "lrwxrwxrwx .*0 $outPath/self -> $outPath" +nix nario list -lR < "$TEST_ROOT"/exp_all | grepQuiet -- "-r--r--r-- .*7 $outPath/foobar" + +# Test format 2 (including signatures). +nix key generate-secret --key-name my-key > "$TEST_ROOT"/secret +public_key=$(nix key convert-secret-to-public < "$TEST_ROOT"/secret) +nix store sign --key-file "$TEST_ROOT/secret" -r "$outPath" +nix nario export --format 2 -r "$outPath" > "$TEST_ROOT"/exp_all +clearStore +expectStderr 1 nix nario import < "$TEST_ROOT"/exp_all | grepQuiet "lacks a signature" +nix nario import --trusted-public-keys "$public_key" < "$TEST_ROOT"/exp_all +[[ $(nix path-info --json "$outPath" | jq -r .[].signatures[]) =~ my-key: ]] + +# Test json listing. 
+json=$(nix nario list --json -R < "$TEST_ROOT/exp_all") +[[ $(printf "%s" "$json" | jq -r ".paths.\"$outPath\".deriver") = "$drvPath" ]] +[[ $(printf "%s" "$json" | jq -r ".paths.\"$outPath\".contents.type") = directory ]] +[[ $(printf "%s" "$json" | jq -r ".paths.\"$outPath\".contents.entries.foobar.type") = regular ]] +[[ $(printf "%s" "$json" | jq ".paths.\"$outPath\".contents.entries.foobar.size") = 7 ]] + +json=$(nix nario list --json < "$TEST_ROOT/exp_all") +[[ $(printf "%s" "$json" | jq -r ".paths.\"$outPath\".deriver") = "$drvPath" ]] +[[ $(printf "%s" "$json" | jq -r ".paths.\"$outPath\".contents.type") = null ]] diff --git a/tests/functional/fetchGit.sh b/tests/functional/fetchGit.sh index e3135328f40c..d43108e6d2a2 100755 --- a/tests/functional/fetchGit.sh +++ b/tests/functional/fetchGit.sh @@ -12,11 +12,9 @@ repo=$TEST_ROOT/./git export _NIX_FORCE_HTTP=1 -rm -rf "$repo" "${repo}"-tmp "$TEST_HOME"/.cache/nix "$TEST_ROOT"/worktree "$TEST_ROOT"/minimal +rm -rf "${repo}"-tmp "$TEST_HOME"/.cache/nix "$TEST_ROOT"/worktree "$TEST_ROOT"/minimal -git init "$repo" -git -C "$repo" config user.email "foobar@example.com" -git -C "$repo" config user.name "Foobar" +createGitRepo "$repo" echo utrecht > "$repo"/hello touch "$repo"/.gitignore @@ -213,8 +211,7 @@ path5=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = $repo; ref = # Fetching from a repo with only a specific revision and no branches should # not fall back to copying files and record correct revision information. See: #5302 -mkdir "$TEST_ROOT"/minimal -git -C "$TEST_ROOT"/minimal init +createGitRepo "$TEST_ROOT"/minimal git -C "$TEST_ROOT"/minimal fetch "$repo" "$rev2" git -C "$TEST_ROOT"/minimal checkout "$rev2" [[ $(nix eval --impure --raw --expr "(builtins.fetchGit { url = $TEST_ROOT/minimal; }).rev") = "$rev2" ]] @@ -267,7 +264,7 @@ rm -rf "$TEST_HOME"/.cache/nix (! 
nix eval --impure --raw --expr "(builtins.fetchGit \"file://$repo\").outPath") # should succeed for a repo without commits -git init "$repo" +initGitRepo "$repo" git -C "$repo" add hello # need to add at least one file to cause the root of the repo to be visible # shellcheck disable=SC2034 path10=$(nix eval --impure --raw --expr "(builtins.fetchGit \"file://$repo\").outPath") @@ -275,9 +272,7 @@ path10=$(nix eval --impure --raw --expr "(builtins.fetchGit \"file://$repo\").ou # should succeed for a path with a space # regression test for #7707 repo="$TEST_ROOT/a b" -git init "$repo" -git -C "$repo" config user.email "foobar@example.com" -git -C "$repo" config user.name "Foobar" +createGitRepo "$repo" echo utrecht > "$repo/hello" touch "$repo/.gitignore" @@ -289,7 +284,7 @@ path11=$(nix eval --impure --raw --expr "(builtins.fetchGit ./.).outPath") # Test a workdir with no commits. empty="$TEST_ROOT/empty" -git init "$empty" +createGitRepo "$empty" emptyAttrs="{ lastModified = 0; lastModifiedDate = \"19700101000000\"; narHash = \"sha256-pQpattmS9VmO3ZIQUFn66az8GSmB4IvYhTTCFn6SUmo=\"; rev = \"0000000000000000000000000000000000000000\"; revCount = 0; shortRev = \"0000000\"; submodules = false; }" result=$(nix eval --impure --expr "builtins.removeAttrs (builtins.fetchGit $empty) [\"outPath\"]") @@ -314,3 +309,71 @@ git -C "$empty" config user.name "Foobar" git -C "$empty" commit --allow-empty --allow-empty-message --message "" nix eval --impure --expr "let attrs = builtins.fetchGit $empty; in assert attrs.lastModified != 0; assert attrs.rev != \"0000000000000000000000000000000000000000\"; assert attrs.revCount == 1; true" + +# Test backward compatibility hack for Nix < 2.20 locks / fetchTree calls that expect Git filters to be applied. 
+eol="$TEST_ROOT/git-eol" +createGitRepo "$eol" +mkdir -p "$eol/dir" +printf "Hello\nWorld\n" > "$eol/dir/crlf" +printf "ignore me" > "$eol/dir/ignored" +git -C "$eol" add dir/crlf dir/ignored +git -C "$eol" commit -a -m Initial +echo "Version: \$Format:%s\$" > "$eol/dir/version" +printf "crlf text eol=crlf\nignored export-ignore\nversion export-subst\n" > "$eol/dir/.gitattributes" +git -C "$eol" add dir/.gitattributes dir/version +git -C "$eol" commit -a -m 'Apply gitattributes' + +rev="$(git -C "$eol" rev-parse HEAD)" + +export _NIX_TEST_BARF_ON_UNCACHEABLE=1 + +oldHash="sha256-fccLx4BSC7e/PzQM4JnixstJQnd4dzgm73BqKhV3KRs=" +newHash="sha256-Ns7sLZOvpacagAPNun1+jBovMpo+zM7PUJ9x+lW3cIU=" + +expectStderr 0 nix eval --expr \ + "let tree = builtins.fetchTree { type = \"git\"; url = \"file://$eol\"; rev = \"$rev\"; narHash = \"$oldHash\"; }; in assert builtins.readFile \"\${tree}/dir/crlf\" == \"Hello\r\nWorld\r\n\"; assert !builtins.pathExists \"\${tree}/dir/ignored\"; assert builtins.readFile \"\${tree}/dir/version\" == \"Version: Apply gitattributes\n\"; true" \ + | grepQuiet "Please update the NAR hash to '$newHash'" + +nix eval --expr \ + "let tree = builtins.fetchTree { type = \"git\"; url = \"file://$eol\"; rev = \"$rev\"; narHash = \"$newHash\"; }; in assert builtins.readFile \"\${tree}/dir/crlf\" == \"Hello\nWorld\n\"; assert builtins.pathExists \"\${tree}/dir/ignored\"; assert builtins.readFile \"\${tree}/dir/version\" == \"Version: \$Format:%s\$\n\"; true" + +expectStderr 102 nix eval --expr \ + "builtins.fetchTree { type = \"git\"; url = \"file://$eol\"; rev = \"$rev\"; narHash = \"sha256-DLDvcwdcwCxnuPTxSQ6gLAyopB20lD0bOQoQB3i2hsA=\"; }" \ + | grepQuiet "NAR hash mismatch" + +mkdir -p "$TEST_ROOT"/flake +cat > "$TEST_ROOT"/flake/flake.nix << EOF +{ + inputs.eol = { type = "git"; url = "file://$eol"; rev = "$rev"; flake = false; }; + outputs = { self, eol }: rec { + crlf = builtins.readFile "\${eol}/dir/crlf"; + isLegacy = assert crlf == 
"Hello\r\nWorld\r\n"; true; + isModern = assert crlf == "Hello\nWorld\n"; true; + }; +} +EOF + +# Test locking with Nix < 2.20 semantics (i.e. using `git archive`). +nix eval --nix-219-compat "path:$TEST_ROOT/flake"#isLegacy +nix eval "path:$TEST_ROOT/flake"#isLegacy +[[ $(jq -r .nodes.eol.locked.narHash < "$TEST_ROOT"/flake/flake.lock) = "$oldHash" ]] + +# Test locking with Nix >= 2.20 semantics (i.e. using libgit2). +rm "$TEST_ROOT"/flake/flake.lock +nix eval "path:$TEST_ROOT/flake"#isModern +nix eval --nix-219-compat "path:$TEST_ROOT/flake"#isModern +[[ $(jq -r .nodes.eol.locked.narHash < "$TEST_ROOT"/flake/flake.lock) = "$newHash" ]] + + +# Test that builtins.hashString devirtualizes lazy paths (https://github.com/DeterminateSystems/determinate/issues/160). +hashStringRepo="$TEST_ROOT/hashString" +createGitRepo "$hashStringRepo" +echo hello > "$hashStringRepo"/hello +git -C "$hashStringRepo" add hello +git -C "$hashStringRepo" commit -m 'Initial' +hashStringRev=$(git -C "$hashStringRepo" rev-parse HEAD) + +hash1=$(nix eval --lazy-trees --raw --expr "builtins.hashString \"sha256\" (toString ((builtins.fetchGit { url = file://$hashStringRepo; rev = \"$hashStringRev\"; })))") +hash2=$(nix eval --lazy-trees --raw --expr "builtins.hashString \"sha256\" (toString ((builtins.fetchGit { url = file://$hashStringRepo; rev = \"$hashStringRev\"; })))") + +[[ "$hash1" = "$hash2" ]] diff --git a/tests/functional/fetchGitRefs.sh b/tests/functional/fetchGitRefs.sh index a7d1a2a2931c..9c7fb323eb7e 100755 --- a/tests/functional/fetchGitRefs.sh +++ b/tests/functional/fetchGitRefs.sh @@ -8,11 +8,9 @@ clearStoreIfPossible repo="$TEST_ROOT/git" -rm -rf "$repo" "${repo}-tmp" "$TEST_HOME/.cache/nix" +rm -rf "${repo}-tmp" "$TEST_HOME/.cache/nix" -git init "$repo" -git -C "$repo" config user.email "foobar@example.com" -git -C "$repo" config user.name "Foobar" +createGitRepo "$repo" echo utrecht > "$repo/hello" git -C "$repo" add hello diff --git a/tests/functional/fetchGitShallow.sh 
b/tests/functional/fetchGitShallow.sh index 4c21bd7ac80a..6b91d60cd9e3 100644 --- a/tests/functional/fetchGitShallow.sh +++ b/tests/functional/fetchGitShallow.sh @@ -6,9 +6,7 @@ source common.sh requireGit # Create a test repo with multiple commits for all our tests -git init "$TEST_ROOT/shallow-parent" -git -C "$TEST_ROOT/shallow-parent" config user.email "foobar@example.com" -git -C "$TEST_ROOT/shallow-parent" config user.name "Foobar" +createGitRepo "$TEST_ROOT/shallow-parent" # Add several commits to have history echo "{ outputs = _: {}; }" > "$TEST_ROOT/shallow-parent/flake.nix" diff --git a/tests/functional/fetchGitSubmodules.sh b/tests/functional/fetchGitSubmodules.sh index 2a25245be756..bf5fe5df3877 100755 --- a/tests/functional/fetchGitSubmodules.sh +++ b/tests/functional/fetchGitSubmodules.sh @@ -22,22 +22,16 @@ rm -rf "${rootRepo}" "${subRepo}" "$TEST_HOME"/.cache/nix export XDG_CONFIG_HOME=$TEST_HOME/.config git config --global protocol.file.allow always -initGitRepo() { - git init "$1" - git -C "$1" config user.email "foobar@example.com" - git -C "$1" config user.name "Foobar" -} - addGitContent() { echo "lorem ipsum" > "$1"/content git -C "$1" add content git -C "$1" commit -m "Initial commit" } -initGitRepo "$subRepo" +createGitRepo "$subRepo" addGitContent "$subRepo" -initGitRepo "$rootRepo" +createGitRepo "$rootRepo" git -C "$rootRepo" submodule init git -C "$rootRepo" submodule add "$subRepo" sub @@ -199,19 +193,19 @@ test_submodule_nested() { local repoB=$TEST_ROOT/submodule_nested/b local repoC=$TEST_ROOT/submodule_nested/c - rm -rf "$repoA" "$repoB" "$repoC" "$TEST_HOME"/.cache/nix + rm -rf "$TEST_HOME"/.cache/nix - initGitRepo "$repoC" + createGitRepo "$repoC" touch "$repoC"/inside-c git -C "$repoC" add inside-c addGitContent "$repoC" - initGitRepo "$repoB" + createGitRepo "$repoB" git -C "$repoB" submodule add "$repoC" c git -C "$repoB" add c addGitContent "$repoB" - initGitRepo "$repoA" + createGitRepo "$repoA" git -C "$repoA" submodule add 
"$repoB" b git -C "$repoA" add b addGitContent "$repoA" diff --git a/tests/functional/fetchGitVerification.sh b/tests/functional/fetchGitVerification.sh index 79c78d0c9f6f..3b5f9b866b9a 100755 --- a/tests/functional/fetchGitVerification.sh +++ b/tests/functional/fetchGitVerification.sh @@ -21,9 +21,7 @@ ssh-keygen -f "$keysDir/testkey2" -t rsa -P "" -C "test key 2" key2File="$keysDir/testkey2.pub" publicKey2=$(awk '{print $2}' "$key2File") -git init "$repo" -git -C "$repo" config user.email "foobar@example.com" -git -C "$repo" config user.name "Foobar" +createGitRepo "$repo" git -C "$repo" config gpg.format ssh echo 'hello' > "$repo"/text diff --git a/tests/functional/fetchPath.sh b/tests/functional/fetchPath.sh index 1df895b61662..2784afb0388e 100755 --- a/tests/functional/fetchPath.sh +++ b/tests/functional/fetchPath.sh @@ -3,9 +3,9 @@ source common.sh touch "$TEST_ROOT/foo" -t 202211111111 -# We only check whether 2022-11-1* **:**:** is the last modified date since -# `lastModified` is transformed into UTC in `builtins.fetchTarball`. -[[ "$(nix eval --impure --raw --expr "(builtins.fetchTree \"path://$TEST_ROOT/foo\").lastModifiedDate")" =~ 2022111.* ]] + +# The path fetcher does not return lastModified. +[[ "$(nix eval --impure --expr "(builtins.fetchTree \"path://$TEST_ROOT/foo\") ? lastModifiedDate")" = false ]] # Check that we can override lastModified for "path:" inputs. [[ "$(nix eval --impure --expr "(builtins.fetchTree { type = \"path\"; path = \"$TEST_ROOT/foo\"; lastModified = 123; }).lastModified")" = 123 ]] diff --git a/tests/functional/fetchurl.sh b/tests/functional/fetchurl.sh index c25ac321668a..96d46abf4684 100755 --- a/tests/functional/fetchurl.sh +++ b/tests/functional/fetchurl.sh @@ -71,7 +71,7 @@ echo "$outPath" | grepQuiet 'xyzzy' test -x "$outPath/fetchurl.sh" test -L "$outPath/symlink" -nix-store --delete "$outPath" +nix-store --delete "$outPath" --ignore-liveness # Test unpacking a compressed NAR. 
narxz="$TEST_ROOT/archive.nar.xz" diff --git a/tests/functional/flakes/build-time-flake-inputs.sh b/tests/functional/flakes/build-time-flake-inputs.sh new file mode 100644 index 000000000000..d1fc1c453603 --- /dev/null +++ b/tests/functional/flakes/build-time-flake-inputs.sh @@ -0,0 +1,88 @@ +#!/usr/bin/env bash + +source ./common.sh + +TODO_NixOS +enableFeatures "build-time-fetch-tree" +restartDaemon +requireGit + +lazy="$TEST_ROOT/lazy" +createGitRepo "$lazy" +echo world > "$lazy/who" +git -C "$lazy" add who +git -C "$lazy" commit -a -m foo + +repo="$TEST_ROOT/repo" + +createGitRepo "$repo" + +cat > "$repo/flake.nix" < "$lazy/who" +git -C "$lazy" commit -a -m foo + +nix flake update --flake "$repo" + +clearStore + +nix build --out-link "$TEST_ROOT/result" -L "$repo" +[[ $(cat "$TEST_ROOT/result") = utrecht ]] + +rm -rf "$lazy" + +clearStore + +expectStderr 1 nix build --out-link "$TEST_ROOT/result" -L "$repo" | grepQuiet "Cannot build.*source.drv" + +# `nix flake prefetch-inputs` should ignore build-time inputs. 
+depDir=$TEST_ROOT/dep +createGitRepo "$depDir" +createSimpleGitFlake "$depDir" + +cat > "$repo/flake.nix" < "$flakeDir"/flake.nix < "$flakeDir"/flake.nix <&1 && fail "nix flake check --all-systems should have failed" || true) -echo "$checkRes" | grepQuiet "unknown-attr" +echo "$checkRes" | grepQuiet "Evaluation check.*apps.system-1.default.isValidApp.*failed" cat > "$flakeDir"/flake.nix < "$flakeDir"/flake.nix < "$flakeDir"/flake.nix < "$flakeDir/flake.nix" <&1 | grepQuiet 'error: breaks' -expect 1 nix build "$flake1Dir#foo.bar" 2>&1 | grepQuiet 'error: breaks' +expect 1 nix build --no-link "$flake1Dir#foo.bar" 2>&1 | grepQuiet 'error: breaks' +expect 1 nix build --no-link "$flake1Dir#foo.bar" 2>&1 | grepQuiet 'error: breaks' # Stack overflow error must not be cached -expect 1 nix build --max-call-depth 50 "$flake1Dir#stack-depth" 2>&1 \ +expect 1 nix build --no-link --max-call-depth 50 "$flake1Dir#stack-depth" 2>&1 \ | grepQuiet 'error: stack overflow; max-call-depth exceeded' # If the SO is cached, the following invocation will produce a cached failure; we expect it to succeed nix build --no-link "$flake1Dir#stack-depth" @@ -48,3 +48,11 @@ nix build --no-link "$flake1Dir#stack-depth" expect 1 nix build "$flake1Dir#ifd" --option allow-import-from-derivation false 2>&1 \ | grepQuiet 'error: cannot build .* during evaluation because the option '\''allow-import-from-derivation'\'' is disabled' nix build --no-link "$flake1Dir#ifd" + +# Test that a store derivation is recreated when it has been deleted +# but the corresponding attribute is still cached. +if ! 
isTestOnNixOS; then + nix build --no-link "$flake1Dir#drv" + clearStore + nix build --no-link "$flake1Dir#drv" +fi diff --git a/tests/functional/flakes/flakes.sh b/tests/functional/flakes/flakes.sh index d9e187251f9e..c33b2a64ae1d 100755 --- a/tests/functional/flakes/flakes.sh +++ b/tests/functional/flakes/flakes.sh @@ -69,7 +69,9 @@ nix flake metadata "$flake1Dir" | grepQuiet 'URL:.*flake1.*' # Test 'nix flake metadata --json'. json=$(nix flake metadata flake1 --json | jq .) [[ $(echo "$json" | jq -r .description) = 'Bla bla' ]] -[[ -d $(echo "$json" | jq -r .path) ]] +if [[ $(nix config show lazy-trees) = false ]]; then + [[ -d $(echo "$json" | jq -r .path) ]] +fi [[ $(echo "$json" | jq -r .lastModified) = $(git -C "$flake1Dir" log -n1 --format=%ct) ]] hash1=$(echo "$json" | jq -r .revision) [[ -n $(echo "$json" | jq -r .fingerprint) ]] @@ -77,6 +79,7 @@ hash1=$(echo "$json" | jq -r .revision) echo foo > "$flake1Dir/foo" git -C "$flake1Dir" add "$flake1Dir"/foo [[ $(nix flake metadata flake1 --json --refresh | jq -r .dirtyRevision) == "$hash1-dirty" ]] +[[ $(_NIX_TEST_FAIL_ON_LARGE_PATH=1 nix flake metadata flake1 --json --refresh --warn-large-path-threshold 1 --lazy-trees | jq -r .dirtyRevision) == "$hash1-dirty" ]] [[ "$(nix flake metadata flake1 --json | jq -r .fingerprint)" != null ]] echo -n '# foo' >> "$flake1Dir/flake.nix" @@ -110,6 +113,11 @@ nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir#default" nix build -o "$TEST_ROOT/result" "$flake1Dir?ref=HEAD#default" nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir?ref=HEAD#default" +# Check that the fetcher cache works. +if [[ $(nix config show lazy-trees) = false ]]; then + nix build -o "$TEST_ROOT/result" "git+file://$flake1Dir?ref=HEAD#default" -vvvvv 2>&1 | grepQuiet "source path.*cache hit" +fi + # Check that relative paths are allowed for git flakes. # This may change in the future once git submodule support is refined. # See: https://discourse.nixos.org/t/57783 and #9708. 
@@ -161,7 +169,12 @@ expect 1 nix build -o "$TEST_ROOT/result" "$flake2Dir#bar" --no-update-lock-file nix build -o "$TEST_ROOT/result" "$flake2Dir#bar" --commit-lock-file [[ -e "$flake2Dir/flake.lock" ]] [[ -z $(git -C "$flake2Dir" diff main || echo failed) ]] -[[ $(jq --indent 0 --compact-output . < "$flake2Dir/flake.lock") =~ ^'{"nodes":{"flake1":{"locked":{"lastModified":'.*',"narHash":"sha256-'.*'","ref":"refs/heads/master","rev":"'.*'","revCount":2,"type":"git","url":"file:///'.*'"},"original":{"id":"flake1","type":"indirect"}},"root":{"inputs":{"flake1":"flake1"}}},"root":"root","version":7}'$ ]] +[[ $(jq --indent 0 --compact-output . < "$flake2Dir/flake.lock") =~ ^'{"nodes":{"flake1":{"locked":{"lastModified":'[0-9]*',"narHash":"sha256-'.*'","ref":"refs/heads/master","rev":"'.*'","revCount":2,"type":"git","url":"file:///'.*'"},"original":{"id":"flake1","type":"indirect"}},"root":{"inputs":{"flake1":"flake1"}}},"root":"root","version":7}'$ ]] +if [[ $(nix config show lazy-trees) = true ]]; then + # Test that `lazy-locks` causes NAR hashes to be omitted from the lock file. + nix flake update --flake "$flake2Dir" --commit-lock-file --lazy-locks + [[ $(jq --indent 0 --compact-output . < "$flake2Dir/flake.lock") =~ ^'{"nodes":{"flake1":{"locked":{"lastModified":'[0-9]*',"ref":"refs/heads/master","rev":"'.*'","revCount":2,"type":"git","url":"file:///'.*'"},"original":{"id":"flake1","type":"indirect"}},"root":{"inputs":{"flake1":"flake1"}}},"root":"root","version":7}'$ ]] +fi # Rerunning the build should not change the lockfile. nix build -o "$TEST_ROOT/result" "$flake2Dir#bar" @@ -264,6 +277,9 @@ nix registry remove flake1 [[ $(nix registry list | wc -l) == 4 ]] [[ $(nix registry resolve flake1) = "git+file://$flake1Dir" ]] +# Test the builtin fallback registry. 
+[[ $(nix registry resolve nixpkgs --flake-registry http://fail.invalid.org/sdklsdklsd --download-attempts 1) = github:NixOS/nixpkgs/nixpkgs-unstable ]] + # Test 'nix registry list' with a disabled global registry. nix registry add user-flake1 git+file://"$flake1Dir" nix registry add user-flake2 "git+file://$percentEncodedFlake2Dir" @@ -420,7 +436,7 @@ nix flake metadata "$flake3Dir" --json --eval-store "dummy://?read-only=false" | rm -rf "$badFlakeDir" mkdir "$badFlakeDir" echo INVALID > "$badFlakeDir"/flake.nix -nix store delete "$(nix store add-path "$badFlakeDir")" +nix store delete --ignore-liveness "$(nix store add-path "$badFlakeDir")" [[ $(nix path-info "$(nix store add-path "$flake1Dir")") =~ flake1 ]] [[ $(nix path-info path:"$(nix store add-path "$flake1Dir")") =~ simple ]] diff --git a/tests/functional/flakes/follow-paths.sh b/tests/functional/flakes/follow-paths.sh index f658a0847f7a..3d668d687ab2 100755 --- a/tests/functional/flakes/follow-paths.sh +++ b/tests/functional/flakes/follow-paths.sh @@ -131,7 +131,7 @@ EOF git -C "$flakeFollowsA" add flake.nix expect 1 nix flake lock "$flakeFollowsA" 2>&1 | grep '/flakeB.*is forbidden in pure evaluation mode' -expect 1 nix flake lock --impure "$flakeFollowsA" 2>&1 | grep "'flakeB' is too short to be a valid store path" +expect 1 nix flake lock --impure "$flakeFollowsA" 2>&1 | grep "error: 'flakeB' is too short to be a valid store path" # Test relative non-flake inputs. cat > "$flakeFollowsA"/flake.nix < "$flake1Dir/subflake/flake.nix" < "$flakeDir/a" -(cd "$flakeDir" && nix flake init) # check idempotence +(cd "$flakeDir" && nix flake init --template "git+file://$templatesDir") # check idempotence # Test 'nix flake init' with conflicts createGitRepo "$flakeDir" echo b > "$flakeDir/a" pushd "$flakeDir" -(! nix flake init) |& grep "refusing to overwrite existing file '$flakeDir/a'" +(! 
nix flake init --template "git+file://$templatesDir") |& grep "refusing to overwrite existing file '$flakeDir/a'" popd git -C "$flakeDir" commit -a -m 'Changed' diff --git a/tests/functional/flakes/meson.build b/tests/functional/flakes/meson.build index de76a55804a8..3658b66aceaa 100644 --- a/tests/functional/flakes/meson.build +++ b/tests/functional/flakes/meson.build @@ -34,6 +34,12 @@ suites += { 'source-paths.sh', 'old-lockfiles.sh', 'trace-ifd.sh', + 'build-time-flake-inputs.sh', + 'substitution.sh', + 'shallow.sh', + 'get-flake.sh', + 'provenance.sh', + 'search.sh', ], 'workdir' : meson.current_source_dir(), } diff --git a/tests/functional/flakes/provenance.sh b/tests/functional/flakes/provenance.sh new file mode 100644 index 000000000000..933436983f38 --- /dev/null +++ b/tests/functional/flakes/provenance.sh @@ -0,0 +1,355 @@ +#!/usr/bin/env bash + +experimental_features="provenance" + +source common.sh + +TODO_NixOS + +createFlake1 + +outPath=$(nix build --print-out-paths --no-link "$flake1Dir#packages.$system.default") +drvPath=$(nix eval --raw "$flake1Dir#packages.$system.default.drvPath") +rev=$(nix flake metadata --json "$flake1Dir" | jq -r .locked.rev) +lastModified=$(nix flake metadata --json "$flake1Dir" | jq -r .locked.lastModified) +treePath=$(nix flake prefetch --json "$flake1Dir" | jq -r .storePath) +builder=$(nix eval --raw "$flake1Dir#packages.$system.default._builder") + +# Building a derivation should have tree+subpath+flake+meta+build provenance. 
+[[ "$(nix path-info --json --json-format 1 "$outPath" | jq ".\"$outPath\".provenance")" == "$(cat < "$flake1Dir/somefile" +git -C "$flake1Dir" add somefile +nix build --impure --print-out-paths --no-link "$flake1Dir#packages.$system.default" +[[ $(nix path-info --json --json-format 1 "$builder" | jq ".\"$builder\".provenance") != null ]] + +[[ "$(nix provenance show "$outPath")" = "$(cat < "$TEST_ROOT/counter" +cat > "$flake1Dir/flake.nix" < \$out + echo x >> "$TEST_ROOT/counter" + ''; + }; + }; + }; +} +EOF +outPath=$(nix build --print-out-paths --no-link "$flake1Dir") + +expectStderr 1 nix provenance verify --all | grepQuiet "derivation .* may not be deterministic: output .* differs" + +# Test various types of source files. +clearStore +echo x > "$TEST_ROOT/counter" +cat > "$flake1Dir/flake.nix" < "$TEST_ROOT/hello.txt" + +path="$(nix store prefetch-file --json "file://$TEST_ROOT/hello.txt" | jq -r .storePath)" + +[[ "$(nix provenance show "$path")" = $(cat < "$TEST_ROOT/hello.txt" + +expectStderr 1 nix provenance verify "$path" | grepQuiet "hash mismatch for URL" diff --git a/tests/functional/flakes/relative-paths.sh b/tests/functional/flakes/relative-paths.sh index 7480cd504584..323e97ba9cc0 100644 --- a/tests/functional/flakes/relative-paths.sh +++ b/tests/functional/flakes/relative-paths.sh @@ -135,10 +135,8 @@ EOF # https://github.com/NixOS/nix/issues/13164 mkdir -p "$TEST_ROOT/issue-13164/nested-flake1/nested-flake2" ( + initGitRepo "$TEST_ROOT/issue-13164" cd "$TEST_ROOT/issue-13164" - git init - git config --global user.email "you@example.com" - git config --global user.name "Your Name" cat >flake.nix < "$flake1Dir/flake.nix" <> "$flake1Dir/flake.nix" +git -C "$flake1Dir" commit -a -m bla + +cat > "$repoDir"/flake.nix < show-output.json nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in -assert show_output.packages.someOtherSystem.default == {}; -assert 
show_output.packages.${builtins.currentSystem}.default.name == "simple"; -assert show_output.legacyPackages.${builtins.currentSystem} == {}; +assert show_output.inventory.packages.output.children.someOtherSystem.filtered; +assert show_output.inventory.packages.output.children.${builtins.currentSystem}.children.default.derivation.name == "simple"; +assert show_output.inventory.legacyPackages.skipped; true ' @@ -28,8 +28,8 @@ nix flake show --json --all-systems > show-output.json nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in -assert show_output.packages.someOtherSystem.default.name == "simple"; -assert show_output.legacyPackages.${builtins.currentSystem} == {}; +assert show_output.inventory.packages.output.children.someOtherSystem.children.default.derivation.name == "simple"; +assert show_output.inventory.legacyPackages.skipped; true ' @@ -39,34 +39,7 @@ nix flake show --json --legacy > show-output.json nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in -assert show_output.legacyPackages.${builtins.currentSystem}.hello.name == "simple"; -true -' - -# Test that attributes are only reported when they have actual content -cat >flake.nix < show-output.json -nix eval --impure --expr ' -let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); -in -assert show_output == { }; +assert show_output.inventory.legacyPackages.output.children.${builtins.currentSystem}.children.hello.derivation.name == "simple"; true ' @@ -87,8 +60,8 @@ nix flake show --json --legacy --all-systems > show-output.json nix eval --impure --expr ' let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); in -assert show_output.legacyPackages.${builtins.currentSystem}.AAAAAASomeThingsFailToEvaluate == { }; -assert show_output.legacyPackages.${builtins.currentSystem}.simple.name == "simple"; +assert 
show_output.inventory.legacyPackages.output.children.${builtins.currentSystem}.children.AAAAAASomeThingsFailToEvaluate.failed; +assert show_output.inventory.legacyPackages.output.children.${builtins.currentSystem}.children.simple.derivation.name == "simple"; true ' @@ -98,35 +71,4 @@ popd writeIfdFlake "$flakeDir" pushd "$flakeDir" - -nix flake show --json > show-output.json -# shellcheck disable=SC2016 -nix eval --impure --expr ' -let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); -in -assert show_output.packages.${builtins.currentSystem}.default == { }; -true -' - - -# Test that nix keeps going even when packages.$SYSTEM contains not derivations -cat >flake.nix < show-output.json -# shellcheck disable=SC2016 -nix eval --impure --expr ' -let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); -in -assert show_output.packages.${builtins.currentSystem}.not-a-derivation == {}; -true -' - +[[ $(nix flake show --json | jq -r ".inventory.packages.output.children.\"$system\".children.default.derivation.name") = top ]] diff --git a/tests/functional/flakes/source-paths.sh b/tests/functional/flakes/source-paths.sh index 3aa3683c27cf..e4f4ec1cda84 100644 --- a/tests/functional/flakes/source-paths.sh +++ b/tests/functional/flakes/source-paths.sh @@ -30,10 +30,10 @@ expectStderr 1 nix eval "$repo#y" | grepQuiet "at $repo/flake.nix:" git -C "$repo" commit -a -m foo -expectStderr 1 nix eval "git+file://$repo?ref=master#y" | grepQuiet "at «git+file://$repo?ref=master&rev=.*»/flake.nix:" +expectStderr 1 nix eval "git+file://$repo?ref=master#y" | grepQuiet "at «git+file://$repo?rev=.*»/flake.nix:" expectStderr 1 nix eval "$repo#z" | grepQuiet "error: Path 'foo' does not exist in Git repository \"$repo\"." 
-expectStderr 1 nix eval "git+file://$repo?ref=master#z" | grepQuiet "error: '«git+file://$repo?ref=master&rev=.*»/foo' does not exist" +expectStderr 1 nix eval "git+file://$repo?ref=master#z" | grepQuiet "error: '«git+file://$repo?rev=.*»/foo' does not exist" expectStderr 1 nix eval "$repo#a" | grepQuiet "error: Path 'foo' does not exist in Git repository \"$repo\"." echo 123 > "$repo/foo" diff --git a/tests/functional/flakes/substitution.sh b/tests/functional/flakes/substitution.sh new file mode 100644 index 000000000000..97a04931abfc --- /dev/null +++ b/tests/functional/flakes/substitution.sh @@ -0,0 +1,31 @@ +#! /usr/bin/env bash + +# Test that inputs are substituted if they cannot be fetched from their original location. + +source ./common.sh + +if [[ $(nix config show lazy-trees) = true ]]; then + exit 0 +fi + +TODO_NixOS + +createFlake1 +createFlake2 + +nix build --no-link "$flake2Dir#bar" + +path1="$(nix flake metadata --json "$flake1Dir" | jq -r .path)" + +# Building after an input disappeared should succeed, because it's still in the Nix store. +mv "$flake1Dir" "$flake1Dir-tmp" +nix build --no-link "$flake2Dir#bar" --no-eval-cache + +# Check that Nix will fall back to fetching the input from a substituter. 
+cache="file://$TEST_ROOT/binary-cache" +nix copy --to "$cache" "$path1" +clearStore +nix build --no-link "$flake2Dir#bar" --no-eval-cache --substitute --substituters "$cache" + +clearStore +expectStderr 1 nix build --no-link "$flake2Dir#bar" --no-eval-cache | grepQuiet "Git repository.*does not exist" diff --git a/tests/functional/flakes/unlocked-override.sh b/tests/functional/flakes/unlocked-override.sh index ed05440de03b..ed4d131b7ad0 100755 --- a/tests/functional/flakes/unlocked-override.sh +++ b/tests/functional/flakes/unlocked-override.sh @@ -36,6 +36,7 @@ expectStderr 1 nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/f grepQuiet "Not writing lock file.*because it has an unlocked input" nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" --allow-dirty-locks +_NIX_TEST_FAIL_ON_LARGE_PATH=1 nix flake lock "$flake2Dir" --override-input flake1 "$TEST_ROOT/flake1" --allow-dirty-locks --warn-large-path-threshold 1 --lazy-trees # Using a lock file with a dirty lock does not require --allow-dirty-locks, but should print a warning. expectStderr 0 nix eval "$flake2Dir#x" | diff --git a/tests/functional/formatter.sh b/tests/functional/formatter.sh index 03b31708d670..7071055d153e 100755 --- a/tests/functional/formatter.sh +++ b/tests/functional/formatter.sh @@ -85,4 +85,6 @@ rm ./my-result # Flake outputs check. 
nix flake check -nix flake show | grep -P "package 'formatter'" + +clearStore +expectStderr 0 nix flake show | grepQuiet ": formatter" diff --git a/tests/functional/gc-runtime.nix b/tests/functional/gc-runtime.nix index ee5980bdff98..df7f8ad16478 100644 --- a/tests/functional/gc-runtime.nix +++ b/tests/functional/gc-runtime.nix @@ -9,6 +9,7 @@ mkDerivation { cat > $out/program < \$TEST_ROOT/fifo sleep 10000 EOF diff --git a/tests/functional/gc-runtime.sh b/tests/functional/gc-runtime.sh index 0cccaaf16ab1..34e99415d5c2 100755 --- a/tests/functional/gc-runtime.sh +++ b/tests/functional/gc-runtime.sh @@ -21,11 +21,16 @@ nix-env -p "$profiles/test" -f ./gc-runtime.nix -i gc-runtime outPath=$(nix-env -p "$profiles/test" -q --no-name --out-path gc-runtime) echo "$outPath" +fifo="$TEST_ROOT/fifo" +mkfifo "$fifo" + echo "backgrounding program..." -"$profiles"/test/program & -sleep 2 # hack - wait for the program to get started +"$profiles"/test/program "$fifo" & child=$! echo PID=$child +cat "$fifo" + +expectStderr 1 nix-store --delete "$outPath" | grepQuiet "Cannot delete path.*because it's referenced by the GC root '/proc/" nix-env -p "$profiles/test" -e gc-runtime nix-env -p "$profiles/test" --delete-generations old diff --git a/tests/functional/gc.sh b/tests/functional/gc.sh index c58f47021f84..3ade6e4f582c 100755 --- a/tests/functional/gc.sh +++ b/tests/functional/gc.sh @@ -13,7 +13,7 @@ outPath=$(nix-store -rvv "$drvPath") rm -f "$NIX_STATE_DIR/gcroots/foo" ln -sf "$outPath" "$NIX_STATE_DIR/gcroots/foo" -[ "$(nix-store -q --roots "$outPath")" = "$NIX_STATE_DIR/gcroots/foo -> $outPath" ] +expectStderr 0 nix-store -q --roots "$outPath" | grepQuiet "$NIX_STATE_DIR/gcroots/foo -> $outPath" nix-store --gc --print-roots | grep "$outPath" nix-store --gc --print-live | grep "$outPath" @@ -23,10 +23,10 @@ if nix-store --gc --print-dead | grep -E "$outPath"$; then false; fi nix-store --gc --print-dead inUse=$(readLink "$outPath/reference-to-input-2") -if nix-store --delete 
"$inUse"; then false; fi +expectStderr 1 nix-store --delete "$inUse" | grepQuiet "Cannot delete path.*because it's referenced by path '" test -e "$inUse" -if nix-store --delete "$outPath"; then false; fi +expectStderr 1 nix-store --delete "$outPath" | grepQuiet "Cannot delete path.*because it's referenced by the GC root " test -e "$outPath" for i in "$NIX_STORE_DIR"/*; do diff --git a/tests/functional/git-hashing/simple-common.sh b/tests/functional/git-hashing/simple-common.sh index a776ec43e00b..1c5b0bf6552a 100644 --- a/tests/functional/git-hashing/simple-common.sh +++ b/tests/functional/git-hashing/simple-common.sh @@ -7,13 +7,6 @@ source common.sh repo="$TEST_ROOT/scratch" -initRepo () { - git init "$repo" --object-format="$hashAlgo" - - git -C "$repo" config user.email "you@example.com" - git -C "$repo" config user.name "Your Name" -} - # Compare Nix's and git's implementation of git hashing try () { local expected="$1" diff --git a/tests/functional/git-hashing/simple-sha1.sh b/tests/functional/git-hashing/simple-sha1.sh index a883ea84808e..f8024f80aabf 100755 --- a/tests/functional/git-hashing/simple-sha1.sh +++ b/tests/functional/git-hashing/simple-sha1.sh @@ -4,7 +4,7 @@ hashAlgo=sha1 source simple-common.sh -initRepo +createGitRepo "$repo" "--object-format=$hashAlgo" # blob test0 diff --git a/tests/functional/git-hashing/simple-sha256.sh b/tests/functional/git-hashing/simple-sha256.sh index c7da71e00c71..0f2a3a2e6c8b 100755 --- a/tests/functional/git-hashing/simple-sha256.sh +++ b/tests/functional/git-hashing/simple-sha256.sh @@ -6,7 +6,7 @@ source simple-common.sh requireDaemonNewerThan 2.31pre20250724 -initRepo +createGitRepo "$repo" "--object-format=$hashAlgo" # blob test0 diff --git a/tests/functional/impure-derivations.sh b/tests/functional/impure-derivations.sh index f887ca408f77..89392ce30712 100755 --- a/tests/functional/impure-derivations.sh +++ b/tests/functional/impure-derivations.sh @@ -21,7 +21,7 @@ drvPath2=$(nix derivation add < 
"$TEST_HOME"/impure-drv.json) [[ "$drvPath" = "$drvPath2" ]] # But only with the experimental feature! -expectStderr 1 nix derivation add < "$TEST_HOME"/impure-drv.json --experimental-features nix-command | grepQuiet "experimental Nix feature 'impure-derivations' is disabled" +expectStderr 1 nix derivation add < "$TEST_HOME"/impure-drv.json --experimental-features '' | grepQuiet "experimental Nix feature 'impure-derivations' is disabled" nix build --dry-run --json --file ./impure-derivations.nix impure.all json=$(nix build -L --no-link --json --file ./impure-derivations.nix impure.all) diff --git a/tests/functional/lang.sh b/tests/functional/lang.sh index e64663d30648..63264ec22770 100755 --- a/tests/functional/lang.sh +++ b/tests/functional/lang.sh @@ -27,7 +27,7 @@ nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" 123' 2> expectStderr 1 nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" (throw "Foo")' | grepQuiet Hello expectStderr 1 nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello %" (throw "Foo")' | grepQuiet 'Hello %' # Relies on parsing the expression derivation as a derivation, can't use --eval -expectStderr 1 nix-instantiate --show-trace lang/non-eval-fail-bad-drvPath.nix | grepQuiet "store path '8qlfcic10lw5304gqm8q45nr7g7jl62b-cachix-1.7.3-bin' is not a valid derivation path" +expectStderr 1 nix-instantiate --show-trace lang/non-eval-fail-bad-drvPath.nix | grepQuiet "store path '2chwzswhhmpxbgc981i2vcz7xj4d1in9-cachix-1.7.3-bin' is not a valid derivation path" nix-instantiate --eval -E 'let x = builtins.trace { x = x; } true; in x' \ diff --git a/tests/functional/lang/eval-fail-blackhole.err.exp b/tests/functional/lang/eval-fail-blackhole.err.exp index 95e33a5fe456..d11eb338f9a6 100644 --- a/tests/functional/lang/eval-fail-blackhole.err.exp +++ b/tests/functional/lang/eval-fail-blackhole.err.exp @@ -7,8 +7,8 @@ error: 3| x = y; error: infinite recursion encountered - at 
/pwd/lang/eval-fail-blackhole.nix:3:7: + at /pwd/lang/eval-fail-blackhole.nix:2:3: + 1| let { 2| body = x; + | ^ 3| x = y; - | ^ - 4| y = x; diff --git a/tests/functional/lang/eval-fail-deepseq-stack-overflow.err.exp b/tests/functional/lang/eval-fail-deepseq-stack-overflow.err.exp index 4cc43ca095e1..f142b5c4d45d 100644 --- a/tests/functional/lang/eval-fail-deepseq-stack-overflow.err.exp +++ b/tests/functional/lang/eval-fail-deepseq-stack-overflow.err.exp @@ -23,8 +23,3 @@ error: 7| in error: stack overflow; max-call-depth exceeded - at /pwd/lang/eval-fail-deepseq-stack-overflow.nix:5:28: - 4| let - 5| long = builtins.genList (x: x) 100000; - | ^ - 6| reverseLinkedList = builtins.foldl' (tail: head: { inherit head tail; }) null long; diff --git a/tests/functional/lang/eval-fail-derivation-name.err.exp b/tests/functional/lang/eval-fail-derivation-name.err.exp index ba5ff2d002ab..dad9ff6a9f69 100644 --- a/tests/functional/lang/eval-fail-derivation-name.err.exp +++ b/tests/functional/lang/eval-fail-derivation-name.err.exp @@ -16,7 +16,7 @@ error: … while calling the 'derivationStrict' builtin at «nix-internal»/derivation-internal.nix::: | - | strict = derivationStrict drvAttrs; + | strict = drvFunc drvAttrs; | ^ | diff --git a/tests/functional/lang/eval-fail-derivation-structuredAttrs-stack-overflow.err.exp b/tests/functional/lang/eval-fail-derivation-structuredAttrs-stack-overflow.err.exp index c61eab0aa422..1a8dfa681f26 100644 --- a/tests/functional/lang/eval-fail-derivation-structuredAttrs-stack-overflow.err.exp +++ b/tests/functional/lang/eval-fail-derivation-structuredAttrs-stack-overflow.err.exp @@ -1,24 +1,24 @@ error: … while evaluating the attribute 'outPath' - at «nix-internal»/derivation-internal.nix:50:7: - 49| value = commonAttrs // { - 50| outPath = builtins.getAttr outputName strict; + at «nix-internal»/derivation-internal.nix:51:7: + 50| value = commonAttrs // { + 51| outPath = builtins.getAttr outputName strict; | ^ - 51| drvPath = strict.drvPath; + 
52| drvPath = strict.drvPath; … while calling the 'getAttr' builtin - at «nix-internal»/derivation-internal.nix:50:17: - 49| value = commonAttrs // { - 50| outPath = builtins.getAttr outputName strict; + at «nix-internal»/derivation-internal.nix:51:17: + 50| value = commonAttrs // { + 51| outPath = builtins.getAttr outputName strict; | ^ - 51| drvPath = strict.drvPath; + 52| drvPath = strict.drvPath; … while calling the 'derivationStrict' builtin - at «nix-internal»/derivation-internal.nix:37:12: - 36| - 37| strict = derivationStrict drvAttrs; + at «nix-internal»/derivation-internal.nix:38:12: + 37| + 38| strict = drvFunc drvAttrs; | ^ - 38| + 39| … while evaluating derivation 'test' whose name attribute is located at /pwd/lang/eval-fail-derivation-structuredAttrs-stack-overflow.nix:5:3 diff --git a/tests/functional/lang/eval-fail-recursion.err.exp b/tests/functional/lang/eval-fail-recursion.err.exp index ee41ff46bea9..21bf7a695bdd 100644 --- a/tests/functional/lang/eval-fail-recursion.err.exp +++ b/tests/functional/lang/eval-fail-recursion.err.exp @@ -1,14 +1,14 @@ error: … in the right operand of the update (//) operator - at /pwd/lang/eval-fail-recursion.nix:2:14: + at /pwd/lang/eval-fail-recursion.nix:2:11: 1| let 2| a = { } // a; - | ^ + | ^ 3| in error: infinite recursion encountered - at /pwd/lang/eval-fail-recursion.nix:2:14: - 1| let - 2| a = { } // a; - | ^ + at /pwd/lang/eval-fail-recursion.nix:4:1: 3| in + 4| a.foo + | ^ + 5| diff --git a/tests/functional/lang/eval-fail-scope-5.err.exp b/tests/functional/lang/eval-fail-scope-5.err.exp index 6edc85f4f161..557054b53549 100644 --- a/tests/functional/lang/eval-fail-scope-5.err.exp +++ b/tests/functional/lang/eval-fail-scope-5.err.exp @@ -21,8 +21,8 @@ error: 8| x ? y, error: infinite recursion encountered - at /pwd/lang/eval-fail-scope-5.nix:8:11: - 7| { - 8| x ? y, - | ^ - 9| y ? 
x, + at /pwd/lang/eval-fail-scope-5.nix:13:3: + 12| + 13| body = f { }; + | ^ + 14| diff --git a/tests/functional/lang/eval-okay-filterattrs-names.exp b/tests/functional/lang/eval-okay-filterattrs-names.exp new file mode 100644 index 000000000000..3f07d6e1a028 --- /dev/null +++ b/tests/functional/lang/eval-okay-filterattrs-names.exp @@ -0,0 +1 @@ +{ a = 3; } diff --git a/tests/functional/lang/eval-okay-filterattrs-names.nix b/tests/functional/lang/eval-okay-filterattrs-names.nix new file mode 100644 index 000000000000..94108fbefdaf --- /dev/null +++ b/tests/functional/lang/eval-okay-filterattrs-names.nix @@ -0,0 +1,5 @@ +builtins.filterAttrs (name: value: name == "a") { + a = 3; + b = 6; + c = 10; +} diff --git a/tests/functional/lang/eval-okay-filterattrs.exp b/tests/functional/lang/eval-okay-filterattrs.exp new file mode 100644 index 000000000000..74b9825e9c42 --- /dev/null +++ b/tests/functional/lang/eval-okay-filterattrs.exp @@ -0,0 +1 @@ +{ b = 6; c = 10; } diff --git a/tests/functional/lang/eval-okay-filterattrs.nix b/tests/functional/lang/eval-okay-filterattrs.nix new file mode 100644 index 000000000000..28d37bbe7843 --- /dev/null +++ b/tests/functional/lang/eval-okay-filterattrs.nix @@ -0,0 +1,5 @@ +builtins.filterAttrs (name: value: value > 5) { + a = 3; + b = 6; + c = 10; +} diff --git a/tests/functional/lang/eval-okay-regex-match2.nix b/tests/functional/lang/eval-okay-regex-match2.nix index 31a94423d86d..2345b265535b 100644 --- a/tests/functional/lang/eval-okay-regex-match2.nix +++ b/tests/functional/lang/eval-okay-regex-match2.nix @@ -155,7 +155,7 @@ builtins.map ] [ ''.*pypy.*'' - ''/nix/store/8w718rm43x7z73xhw9d6vh8s4snrq67h-python3-3.12.10/bin/python3.12'' + ''/nix/store/iqlzcyc1z7nv804n9wc5k5i0l180wnbs-python3-3.12.10/bin/python3.12'' ] [ ''(.*/)?\.\.(/.*)?'' @@ -199,19 +199,19 @@ builtins.map ] [ ''.*-polly.*'' - ''/nix/store/0yxfdnfxbzczjxhgdpac81jnas194wfj-gnu-install-dirs.patch'' + 
''/nix/store/21yv6cysn8axxjyh7dbsnnmbp9nprg9i-gnu-install-dirs.patch'' ] [ ''.*-polly.*'' - ''/nix/store/jh2pda7psaasq85b2rrigmkjdbl8d0a1-llvm-lit-cfg-add-libs-to-dylib-path.patch'' + ''/nix/store/96dqnv9v20fi7glzsah6qx5zypbkrwsh-llvm-lit-cfg-add-libs-to-dylib-path.patch'' ] [ ''.*-polly.*'' - ''/nix/store/x868j4ih7wqiivf6wr9m4g424jav0hpq-gnu-install-dirs-polly.patch'' + ''/nix/store/hjlgp59nhxjj2y8ghf7mmqgbirqarccy-gnu-install-dirs-polly.patch'' ] [ ''.*-polly.*'' - ''/nix/store/gr73nf6sca9nyzl88x58y3qxrav04yhd-polly-lit-cfg-add-libs-to-dylib-path.patch'' + ''/nix/store/ybagzhw2933fvgi95qgbyw6i4avahyzr-polly-lit-cfg-add-libs-to-dylib-path.patch'' ] [ ''(.*/)?\.\.(/.*)?'' @@ -367,7 +367,7 @@ builtins.map ] [ ''.*pypy.*'' - ''/nix/store/8w718rm43x7z73xhw9d6vh8s4snrq67h-python3-3.12.10/bin/python3.12'' + ''/nix/store/iqlzcyc1z7nv804n9wc5k5i0l180wnbs-python3-3.12.10/bin/python3.12'' ] [ ''(.*)\.git'' @@ -453,11 +453,11 @@ builtins.map ] [ ''.*llvm-tblgen.*'' - ''-DLLVM_TABLEGEN:STRING=/nix/store/xp9hkw8nsw9p81d69yvcg1yr6f7vh71c-llvm-tblgen-18.1.8/bin/llvm-tblgen'' + ''-DLLVM_TABLEGEN:STRING=/nix/store/sclapmhdj6i9h02y7s5a630kfy55v9h1-llvm-tblgen-18.1.8/bin/llvm-tblgen'' ] [ ''.*llvm-tblgen.*'' - ''-DLLVM_TABLEGEN_EXE:STRING=/nix/store/xp9hkw8nsw9p81d69yvcg1yr6f7vh71c-llvm-tblgen-18.1.8/bin/llvm-tblgen'' + ''-DLLVM_TABLEGEN_EXE:STRING=/nix/store/sclapmhdj6i9h02y7s5a630kfy55v9h1-llvm-tblgen-18.1.8/bin/llvm-tblgen'' ] [ ''(.+)-b(.+)'' diff --git a/tests/functional/lang/non-eval-fail-bad-drvPath.nix b/tests/functional/lang/non-eval-fail-bad-drvPath.nix index 23639bc54651..327a2cb2c9f2 100644 --- a/tests/functional/lang/non-eval-fail-bad-drvPath.nix +++ b/tests/functional/lang/non-eval-fail-bad-drvPath.nix @@ -5,9 +5,9 @@ let system = builtins.currentSystem; outputs = [ "out" ]; # Illegal, because does not end in `.drv` - drvPath = "${builtins.storeDir}/8qlfcic10lw5304gqm8q45nr7g7jl62b-cachix-1.7.3-bin"; + drvPath = 
"${builtins.storeDir}/2chwzswhhmpxbgc981i2vcz7xj4d1in9-cachix-1.7.3-bin"; outputName = "out"; - outPath = "${builtins.storeDir}/8qlfcic10lw5304gqm8q45nr7g7jl62b-cachix-1.7.3-bin"; + outPath = "${builtins.storeDir}/2chwzswhhmpxbgc981i2vcz7xj4d1in9-cachix-1.7.3-bin"; out = package; }; in diff --git a/tests/functional/linux-sandbox.sh b/tests/functional/linux-sandbox.sh index c3ddf6ce65f7..484ad1d2b688 100755 --- a/tests/functional/linux-sandbox.sh +++ b/tests/functional/linux-sandbox.sh @@ -96,3 +96,9 @@ nix-sandbox-build symlink-derivation.nix -A test_sandbox_paths \ --option extra-sandbox-paths "/dir=$TEST_ROOT" \ --option extra-sandbox-paths "/symlinkDir=$symlinkDir" \ --option extra-sandbox-paths "/symlink=$symlinkcert" + +# Nonexistent sandbox paths should error early in the build process +# shellcheck disable=SC2016 +expectStderr 1 nix-sandbox-build --option extra-sandbox-paths '/does-not-exist' \ + -E 'with import '"${config_nix}"'; mkDerivation { name = "trivial"; buildCommand = "echo > $out"; }' | + grepQuiet "path '/does-not-exist' is configured as part of the \`sandbox-paths\` option, but is inaccessible" diff --git a/tests/functional/local-overlay-store/delete-refs-inner.sh b/tests/functional/local-overlay-store/delete-refs-inner.sh index f54ef2bb6b49..708e8c5a8dfd 100644 --- a/tests/functional/local-overlay-store/delete-refs-inner.sh +++ b/tests/functional/local-overlay-store/delete-refs-inner.sh @@ -23,14 +23,14 @@ input2=$(nix-build ../hermetic.nix --no-out-link --arg busybox "$busybox" --arg input3=$(nix-build ../hermetic.nix --no-out-link --arg busybox "$busybox" --arg withFinalRefs true --arg seed 2 -A passthru.input3 -j0) # Can't delete because referenced -expectStderr 1 nix-store --delete "$input1" | grepQuiet "Cannot delete path" -expectStderr 1 nix-store --delete "$input2" | grepQuiet "Cannot delete path" -expectStderr 1 nix-store --delete "$input3" | grepQuiet "Cannot delete path" +expectStderr 1 nix-store --delete "$input1" | grepQuiet "Cannot 
delete path.*because it's referenced by path" +expectStderr 1 nix-store --delete "$input2" | grepQuiet "Cannot delete path.*because it's referenced by path" +expectStderr 1 nix-store --delete "$input3" | grepQuiet "Cannot delete path.*because it's referenced by path" # These same paths are referenced in the lower layer (by the seed 1 # build done in `initLowerStore`). -expectStderr 1 nix-store --store "$storeA" --delete "$input2" | grepQuiet "Cannot delete path" -expectStderr 1 nix-store --store "$storeA" --delete "$input3" | grepQuiet "Cannot delete path" +expectStderr 1 nix-store --store "$storeA" --delete "$input2" | grepQuiet "Cannot delete path.*because it's referenced by path" +expectStderr 1 nix-store --store "$storeA" --delete "$input3" | grepQuiet "Cannot delete path.*because it's referenced by path" # Can delete nix-store --delete "$hermetic" diff --git a/tests/functional/logging.sh b/tests/functional/logging.sh index 600fce43e940..ffb1e6d9621e 100755 --- a/tests/functional/logging.sh +++ b/tests/functional/logging.sh @@ -40,5 +40,6 @@ if [[ "$NIX_REMOTE" != "daemon" ]]; then nix build -vv --file dependencies.nix --no-link --json-log-path "$TEST_ROOT/log.json" 2>&1 | grepQuiet 'building.*dependencies-top.drv' jq < "$TEST_ROOT/log.json" grep '{"action":"start","fields":\[".*-dependencies-top.drv","",1,1\],"id":.*,"level":3,"parent":0' "$TEST_ROOT/log.json" >&2 + grep -E '{"action":"result","id":[^,]+,"payload":{"builtOutputs":{"out":{"dependentRealisations":\{\},"id":"[^"]+","outPath":"[^-]+-dependencies-top".*"status":"Built".*"success":true' "$TEST_ROOT/log.json" >&2 (( $(grep -c '{"action":"msg","level":5,"msg":"executing builder .*"}' "$TEST_ROOT/log.json" ) == 5 )) fi diff --git a/tests/functional/misc.sh b/tests/functional/misc.sh index 131b63323e57..b8bbb74dddd7 100755 --- a/tests/functional/misc.sh +++ b/tests/functional/misc.sh @@ -23,11 +23,11 @@ expect 1 nix-env -q --foo 2>&1 | grep "unknown flag" # Eval Errors. 
eval_arg_res=$(nix-instantiate --eval -E 'let a = {} // a; in a.foo' 2>&1 || true) -echo "$eval_arg_res" | grep "at «string»:1:15:" +echo "$eval_arg_res" | grep "at «string»:1:12:" echo "$eval_arg_res" | grep "infinite recursion encountered" eval_stdin_res=$(echo 'let a = {} // a; in a.foo' | nix-instantiate --eval -E - 2>&1 || true) -echo "$eval_stdin_res" | grep "at «stdin»:1:15:" +echo "$eval_stdin_res" | grep "at «stdin»:1:12:" echo "$eval_stdin_res" | grep "infinite recursion encountered" # Attribute path errors diff --git a/tests/functional/nix-profile.sh b/tests/functional/nix-profile.sh index cf84088d7618..a27a32c6efda 100755 --- a/tests/functional/nix-profile.sh +++ b/tests/functional/nix-profile.sh @@ -4,6 +4,8 @@ source common.sh TODO_NixOS +requireGit + clearStore clearProfiles @@ -12,7 +14,7 @@ restartDaemon # Make a flake. flake1Dir=$TEST_ROOT/flake1 -mkdir -p "$flake1Dir" +createGitRepo "$flake1Dir" # shellcheck disable=SC2154,SC1039 cat > "$flake1Dir"/flake.nix < "$flake1Dir"/ca.nix cp "${config_nix}" "$flake1Dir"/ +git -C "$flake1Dir" add flake.nix config.nix who version ca.nix +git -C "$flake1Dir" commit -m 'Initial' + # Test upgrading from nix-env. nix-env -f ./user-envs.nix -i foo-1.0 nix profile list | grep -A2 'Name:.*foo' | grep 'Store paths:.*foo-1.0' nix profile add "$flake1Dir" -L -nix profile list | grep -A4 'Name:.*flake1' | grep 'Locked flake URL:.*narHash' +#nix profile list | grep -A4 'Name:.*flake1' | grep 'Locked flake URL:.*narHash' [[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] [ -e "$TEST_HOME"/.nix-profile/share/man ] # shellcheck disable=SC2235 (! 
[ -e "$TEST_HOME"/.nix-profile/include ]) nix profile history -nix profile history | grep "packages.$system.default: ∅ -> 1.0" -nix profile diff-closures | grep 'env-manifest.nix: ε → ∅' +nix profile history | grep "packages.$system.default: 1.0, 1.0-man added" +nix profile diff-closures | grep 'env-manifest.nix: (no version) removed' # Test XDG Base Directories support export NIX_CONFIG="use-xdg-base-directories = true" @@ -96,6 +101,7 @@ printf 1.0 > "$flake1Dir"/version # Test --all exclusivity. assertStderr nix --offline profile upgrade --all foo << EOF error: --all cannot be used with package names or regular expressions. + Try 'nix --help' for more information. EOF @@ -130,9 +136,8 @@ nix profile rollback [ -e "$TEST_HOME"/.nix-profile/bin/foo ] # shellcheck disable=SC2235 nix profile remove foo 2>&1 | grep 'removed 1 packages' -# shellcheck disable=SC2235 -(! [ -e "$TEST_HOME"/.nix-profile/bin/foo ]) -nix profile history | grep 'foo: 1.0 -> ∅' +[[ ! -e "$TEST_HOME"/.nix-profile/bin/foo ]] +nix profile history | grep 'foo: 1.0 removed' nix profile diff-closures | grep 'Version 3 -> 4' # Test installing a non-flake package. @@ -224,11 +229,11 @@ error: An existing package already provides the following file: The conflicting packages have a priority of 5. 
To prioritise the new package: - nix profile add path:${flake2Dir}#packages.${system}.default --priority 4 + nix profile add git+file://${flake2Dir}#packages.${system}.default --priority 4 To prioritise the existing package: - nix profile add path:${flake2Dir}#packages.${system}.default --priority 6 + nix profile add git+file://${flake2Dir}#packages.${system}.default --priority 6 EOF ) [[ $("$TEST_HOME"/.nix-profile/bin/hello) = "Hello World" ]] diff --git a/tests/functional/package.nix b/tests/functional/package.nix index b3b314a50d70..2c1146ec4e85 100644 --- a/tests/functional/package.nix +++ b/tests/functional/package.nix @@ -26,6 +26,9 @@ # For running the functional tests against a different pre-built Nix. test-daemon ? null, + + # Whether to run tests with lazy trees enabled. + lazyTrees ? false, }: let @@ -95,6 +98,8 @@ mkMesonDerivation ( mkdir $out ''; + _NIX_TEST_EXTRA_CONFIG = lib.optionalString lazyTrees "lazy-trees = true"; + meta = { platforms = lib.platforms.unix; }; diff --git a/tests/functional/path-info.sh b/tests/functional/path-info.sh index 712b5267878c..3d6041914ed6 100755 --- a/tests/functional/path-info.sh +++ b/tests/functional/path-info.sh @@ -13,7 +13,7 @@ barBase=$(basename "$bar") echo baz > "$TEST_ROOT"/baz baz=$(nix store add-file "$TEST_ROOT"/baz) bazBase=$(basename "$baz") -nix-store --delete "$baz" +nix-store --delete --ignore-liveness "$baz" diff --unified --color=always \ <(nix path-info --json --json-format 2 "$foo" "$bar" "$baz" | diff --git a/tests/functional/recursive.nix b/tests/functional/recursive.nix index be9e55da37ec..aa2aa26c5494 100644 --- a/tests/functional/recursive.nix +++ b/tests/functional/recursive.nix @@ -17,7 +17,7 @@ mkDerivation rec { buildCommand = '' mkdir $out - opts="--experimental-features nix-command ${ + opts="${ if (NIX_TESTS_CA_BY_DEFAULT == "1") then "--extra-experimental-features ca-derivations" else "" }" diff --git a/tests/functional/recursive.sh b/tests/functional/recursive.sh index 
9115aa77583d..16c3fdab4df0 100755 --- a/tests/functional/recursive.sh +++ b/tests/functional/recursive.sh @@ -14,7 +14,7 @@ rm -f "$TEST_ROOT"/result unreachable=$(nix store add-path ./recursive.sh) export unreachable -NIX_BIN_DIR=$(dirname "$(type -p nix)") nix --extra-experimental-features 'nix-command recursive-nix' build -o "$TEST_ROOT"/result -L --impure --file ./recursive.nix +NIX_BIN_DIR=$(dirname "$(type -p nix)") nix --extra-experimental-features 'recursive-nix' build -o "$TEST_ROOT"/result -L --impure --file ./recursive.nix [[ $(cat "$TEST_ROOT"/result/inner1) =~ blaat ]] diff --git a/tests/functional/repl.sh b/tests/functional/repl.sh index 7023f2b8a0d7..0e84a3d14388 100755 --- a/tests/functional/repl.sh +++ b/tests/functional/repl.sh @@ -162,7 +162,7 @@ EOF testReplResponse ' foo + baz ' "3" \ - ./flake ./flake\#bar --experimental-features 'flakes' + ./flake ./flake\#bar testReplResponse $' :a { a = 1; b = 2; longerName = 3; "with spaces" = 4; } @@ -197,7 +197,7 @@ testReplResponseNoRegex $' # - Check that the result has changed mkfifo repl_fifo touch repl_output -nix repl ./flake --experimental-features 'flakes' < repl_fifo >> repl_output 2>&1 & +nix repl ./flake < repl_fifo >> repl_output 2>&1 & repl_pid=$! exec 3>repl_fifo # Open fifo for writing echo "changingThing" >&3 @@ -321,7 +321,7 @@ import $testDir/lang/parse-fail-eof-pos.nix badDiff=0 badExitCode=0 -nixVersion="$(nix eval --impure --raw --expr 'builtins.nixVersion' --extra-experimental-features nix-command)" +nixVersion="$(nix --version | sed 's/nix //')" # TODO: write a repl interacter for testing. Papering over the differences between readline / editline and between platforms is a pain. 
diff --git a/tests/functional/simple.nix b/tests/functional/simple.nix index 2035ca294cce..bd8b234852d2 100644 --- a/tests/functional/simple.nix +++ b/tests/functional/simple.nix @@ -3,7 +3,23 @@ with import ./config.nix; mkDerivation { name = "simple"; builder = ./simple.builder.sh; + _builder = ./simple.builder.sh; PATH = ""; goodPath = path; - meta.position = "${__curPos.file}:${toString __curPos.line}"; + meta = { + position = "${__curPos.file}:${toString __curPos.line}"; + license = [ + # Since this file is from Nix, use Nix's license. + # Keep in sync with `lib.licenses.lgpl21` from Nixpkgs. + { + deprecated = true; + free = true; + fullName = "GNU Lesser General Public License v2.1"; + redistributable = true; + shortName = "lgpl21"; + spdxId = "LGPL-2.1"; + url = "https://spdx.org/licenses/LGPL-2.1.html"; + } + ]; + }; } diff --git a/tests/functional/simple.sh b/tests/functional/simple.sh index c1f2eef411ee..e54ad860ca97 100755 --- a/tests/functional/simple.sh +++ b/tests/functional/simple.sh @@ -21,7 +21,7 @@ TODO_NixOS # Directed delete: $outPath is not reachable from a root, so it should # be deleteable. -nix-store --delete "$outPath" +nix-store --delete "$outPath" --ignore-liveness [[ ! 
-e $outPath/hello ]] outPath="$(NIX_REMOTE='local?store=/foo&real='"$TEST_ROOT"'/real-store' nix-instantiate --readonly-mode hash-check.nix)" diff --git a/tests/functional/store-info.sh b/tests/functional/store-info.sh index adaee5dfecfc..ee896929ae3d 100755 --- a/tests/functional/store-info.sh +++ b/tests/functional/store-info.sh @@ -65,7 +65,7 @@ check_human_readable "$STORE_INFO" check_human_readable "$LEGACY_STORE_INFO" if [[ -v NIX_DAEMON_PACKAGE ]] && isDaemonNewer "2.7.0pre20220126"; then - DAEMON_VERSION=$("$NIX_DAEMON_PACKAGE"/bin/nix daemon --version | cut -d' ' -f3) + DAEMON_VERSION=$("$NIX_DAEMON_PACKAGE"/bin/nix daemon --version | sed 's/.*) //') echo "$STORE_INFO" | grep "Version: $DAEMON_VERSION" [[ "$(echo "$STORE_INFO_JSON" | jq -r ".version")" == "$DAEMON_VERSION" ]] fi diff --git a/tests/functional/tarball.sh b/tests/functional/tarball.sh index 6b09cf6a5ced..451ee879a5b3 100755 --- a/tests/functional/tarball.sh +++ b/tests/functional/tarball.sh @@ -38,6 +38,9 @@ test_tarball() { [[ $(nix eval --impure --expr "(fetchTree file://$tarball).lastModified") = 1000000000 ]] + # fetchTree with a narHash is implicitly final, so it doesn't return attributes like lastModified. + [[ $(nix eval --impure --expr "(fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"$hash\"; }) ? lastModified") = false ]] + nix-instantiate --strict --eval -E "!((import (fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"$hash\"; })) ? submodules)" >&2 nix-instantiate --strict --eval -E "!((import (fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"$hash\"; })) ? 
submodules)" 2>&1 | grep 'true' diff --git a/tests/installer/default.nix b/tests/installer/default.nix index d48537dd0d07..dc831cc97b1b 100644 --- a/tests/installer/default.nix +++ b/tests/installer/default.nix @@ -232,7 +232,7 @@ let source /etc/bashrc || true nix-env --version - nix --extra-experimental-features nix-command store info + nix store info out=\$(nix-build --no-substitute -E 'derivation { name = "foo"; system = "x86_64-linux"; builder = "/bin/sh"; args = ["-c" "echo foobar > \$out"]; }') [[ \$(cat \$out) = foobar ]] diff --git a/tests/nixos/authorization.nix b/tests/nixos/authorization.nix index 6540e9fa3379..944e59259253 100644 --- a/tests/nixos/authorization.nix +++ b/tests/nixos/authorization.nix @@ -13,8 +13,6 @@ users.users.alice.isNormalUser = true; users.users.bob.isNormalUser = true; users.users.mallory.isNormalUser = true; - - nix.settings.experimental-features = "nix-command"; }; testScript = diff --git a/tests/nixos/cgroups/default.nix b/tests/nixos/cgroups/default.nix index a6b4bca8c76b..4161aba2ca2f 100644 --- a/tests/nixos/cgroups/default.nix +++ b/tests/nixos/cgroups/default.nix @@ -9,7 +9,7 @@ { virtualisation.additionalPaths = [ pkgs.stdenvNoCC ]; nix.extraOptions = '' - extra-experimental-features = nix-command auto-allocate-uids cgroups + extra-experimental-features = auto-allocate-uids cgroups extra-system-features = uid-range ''; nix.settings.use-cgroups = true; diff --git a/tests/nixos/chroot-store.nix b/tests/nixos/chroot-store.nix index 0a4fff99222b..ecac371e1521 100644 --- a/tests/nixos/chroot-store.nix +++ b/tests/nixos/chroot-store.nix @@ -25,7 +25,6 @@ in virtualisation.writableStore = true; virtualisation.additionalPaths = [ pkgA ]; environment.systemPackages = [ pkgB ]; - nix.extraOptions = "experimental-features = nix-command"; }; }; diff --git a/tests/nixos/containers/containers.nix b/tests/nixos/containers/containers.nix index b590dc8498f7..8d07c80b6a3c 100644 --- a/tests/nixos/containers/containers.nix +++ 
b/tests/nixos/containers/containers.nix @@ -23,7 +23,7 @@ virtualisation.memorySize = 4096; nix.settings.substituters = lib.mkForce [ ]; nix.extraOptions = '' - extra-experimental-features = nix-command auto-allocate-uids cgroups + extra-experimental-features = auto-allocate-uids cgroups extra-system-features = uid-range ''; nix.nixPath = [ "nixpkgs=${nixpkgs}" ]; diff --git a/tests/nixos/content-encoding.nix b/tests/nixos/content-encoding.nix index debee377bdf2..1e188cb060b7 100644 --- a/tests/nixos/content-encoding.nix +++ b/tests/nixos/content-encoding.nix @@ -131,6 +131,7 @@ in start_all() machine.wait_for_unit("nginx.service") + machine.wait_for_open_port(80) # Original test: zstd archive with gzip content-encoding # Make sure that the file is properly compressed as the test would be meaningless otherwise diff --git a/tests/nixos/fetch-git/test-cases/build-time-fetch-tree/default.nix b/tests/nixos/fetch-git/test-cases/build-time-fetch-tree/default.nix new file mode 100644 index 000000000000..a241c877d21e --- /dev/null +++ b/tests/nixos/fetch-git/test-cases/build-time-fetch-tree/default.nix @@ -0,0 +1,49 @@ +{ config, ... 
}: +{ + description = "build-time fetching"; + script = '' + import json + + # add a file to the repo + client.succeed(f""" + echo ${config.name # to make the git tree and store path unique + } > {repo.path}/test-case \ + && echo chiang-mai > {repo.path}/thailand \ + && {repo.git} add test-case thailand \ + && {repo.git} commit -m 'commit1' \ + && {repo.git} push origin main + """) + + # get the NAR hash + nar_hash = json.loads(client.succeed(f""" + nix flake prefetch --flake-registry "" git+{repo.remote} --json + """))['hash'] + + # construct the derivation + expr = f""" + derivation {{ + name = "source"; + builder = "builtin:fetch-tree"; + system = "builtin"; + __structuredAttrs = true; + input = {{ + type = "git"; + url = "{repo.remote}"; + ref = "main"; + }}; + outputHashMode = "recursive"; + outputHash = "{nar_hash}"; + }} + """ + + # do the build-time fetch + out_path = client.succeed(f""" + nix build --print-out-paths --store /run/store --flake-registry "" --extra-experimental-features build-time-fetch-tree --expr '{expr}' + """).strip() + + # check if the committed file is there + client.succeed(f""" + test -f /run/store/{out_path}/thailand + """) + ''; +} diff --git a/tests/nixos/fetch-git/test-cases/fetchTree-shallow/default.nix b/tests/nixos/fetch-git/test-cases/fetchTree-shallow/default.nix index f635df1f8793..a204caedd578 100644 --- a/tests/nixos/fetch-git/test-cases/fetchTree-shallow/default.nix +++ b/tests/nixos/fetch-git/test-cases/fetchTree-shallow/default.nix @@ -1,5 +1,5 @@ { - description = "fetchTree fetches git repos shallowly by default"; + description = "fetchTree fetches git repos shallowly if possible"; script = '' # purge nix git cache to make sure we start with a clean slate client.succeed("rm -rf ~/.cache/nix") @@ -28,6 +28,7 @@ type = "git"; url = "{repo.remote}"; rev = "{commit2_rev}"; + revCount = 1234; }} """ diff --git a/tests/nixos/fetch-git/testsupport/setup.nix b/tests/nixos/fetch-git/testsupport/setup.nix index 
c13386c72230..3c9f4bddea12 100644 --- a/tests/nixos/fetch-git/testsupport/setup.nix +++ b/tests/nixos/fetch-git/testsupport/setup.nix @@ -81,10 +81,6 @@ in environment.variables = { _NIX_FORCE_HTTP = "1"; }; - nix.settings.experimental-features = [ - "nix-command" - "flakes" - ]; }; setupScript = ''''; testScript = '' diff --git a/tests/nixos/fetchers-substitute.nix b/tests/nixos/fetchers-substitute.nix index bfe15c5c36e7..a26748dca658 100644 --- a/tests/nixos/fetchers-substitute.nix +++ b/tests/nixos/fetchers-substitute.nix @@ -150,28 +150,5 @@ content = importer.succeed(f"cat {result_path}/hello.txt").strip() assert content == "Hello from tarball!", f"Content mismatch: {content}" print("✓ fetchTarball content verified!") - - ########################################## - # Test 3: Verify fetchTree does NOT substitute (preserves metadata) - ########################################## - - print("Testing that fetchTree without __final does NOT use substitution...") - - # fetchTree with just narHash (not __final) should try to download, which will fail - # since the file doesn't exist on the importer - exit_code = importer.fail(f""" - nix-instantiate --eval --json --read-write-mode --expr ' - builtins.fetchTree {{ - type = "tarball"; - url = "file:///only-on-substituter.tar.gz"; - narHash = "{tarball_hash_sri}"; - }} - ' 2>&1 - """) - - # Should fail with "does not exist" since it tries to download instead of substituting - assert "does not exist" in exit_code or "Couldn't open file" in exit_code, f"Expected download failure, got: {exit_code}" - print("✓ fetchTree correctly does NOT substitute non-final inputs!") - print(" (This preserves metadata like lastModified from the actual fetch)") ''; } diff --git a/tests/nixos/fetchurl.nix b/tests/nixos/fetchurl.nix index e8663debbcd4..d75cc2017de2 100644 --- a/tests/nixos/fetchurl.nix +++ b/tests/nixos/fetchurl.nix @@ -64,8 +64,6 @@ in ]; virtualisation.writableStore = true; - - nix.settings.experimental-features = 
"nix-command"; }; }; diff --git a/tests/nixos/fsync.nix b/tests/nixos/fsync.nix index e215e5b3c25c..50105f1ccd98 100644 --- a/tests/nixos/fsync.nix +++ b/tests/nixos/fsync.nix @@ -23,7 +23,6 @@ in { virtualisation.emptyDiskImages = [ 1024 ]; environment.systemPackages = [ pkg1 ]; - nix.settings.experimental-features = [ "nix-command" ]; nix.settings.fsync-store-paths = true; nix.settings.require-sigs = false; boot.supportedFilesystems = [ diff --git a/tests/nixos/functional/common.nix b/tests/nixos/functional/common.nix index 4d32b7573245..72b7b61d12c7 100644 --- a/tests/nixos/functional/common.nix +++ b/tests/nixos/functional/common.nix @@ -24,6 +24,7 @@ in ]; nix.settings.substituters = lib.mkForce [ ]; + systemd.services.nix-daemon.environment._NIX_IN_TEST = "1"; environment.systemPackages = let diff --git a/tests/nixos/git-submodules.nix b/tests/nixos/git-submodules.nix index c6f53ada2dc7..9105eb79bd7c 100644 --- a/tests/nixos/git-submodules.nix +++ b/tests/nixos/git-submodules.nix @@ -24,7 +24,6 @@ { programs.ssh.extraConfig = "ConnectTimeout 30"; environment.systemPackages = [ pkgs.git ]; - nix.extraOptions = "experimental-features = nix-command flakes"; }; }; diff --git a/tests/nixos/github-flakes.nix b/tests/nixos/github-flakes.nix index d14cd9d0c75d..3a72c669162e 100644 --- a/tests/nixos/github-flakes.nix +++ b/tests/nixos/github-flakes.nix @@ -17,7 +17,7 @@ let openssl req -newkey rsa:2048 -nodes -keyout $out/server.key \ -subj "/C=CN/ST=Denial/L=Springfield/O=Dis/CN=github.com" -out server.csr - openssl x509 -req -extfile <(printf "subjectAltName=DNS:api.github.com,DNS:github.com,DNS:channels.nixos.org") \ + openssl x509 -req -extfile <(printf "subjectAltName=DNS:api.github.com,DNS:github.com,DNS:channels.nixos.org,DNS:install.determinate.systems") \ -days 36500 -in server.csr -CA $out/ca.crt -CAkey ca.key -CAcreateserial -out $out/server.crt ''; @@ -107,13 +107,13 @@ in services.httpd.extraConfig = '' ErrorLog syslog:local6 ''; - 
services.httpd.virtualHosts."channels.nixos.org" = { + services.httpd.virtualHosts."install.determinate.systems" = { forceSSL = true; sslServerKey = "${cert}/server.key"; sslServerCert = "${cert}/server.crt"; servedDirs = [ { - urlPath = "/"; + urlPath = "/flake-registry/stable/"; dir = registry; } ]; @@ -163,9 +163,9 @@ in ]; virtualisation.memorySize = 4096; nix.settings.substituters = lib.mkForce [ ]; - nix.extraOptions = "experimental-features = nix-command flakes"; networking.hosts.${(builtins.head nodes.github.networking.interfaces.eth1.ipv4.addresses).address} = [ + "install.determinate.systems" "channels.nixos.org" "api.github.com" "github.com" @@ -204,14 +204,53 @@ in assert info["revision"] == "${nixpkgs.rev}", f"revision mismatch: {info['revision']} != ${nixpkgs.rev}" cat_log() + out = client.succeed("nix flake prefetch nixpkgs --json") + nar_hash = json.loads(out)['hash'] + + # Test build-time fetching of public flakes. + expr = f""" + derivation {{ + name = "source"; + builder = "builtin:fetch-tree"; + system = "builtin"; + __structuredAttrs = true; + input = {{ + type = "github"; + owner = "NixOS"; + repo = "nixpkgs"; + }}; + outputHashMode = "recursive"; + outputHash = "{nar_hash}"; + }} + """ + client.succeed(f"nix build --store /run/store --extra-experimental-features build-time-fetch-tree -L --expr '{expr}'") + # ... otherwise it should use the API - out = client.succeed("nix flake metadata private-flake --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0") + out = client.succeed("nix flake metadata private-flake --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0 --no-trust-tarballs-from-git-forges") print(out) info = json.loads(out) assert info["revision"] == "${private-flake-rev}", f"revision mismatch: {info['revision']} != ${private-flake-rev}" assert info["fingerprint"] cat_log() + # Test build-time fetching of private flakes. 
+ expr = f""" + derivation {{ + name = "source"; + builder = "builtin:fetch-tree"; + system = "builtin"; + __structuredAttrs = true; + input = {{ + type = "github"; + owner = "fancy-enterprise"; + repo = "private-flake"; + }}; + outputHashMode = "recursive"; + outputHash = "{info['locked']['narHash']}"; + }} + """ + client.succeed(f"nix build --store /run/store --extra-experimental-features build-time-fetch-tree --access-tokens github.com=ghp_000000000000000000000000000000000000 -L --expr '{expr}'") + # Fetching with the resolved URL should produce the same result. info2 = json.loads(client.succeed(f"nix flake metadata {info['url']} --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0")) print(info["fingerprint"], info2["fingerprint"]) @@ -225,6 +264,10 @@ in hash = client.succeed(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr '(fetchTree {info['url']}).narHash'") assert hash == info['locked']['narHash'] + # Fetching with an incorrect NAR hash should fail. + out = client.fail(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr '(fetchTree \"github:fancy-enterprise/private-flake/{info['revision']}?narHash=sha256-HsrRFZYg69qaVe/wDyWBYLeS6ca7ACEJg2Z%2BGpEFw4A%3D\").narHash' 2>&1") + assert "mismatch in field 'narHash'" in out, "NAR hash check did not fail with the expected error" + # Fetching without a narHash should succeed if trust-github is set and fail otherwise. 
client.succeed(f"nix eval --raw --expr 'builtins.fetchTree github:github:fancy-enterprise/private-flake/{info['revision']}'") out = client.fail(f"nix eval --no-trust-tarballs-from-git-forges --raw --expr 'builtins.fetchTree github:github:fancy-enterprise/private-flake/{info['revision']}' 2>&1") diff --git a/tests/nixos/nix-copy.nix b/tests/nixos/nix-copy.nix index 64de622de760..a7f0a6a326f4 100644 --- a/tests/nixos/nix-copy.nix +++ b/tests/nixos/nix-copy.nix @@ -39,7 +39,6 @@ in pkgD.drvPath ]; nix.settings.substituters = lib.mkForce [ ]; - nix.settings.experimental-features = [ "nix-command" ]; services.getty.autologinUser = "root"; programs.ssh.extraConfig = '' Host * diff --git a/tests/nixos/s3-binary-cache-store.nix b/tests/nixos/s3-binary-cache-store.nix index 5804057487dc..8085d7b526f3 100644 --- a/tests/nixos/s3-binary-cache-store.nix +++ b/tests/nixos/s3-binary-cache-store.nix @@ -39,7 +39,6 @@ in environment.systemPackages = [ pkgs.minio-client ]; nix.nixPath = [ "nixpkgs=${pkgs.path}" ]; nix.extraOptions = '' - experimental-features = nix-command substituters = ''; services.minio = { @@ -48,9 +47,14 @@ in rootCredentialsFile = pkgs.writeText "minio-credentials-full" '' MINIO_ROOT_USER=${accessKey} MINIO_ROOT_PASSWORD=${secretKey} + MINIO_DOMAIN=minio.local ''; }; networking.firewall.allowedTCPPorts = [ 9000 ]; + # Static hosts for virtual-hosted-style S3 tests. + # MinIO with MINIO_DOMAIN=minio.local accepts virtual-hosted requests + # where the bucket name is a hostname prefix. 
+ networking.extraHosts = "127.0.0.1 vhost-test.minio.local minio.local"; }; client = @@ -59,9 +63,9 @@ in virtualisation.writableStore = true; virtualisation.cores = 2; nix.extraOptions = '' - experimental-features = nix-command substituters = ''; + networking.extraHosts = "192.168.1.2 vhost-test.minio.local minio.local"; }; }; @@ -83,6 +87,10 @@ in ENDPOINT = 'http://server:9000' REGION = 'eu-west-1' + # Virtual-hosted-style configuration (requires MINIO_DOMAIN and static host entries) + VHOST_DOMAIN = 'minio.local' + VHOST_ENDPOINT = f'http://{VHOST_DOMAIN}:9000' + PKGS = { 'A': '${pkgA}', 'B': '${pkgB}', @@ -859,6 +867,92 @@ in print(output) raise Exception("Expected SSO provider to be skipped") + def test_virtual_hosted_copy(): + """Test nix copy with virtual-hosted-style addressing on custom endpoint""" + print("\n=== Testing Virtual-Hosted-Style Addressing ===") + + # Use a fixed bucket name matching the static /etc/hosts entries + bucket = 'vhost-test' + server.succeed(f"mc mb minio/{bucket}") + try: + store_url = make_s3_url( + bucket, + endpoint=VHOST_ENDPOINT, + **{'addressing-style': 'virtual'} + ) + + # Upload with virtual-hosted-style, capture debug output + output = server.succeed( + f"{ENV_WITH_CREDS} nix copy --debug --to '{store_url}' {PKGS['A']} 2>&1" + ) + + # Verify virtual-hosted-style URL was used (bucket in hostname) + vhost_url_prefix = f"http://{bucket}.{VHOST_DOMAIN}:9000/" + if vhost_url_prefix not in output: + print("Debug output:") + print(output) + raise Exception( + f"Expected virtual-hosted-style URL containing '{vhost_url_prefix}'" + ) + + # Verify path-style URL was NOT used (bucket should not be in the path) + path_style_pattern = f"{VHOST_ENDPOINT}/{bucket}/" + if path_style_pattern in output: + print("Debug output:") + print(output) + raise Exception("Found path-style URL when virtual-hosted-style was expected") + + # Download with virtual-hosted-style + verify_packages_in_store(client, PKGS['A'], should_exist=False) + output = 
client.succeed( + f"{ENV_WITH_CREDS} nix copy --debug --no-check-sigs " + f"--from '{store_url}' {PKGS['A']} 2>&1" + ) + + if vhost_url_prefix not in output: + print("Debug output:") + print(output) + raise Exception( + f"Expected virtual-hosted-style URL in download containing '{vhost_url_prefix}'" + ) + + verify_packages_in_store(client, PKGS['A']) + finally: + server.succeed(f"mc rb --force minio/{bucket}") + for pkg in PKGS.values(): + client.succeed(f"[ ! -e {pkg} ] || nix store delete --ignore-liveness {pkg}") + + @setup_s3() + def test_explicit_path_style(bucket): + """Test that addressing-style=path works as backwards-compatible fallback""" + print("\n=== Testing Explicit Path-Style Addressing ===") + + store_url = make_s3_url( + bucket, + **{'addressing-style': 'path'} + ) + + # Upload with explicit path-style + output = server.succeed( + f"{ENV_WITH_CREDS} nix copy --debug --to '{store_url}' {PKGS['A']} 2>&1" + ) + + # Verify path-style URL was used (bucket in path, not hostname) + path_style_pattern = f"{ENDPOINT}/{bucket}/" + if path_style_pattern not in output: + print("Debug output:") + print(output) + raise Exception( + f"Expected path-style URL containing '{path_style_pattern}'" + ) + + # Download + verify_packages_in_store(client, PKGS['A'], should_exist=False) + client.succeed( + f"{ENV_WITH_CREDS} nix copy --no-check-sigs --from '{store_url}' {PKGS['A']}" + ) + verify_packages_in_store(client, PKGS['A']) + # ============================================================================ # Main Test Execution # ============================================================================ @@ -896,5 +990,7 @@ in test_profile_credentials() test_env_vars_precedence() test_credential_provider_chain() + test_virtual_hosted_copy() + test_explicit_path_style() ''; } diff --git a/tests/nixos/sourcehut-flakes.nix b/tests/nixos/sourcehut-flakes.nix index 3f05130d6aab..5b40866d1fa8 100644 --- a/tests/nixos/sourcehut-flakes.nix +++ 
b/tests/nixos/sourcehut-flakes.nix @@ -119,7 +119,6 @@ in virtualisation.memorySize = 4096; nix.settings.substituters = lib.mkForce [ ]; nix.extraOptions = '' - experimental-features = nix-command flakes flake-registry = https://git.sr.ht/~NixOS/flake-registry/blob/master/flake-registry.json ''; environment.systemPackages = [ pkgs.jq ]; diff --git a/tests/nixos/tarball-flakes.nix b/tests/nixos/tarball-flakes.nix index 26c20cb1aef4..ab9b200db269 100644 --- a/tests/nixos/tarball-flakes.nix +++ b/tests/nixos/tarball-flakes.nix @@ -61,7 +61,6 @@ in ]; virtualisation.memorySize = 4096; nix.settings.substituters = lib.mkForce [ ]; - nix.extraOptions = "experimental-features = nix-command flakes"; }; }; @@ -99,7 +98,6 @@ in # Check that fetching fails if we provide incorrect attributes. machine.fail("nix flake metadata --json http://localhost/tags/latest.tar.gz?rev=493300eb13ae6fb387fbd47bf54a85915acc31c0") - machine.fail("nix flake metadata --json http://localhost/tags/latest.tar.gz?revCount=789") machine.fail("nix flake metadata --json http://localhost/tags/latest.tar.gz?narHash=sha256-tbudgBSg+bHWHiHnlteNzN8TUvI80ygS9IULh4rklEw=") ''; diff --git a/tests/repl-completion.nix b/tests/repl-completion.nix index 07406e969cd2..9ae37796bf57 100644 --- a/tests/repl-completion.nix +++ b/tests/repl-completion.nix @@ -15,7 +15,7 @@ runCommand "repl-completion" ]; expectScript = '' # Regression https://github.com/NixOS/nix/pull/10778 - spawn nix repl --offline --extra-experimental-features nix-command + spawn nix repl --offline expect "nix-repl>" send "foo = import ./does-not-exist.nix\n" expect "nix-repl>"