diff --git a/.commitlintrc.yml b/.commitlintrc.yml new file mode 100644 index 0000000..67ff005 --- /dev/null +++ b/.commitlintrc.yml @@ -0,0 +1,21 @@ +extends: + - '@commitlint/config-conventional' + +rules: + # Allow slightly longer subjects; we have descriptive messages. + header-max-length: [2, always, 100] + # Enforce lowercase type (feat, fix, ...) and allow common scopes. + type-enum: + - 2 + - always + - - build + - chore + - ci + - docs + - feat + - fix + - perf + - refactor + - revert + - style + - test diff --git a/.github/workflows/commitlint.yml b/.github/workflows/commitlint.yml new file mode 100644 index 0000000..a347bd3 --- /dev/null +++ b/.github/workflows/commitlint.yml @@ -0,0 +1,16 @@ +name: Commitlint + +on: + pull_request: + types: [opened, reopened, edited, synchronize] + +jobs: + commitlint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: wagoid/commitlint-github-action@v6 + with: + configFile: .commitlintrc.yml diff --git a/.github/workflows/publish-go.yml b/.github/workflows/publish-go.yml index 948cb33..fe48d2c 100644 --- a/.github/workflows/publish-go.yml +++ b/.github/workflows/publish-go.yml @@ -3,7 +3,7 @@ name: Publish Go bindings on: push: tags: - - "v*" + - "libveritas-v*" workflow_dispatch: permissions: @@ -100,7 +100,7 @@ jobs: - name: Setup Go module run: | - VERSION="${GITHUB_REF_NAME#v}" + VERSION="${GITHUB_REF_NAME#libveritas-v}" if [[ ! "$GITHUB_REF" == refs/tags/* ]]; then VERSION="0.0.0-dev.$(date +%Y%m%d%H%M%S)" fi @@ -116,7 +116,7 @@ jobs: env: GH_TOKEN: ${{ secrets.GO_PUBLISH_TOKEN }} run: | - VERSION="${GITHUB_REF_NAME}" + VERSION="v${GITHUB_REF_NAME#libveritas-v}" if [[ ! 
"$GITHUB_REF" == refs/tags/* ]]; then VERSION="v0.0.0-dev.$(date +%Y%m%d%H%M%S)" fi diff --git a/.github/workflows/publish-kotlin-android.yml b/.github/workflows/publish-kotlin-android.yml index 9bbe9e0..801b2c6 100644 --- a/.github/workflows/publish-kotlin-android.yml +++ b/.github/workflows/publish-kotlin-android.yml @@ -3,7 +3,7 @@ name: Publish Kotlin Android on: push: tags: - - "v*" + - "libveritas-v*" workflow_dispatch: permissions: @@ -57,7 +57,7 @@ jobs: - name: Set version from tag run: | - VERSION="${GITHUB_REF_NAME#v}" + VERSION="${GITHUB_REF_NAME#libveritas-v}" if [[ ! "$GITHUB_REF" == refs/tags/* ]]; then VERSION="0.0.0-dev.$(date +%Y%m%d%H%M%S)" fi diff --git a/.github/workflows/publish-kotlin-jvm.yml b/.github/workflows/publish-kotlin-jvm.yml index e0bcf77..49498f4 100644 --- a/.github/workflows/publish-kotlin-jvm.yml +++ b/.github/workflows/publish-kotlin-jvm.yml @@ -3,7 +3,7 @@ name: Publish Kotlin JVM on: push: tags: - - "v*" + - "libveritas-v*" workflow_dispatch: permissions: @@ -92,7 +92,7 @@ jobs: - name: Set version run: | - VERSION="${GITHUB_REF_NAME#v}" + VERSION="${GITHUB_REF_NAME#libveritas-v}" if [[ ! "$GITHUB_REF" == refs/tags/* ]]; then VERSION="0.0.0-dev.$(date +%Y%m%d%H%M%S)" fi diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml index 8cb5a7c..85399b8 100644 --- a/.github/workflows/publish-npm.yml +++ b/.github/workflows/publish-npm.yml @@ -3,7 +3,7 @@ name: Publish npm (WASM) on: push: tags: - - "v*" + - "libveritas-v*" workflow_dispatch: permissions: @@ -40,7 +40,7 @@ jobs: - name: Set package version from tag run: | - VERSION="${GITHUB_REF_NAME#v}" + VERSION="${GITHUB_REF_NAME#libveritas-v}" if [[ ! 
"$GITHUB_REF" == refs/tags/* ]]; then VERSION="0.0.0-dev.$(date +%Y%m%d%H%M%S)" fi diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml index 6a311cc..762fdc2 100644 --- a/.github/workflows/publish-pypi.yml +++ b/.github/workflows/publish-pypi.yml @@ -3,7 +3,7 @@ name: Publish PyPI (Python) on: push: tags: - - "v*" + - "libveritas-v*" workflow_dispatch: permissions: @@ -18,7 +18,7 @@ jobs: steps: - id: ver run: | - VERSION="${GITHUB_REF_NAME#v}" + VERSION="${GITHUB_REF_NAME#libveritas-v}" if [[ ! "$GITHUB_REF" == refs/tags/* ]]; then VERSION="0.0.0-dev.$(date +%Y%m%d%H%M%S)" fi diff --git a/.github/workflows/publish-react-native.yml b/.github/workflows/publish-react-native.yml index 1f0fad1..9b5768f 100644 --- a/.github/workflows/publish-react-native.yml +++ b/.github/workflows/publish-react-native.yml @@ -3,7 +3,7 @@ name: Publish React Native (npm) on: push: tags: - - "v*" + - "libveritas-v*" workflow_dispatch: permissions: @@ -52,7 +52,7 @@ jobs: - name: Set version from tag working-directory: bindings/react-native run: | - VERSION="${GITHUB_REF_NAME#v}" + VERSION="${GITHUB_REF_NAME#libveritas-v}" if [[ ! "$GITHUB_REF" == refs/tags/* ]]; then VERSION="0.0.0-dev.$(date +%Y%m%d%H%M%S)" fi diff --git a/.github/workflows/publish-swift.yml b/.github/workflows/publish-swift.yml index c8a9171..0f6f87d 100644 --- a/.github/workflows/publish-swift.yml +++ b/.github/workflows/publish-swift.yml @@ -3,7 +3,7 @@ name: Publish Swift (XCFramework) on: push: tags: - - "v*" + - "libveritas-v*" workflow_dispatch: permissions: @@ -82,7 +82,7 @@ jobs: echo "checksum=$CHECKSUM" >> "$GITHUB_OUTPUT" echo "XCFramework checksum: $CHECKSUM" - VERSION="${GITHUB_REF_NAME}" + VERSION="v${GITHUB_REF_NAME#libveritas-v}" if [[ ! 
"$GITHUB_REF" == refs/tags/* ]]; then VERSION="v0.0.0-dev.$(date +%Y%m%d%H%M%S)" fi diff --git a/.github/workflows/release-plz.yml b/.github/workflows/release-plz.yml new file mode 100644 index 0000000..e467cdb --- /dev/null +++ b/.github/workflows/release-plz.yml @@ -0,0 +1,49 @@ +name: Release-plz + +permissions: + pull-requests: write + contents: write + +on: + push: + branches: [main] + +jobs: + # Opens / updates the "release PR" that bumps versions and edits the CHANGELOG. + release-plz-pr: + name: Release-plz PR + runs-on: ubuntu-latest + if: ${{ github.repository_owner == 'spacesprotocol' }} + concurrency: + group: release-plz-${{ github.ref }} + cancel-in-progress: false + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: dtolnay/rust-toolchain@stable + - name: Run release-plz + uses: release-plz/action@v0.5 + with: + command: release-pr + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} + + # Tags + publishes to crates.io when a release commit lands on main. 
+ release-plz-release: + name: Release-plz publish + runs-on: ubuntu-latest + if: ${{ github.repository_owner == 'spacesprotocol' }} + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: dtolnay/rust-toolchain@stable + - name: Run release-plz + uses: release-plz/action@v0.5 + with: + command: release + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index b88aa90..2dc3d32 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -27,6 +27,29 @@ jobs: - name: Run tests run: cargo test -p libveritas + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + components: rustfmt, clippy + - uses: Swatinem/rust-cache@v2 + - run: cargo fmt --all -- --check + - run: cargo clippy --workspace --exclude libveritas_methods --all-targets --all-features -- -D warnings + + docs: + name: Docs + runs-on: ubuntu-latest + env: + RUSTDOCFLAGS: -D warnings + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + - run: cargo doc --no-deps --all-features -p libveritas -p libveritas_zk + verify-elfs: runs-on: ubuntu-latest steps: diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..9e0e2bb --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,6 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..b1cc5c8 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,45 @@ +# Contributing to libveritas + +Everyone is welcome to contribute towards development in the form of peer review, testing, and patches. 
This document explains the practical process and guidelines. + +## Getting started + +Reviewing and testing is highly valued and the most effective way to contribute as a new contributor. It also teaches you much more about the code and process than opening pull requests. + +### Good First Issue Label + +The purpose of the good-first-issue label is to highlight issues suitable for new contributors without a deep understanding of the codebase. + +You do not need to request permission to start working on an issue. However, it's helpful to leave a comment if you are planning to work on one — it helps other contributors track which issues are actively being addressed and is also a good way to request assistance. + +## Communication channels + +You can join the [Spaces telegram](https://t.me/spacesprotocol). + +Discussion about codebase improvements happens in GitHub issues and pull requests. + +## Contributor workflow + +The codebase is maintained using the "contributor workflow" where everyone contributes patch proposals using pull requests. + +To contribute a patch: + +1. Fork the repository (only the first time) +2. Create a topic branch +3. Commit patches using [conventional commits](https://www.conventionalcommits.org/) — this is enforced by CI and drives the changelog and release versioning. Examples: `feat: add lookup helper`, `fix(builder): handle empty record sets`, `docs: clarify SIG record semantics`. + +## Squashing commits + +If your pull request contains fixup commits or too fine-grained commits, squash them before review. See [how to write good commit messages](https://cbea.ms/git-commit/). + +## Pull request philosophy + +Keep patchsets focused: a PR should add a feature, fix a bug, or refactor code — not a mixture. Avoid super pull requests that try to do too much. + +## Releases + +Releases are automated via [release-plz](https://release-plz.dev/). 
When commits land on `main`, a release PR is opened automatically with version bumps and changelog entries derived from your conventional commits. Merging that PR tags the release and publishes to crates.io. + +## Copyright + +By contributing to this repository, you agree to license your work under the Apache-2.0 license. Any work contributed where you are not the original author must contain its license header with the original author(s) and source. \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index b7500af..8a87d50 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -493,7 +493,8 @@ dependencies = [ [[package]] name = "borsh_utils" version = "0.1.0" -source = "git+https://github.com/spacesprotocol/spaces.git?branch=subspaces#12adf5f9f28d1d1174c3bee012516df8857613c7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61c0cd2efcc60ce44ca958baa626593a23006b5418b1fa8f038c23aa85f7a97f" dependencies = [ "bitcoin", "borsh", @@ -1401,7 +1402,7 @@ dependencies = [ [[package]] name = "libveritas_methods" -version = "0.1.2" +version = "0.1.0" dependencies = [ "risc0-build", ] @@ -2384,7 +2385,8 @@ dependencies = [ [[package]] name = "sip7" version = "0.1.0" -source = "git+https://github.com/spacesprotocol/spaces.git?branch=subspaces#12adf5f9f28d1d1174c3bee012516df8857613c7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b684197991554bec0801fb44663974608d59ac6df4f95d2d84a3955e82499a85" dependencies = [ "base64 0.22.1", "hex", @@ -2418,8 +2420,9 @@ checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" [[package]] name = "spacedb" -version = "0.0.12" -source = "git+https://github.com/spacesprotocol/spacedb.git#43f23e78e4e5fffb8d89661a4b7f39ab43a5a644" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a48cda82e951391df9d0a54c96f8b04e117ff39abad5359b181cb71c80798d77" dependencies = [ "borsh", "sha2", @@ -2428,7 +2431,8 @@ dependencies = 
[
 
 [[package]]
 name = "spaces_nums"
 version = "0.1.0"
-source = "git+https://github.com/spacesprotocol/spaces.git?branch=subspaces#12adf5f9f28d1d1174c3bee012516df8857613c7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2015555e35781fc95ac0eb8e50bef6e70929248ea61d4101b195b4d0abb13e1d"
 dependencies = [
  "bech32",
  "bitcoin",
@@ -2442,8 +2446,9 @@ dependencies = [
 
 [[package]]
 name = "spaces_protocol"
-version = "0.0.7"
-source = "git+https://github.com/spacesprotocol/spaces.git?branch=subspaces#12adf5f9f28d1d1174c3bee012516df8857613c7"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f2f46b7eb14311193304301a394a30e5f7bdfd7fd9f70a5c8113610bf2b5cc2c"
 dependencies = [
  "bitcoin",
  "borsh",
diff --git a/Cargo.toml b/Cargo.toml
index 6b1ee7b..3d85170 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -2,12 +2,26 @@
 resolver = "2"
 members = ["methods", "zk", "veritas", "testutil", "bindings/wasm", "bindings/uniffi", "bindings/python"]
+[workspace.package]
+version = "0.1.0"
+edition = "2024"
+rust-version = "1.85"
+license = "Apache-2.0"
+repository = "https://github.com/spacesprotocol/libveritas"
+homepage = "https://spacesprotocol.org"
+authors = ["Buffrr <contact@buffrr.dev>"]
+
 [workspace.dependencies]
-spaces_protocol = { git = "https://github.com/spacesprotocol/spaces.git", branch = "subspaces", features = ["std"] }
-spaces_nums = { git = "https://github.com/spacesprotocol/spaces.git", branch = "subspaces", features = ["std"] }
-sip7 = { git = "https://github.com/spacesprotocol/spaces.git", branch = "subspaces", features = ["serde", "std"] }
-borsh_utils = { git = "https://github.com/spacesprotocol/spaces.git", branch = "subspaces" }
-spacedb = { git = "https://github.com/spacesprotocol/spacedb.git", default-features = false, features = ["extras"] }
+# Internal crates
+libveritas = { path = "veritas", version = "0.1.0" }
+libveritas_zk = { path = "zk", version = "0.1.0" }
+
+# External
+spaces_protocol = { version =
"0.1", features = ["std"] } +spaces_nums = { version = "0.1", features = ["std"] } +sip7 = { version = "0.1", features = ["serde", "std"] } +borsh_utils = { version = "0.1" } +spacedb = { version = "0.1", default-features = false, features = ["extras"] } # Always optimize; building and running the guest takes much longer without optimization. [profile.dev] diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..f49a4e1 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/README.md b/README.md index 82e76bb..30c261f 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,9 @@ # Libveritas +[![Crates.io](https://img.shields.io/crates/v/libveritas.svg)](https://crates.io/crates/libveritas) +[![Docs.rs](https://docs.rs/libveritas/badge.svg)](https://docs.rs/libveritas) +[![License](https://img.shields.io/badge/license-Apache--2.0-blue.svg)](LICENSE) + Stateless verification for Bitcoin handles using the [Spaces protocol](https://spacesprotocol.org). Similar to [BIP-353](https://en.bitcoin.it/wiki/BIP_0353), but replaces centralized ICANN signing keys with a permissionless trust anchor. @@ -8,9 +12,8 @@ Similar to [BIP-353](https://en.bitcoin.it/wiki/BIP_0353), but replaces centrali ### Rust -```toml -[dependencies] -libveritas = { git = "https://github.com/spacesprotocol/libveritas.git" } +```bash +cargo add libveritas ``` ### JavaScript / Node.js diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..e00384f --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,21 @@ +# Security Policy + +## Supported Versions + +Latest published `0.1.x` is supported. + +## Reporting a Vulnerability + +To report security issues, contact one of the maintainers below. 
+
+| Maintainer | Email                | Fingerprint                                       |
+|------------|----------------------|---------------------------------------------------|
+| buffrr     | contact@buffrr.dev   | 5E18 8EC1 571D 32AC F1B8 85CC 12B0 037C E1D4 54E5 |
+
+The keys may be used to communicate sensitive information to developers. You can import a key by running the following command with the maintainer's fingerprint:
+
+```bash
+gpg --keyserver hkps://keys.openpgp.org --recv-keys "<fingerprint>"
+```
+
+Ensure you put quotes around fingerprints containing spaces.
\ No newline at end of file
diff --git a/bindings/python/Cargo.toml b/bindings/python/Cargo.toml
index 1473efd..ae316ac 100644
--- a/bindings/python/Cargo.toml
+++ b/bindings/python/Cargo.toml
@@ -1,7 +1,9 @@
 [package]
 name = "libveritas-python"
-version = "0.1.0"
-edition = "2024"
+version.workspace = true
+edition.workspace = true
+authors.workspace = true
+publish = false
 
 [lib]
 crate-type = ["cdylib"]
diff --git a/bindings/uniffi/Cargo.toml b/bindings/uniffi/Cargo.toml
index c586661..eee6a0d 100644
--- a/bindings/uniffi/Cargo.toml
+++ b/bindings/uniffi/Cargo.toml
@@ -1,14 +1,16 @@
 [package]
 name = "libveritas-uniffi"
-version = "0.1.0"
-edition = "2024"
+version.workspace = true
+edition.workspace = true
+authors.workspace = true
+publish = false
 
 [lib]
 crate-type = ["cdylib", "staticlib", "lib"]
 name = "libveritas_uniffi"
 
 [dependencies]
-libveritas = { path = "../../veritas" }
+libveritas = { workspace = true }
 sip7 = { workspace = true }
 serde = { version = "1.0" }
 serde_json = "1.0"
diff --git a/bindings/uniffi/src/lib.rs b/bindings/uniffi/src/lib.rs
index b8329e0..cde06a9 100644
--- a/bindings/uniffi/src/lib.rs
+++ b/bindings/uniffi/src/lib.rs
@@ -2,11 +2,11 @@ use std::sync::{Arc, RwLock};
 
 use libveritas::builder;
 use libveritas::msg;
-use spaces_protocol::sname::SName;
 use spaces_nums::RootAnchor;
 use spaces_nums::num_id::NumId;
 use spaces_protocol::bitcoin::ScriptBuf;
 use spaces_protocol::slabel::SLabel;
+use spaces_protocol::sname::SName;
 use
std::str::FromStr; uniffi::setup_scaffolding!(); @@ -25,7 +25,11 @@ pub enum VeritasError { #[derive(uniffi::Enum)] pub enum DelegateState { - Exists { script_pubkey: Vec, fallback_records: Vec, records: Vec }, + Exists { + script_pubkey: Vec, + fallback_records: Vec, + records: Vec, + }, Empty, Unknown, } @@ -69,15 +73,18 @@ fn trust_set_from_inner(ts: &libveritas::TrustSet) -> TrustSet { } fn parse_data_update(entry: &DataUpdateEntry) -> Result { - let handle = SName::from_str(&entry.name) - .map_err(|e| VeritasError::InvalidInput { - msg: format!("invalid name '{}': {}", entry.name, e), - })?; + let handle = SName::from_str(&entry.name).map_err(|e| VeritasError::InvalidInput { + msg: format!("invalid name '{}': {}", entry.name, e), + })?; - let records = entry.records.as_ref() + let records = entry + .records + .as_ref() .map(|b| sip7::RecordSet::new(b.clone())); - let delegate_records = entry.delegate_records.as_ref() + let delegate_records = entry + .delegate_records + .as_ref() .map(|b| sip7::RecordSet::new(b.clone())); Ok(builder::DataUpdateRequest { @@ -87,7 +94,9 @@ fn parse_data_update(entry: &DataUpdateEntry) -> Result Result, VeritasError> { +fn parse_data_updates( + entries: &[DataUpdateEntry], +) -> Result, VeritasError> { entries.iter().map(parse_data_update).collect() } @@ -149,33 +158,45 @@ fn zone_to_inner(z: &Zone) -> Result { let canonical = SName::from_str(&z.canonical).map_err(|e| VeritasError::InvalidInput { msg: format!("invalid canonical: {e}"), })?; - let alias = z.alias.as_ref() + let alias = z + .alias + .as_ref() .map(|a| SLabel::from_str_unprefixed(a)) .transpose() .map_err(|e| VeritasError::InvalidInput { msg: format!("invalid alias: {e}"), })?; - let num_id = z.num_id.as_ref() + let num_id = z + .num_id + .as_ref() .map(|n| NumId::from_str(n)) .transpose() .map_err(|e| VeritasError::InvalidInput { msg: format!("invalid num_id: {e}"), })?; let delegate = match &z.delegate { - DelegateState::Exists { script_pubkey, fallback_records, 
records } => { - libveritas::ProvableOption::Exists { - value: libveritas::Delegate { - script_pubkey: ScriptBuf::from_bytes(script_pubkey.clone()), - fallback_records: sip7::RecordSet::new(fallback_records.clone()), - records: sip7::RecordSet::new(records.clone()), - }, - } - } + DelegateState::Exists { + script_pubkey, + fallback_records, + records, + } => libveritas::ProvableOption::Exists { + value: libveritas::Delegate { + script_pubkey: ScriptBuf::from_bytes(script_pubkey.clone()), + fallback_records: sip7::RecordSet::new(fallback_records.clone()), + records: sip7::RecordSet::new(records.clone()), + }, + }, DelegateState::Empty => libveritas::ProvableOption::Empty, DelegateState::Unknown => libveritas::ProvableOption::Unknown, }; let commitment = match &z.commitment { - CommitmentState::Exists { state_root, prev_root, rolling_hash, block_height, receipt_hash } => { + CommitmentState::Exists { + state_root, + prev_root, + rolling_hash, + block_height, + receipt_hash, + } => { let mut sr = [0u8; 32]; sr.copy_from_slice(state_root); let mut rh = [0u8; 32]; @@ -230,6 +251,12 @@ pub struct QueryContext { inner: RwLock, } +impl Default for QueryContext { + fn default() -> Self { + Self::new() + } +} + #[uniffi::export] impl QueryContext { #[uniffi::constructor] @@ -242,19 +269,19 @@ impl QueryContext { /// Add a handle to verify (e.g. "alice@bitcoin"). /// If no requests are added, all handles in the message are verified. pub fn add_request(&self, handle: String) -> Result<(), VeritasError> { - let sname = SName::from_str(&handle) - .map_err(|e| VeritasError::InvalidInput { - msg: format!("invalid handle: {e}"), - })?; + let sname = SName::from_str(&handle).map_err(|e| VeritasError::InvalidInput { + msg: format!("invalid handle: {e}"), + })?; self.inner.write().unwrap().add_request(sname); Ok(()) } /// Add a known zone from stored bytes (from a previous verification). 
pub fn add_zone(&self, zone_bytes: Vec) -> Result<(), VeritasError> { - let zone = libveritas::Zone::from_slice(&zone_bytes).map_err(|e| VeritasError::InvalidInput { - msg: format!("invalid zone: {e}"), - })?; + let zone = + libveritas::Zone::from_slice(&zone_bytes).map_err(|e| VeritasError::InvalidInput { + msg: format!("invalid zone: {e}"), + })?; self.inner.write().unwrap().add_zone(zone); Ok(()) } @@ -270,11 +297,12 @@ impl Message { /// Decode a message from bytes. #[uniffi::constructor] pub fn new(bytes: Vec) -> Result { - let inner = msg::Message::from_slice(&bytes) - .map_err(|e| VeritasError::InvalidInput { - msg: format!("invalid message: {e}"), - })?; - Ok(Message { inner: RwLock::new(inner) }) + let inner = msg::Message::from_slice(&bytes).map_err(|e| VeritasError::InvalidInput { + msg: format!("invalid message: {e}"), + })?; + Ok(Message { + inner: RwLock::new(inner), + }) } /// Serialize the message to bytes. @@ -290,20 +318,34 @@ impl Message { } /// Set records on the message for a canonical name. - pub fn set_records(&self, canonical: String, records_bytes: Vec) -> Result<(), VeritasError> { + pub fn set_records( + &self, + canonical: String, + records_bytes: Vec, + ) -> Result<(), VeritasError> { let sname = SName::from_str(&canonical).map_err(|e| VeritasError::InvalidInput { msg: format!("invalid canonical: {e}"), })?; - self.inner.write().unwrap().set_records(&sname, sip7::RecordSet::new(records_bytes)); + self.inner + .write() + .unwrap() + .set_records(&sname, sip7::RecordSet::new(records_bytes)); Ok(()) } /// Set delegate records on the message for a canonical name. 
- pub fn set_delegate_records(&self, canonical: String, records_bytes: Vec) -> Result<(), VeritasError> { + pub fn set_delegate_records( + &self, + canonical: String, + records_bytes: Vec, + ) -> Result<(), VeritasError> { let sname = SName::from_str(&canonical).map_err(|e| VeritasError::InvalidInput { msg: format!("invalid canonical: {e}"), })?; - self.inner.write().unwrap().set_delegate_records(&sname, sip7::RecordSet::new(records_bytes)); + self.inner + .write() + .unwrap() + .set_delegate_records(&sname, sip7::RecordSet::new(records_bytes)); Ok(()) } } @@ -353,7 +395,12 @@ impl UnsignedRecordSet { /// Pack the Sig record with the given signature. Returns signed RecordSet wire bytes. pub fn pack_sig(&self, signature: Vec) -> Vec { - self.inner.read().unwrap().pack_sig(signature).as_slice().to_vec() + self.inner + .read() + .unwrap() + .pack_sig(signature) + .as_slice() + .to_vec() } } @@ -370,6 +417,12 @@ pub struct MessageBuilder { inner: RwLock>, } +impl Default for MessageBuilder { + fn default() -> Self { + Self::new() + } +} + #[uniffi::export] impl MessageBuilder { /// Create an empty builder. @@ -381,13 +434,20 @@ impl MessageBuilder { } /// Add a .spacecert chain with records (sip7 wire bytes). 
- pub fn add_handle(&self, chain_bytes: Vec, records_bytes: Vec) -> Result<(), VeritasError> { - let chain = libveritas::cert::CertificateChain::from_slice(&chain_bytes) - .map_err(|e| VeritasError::InvalidInput { + pub fn add_handle( + &self, + chain_bytes: Vec, + records_bytes: Vec, + ) -> Result<(), VeritasError> { + let chain = libveritas::cert::CertificateChain::from_slice(&chain_bytes).map_err(|e| { + VeritasError::InvalidInput { msg: format!("invalid chain: {e}"), - })?; + } + })?; let records = sip7::RecordSet::new(records_bytes); - self.inner.write().unwrap() + self.inner + .write() + .unwrap() .as_mut() .ok_or_else(|| VeritasError::InvalidInput { msg: "builder already consumed by build()".to_string(), @@ -398,11 +458,14 @@ impl MessageBuilder { /// Add all certificates from a .spacecert chain. pub fn add_chain(&self, chain_bytes: Vec) -> Result<(), VeritasError> { - let chain = libveritas::cert::CertificateChain::from_slice(&chain_bytes) - .map_err(|e| VeritasError::InvalidInput { + let chain = libveritas::cert::CertificateChain::from_slice(&chain_bytes).map_err(|e| { + VeritasError::InvalidInput { msg: format!("invalid chain: {e}"), - })?; - self.inner.write().unwrap() + } + })?; + self.inner + .write() + .unwrap() .as_mut() .ok_or_else(|| VeritasError::InvalidInput { msg: "builder already consumed by build()".to_string(), @@ -413,11 +476,14 @@ impl MessageBuilder { /// Add a single certificate. 
pub fn add_cert(&self, cert_bytes: Vec) -> Result<(), VeritasError> { - let cert = libveritas::cert::Certificate::from_slice(&cert_bytes) - .map_err(|e| VeritasError::InvalidInput { + let cert = libveritas::cert::Certificate::from_slice(&cert_bytes).map_err(|e| { + VeritasError::InvalidInput { msg: format!("invalid cert: {e}"), - })?; - self.inner.write().unwrap() + } + })?; + self.inner + .write() + .unwrap() .as_mut() .ok_or_else(|| VeritasError::InvalidInput { msg: "builder already consumed by build()".to_string(), @@ -428,12 +494,13 @@ impl MessageBuilder { /// Add records for a handle (sip7 wire bytes). pub fn add_records(&self, handle: String, records_bytes: Vec) -> Result<(), VeritasError> { - let sname = SName::from_str(&handle) - .map_err(|e| VeritasError::InvalidInput { - msg: format!("invalid handle: {e}"), - })?; + let sname = SName::from_str(&handle).map_err(|e| VeritasError::InvalidInput { + msg: format!("invalid handle: {e}"), + })?; let records = sip7::RecordSet::new(records_bytes); - self.inner.write().unwrap() + self.inner + .write() + .unwrap() .as_mut() .ok_or_else(|| VeritasError::InvalidInput { msg: "builder already consumed by build()".to_string(), @@ -445,7 +512,9 @@ impl MessageBuilder { /// Add a full data update (records + optional delegate records). pub fn add_update(&self, entry: DataUpdateEntry) -> Result<(), VeritasError> { let update = parse_data_update(&entry)?; - self.inner.write().unwrap() + self.inner + .write() + .unwrap() .as_mut() .ok_or_else(|| VeritasError::InvalidInput { msg: "builder already consumed by build()".to_string(), @@ -459,15 +528,11 @@ impl MessageBuilder { /// Send this to the provider/fabric to get the chain proofs needed for `build()`. 
pub fn chain_proof_request(&self) -> Result { let guard = self.inner.read().unwrap(); - let builder = guard - .as_ref() - .ok_or_else(|| VeritasError::InvalidInput { - msg: "builder already consumed by build()".to_string(), - })?; + let builder = guard.as_ref().ok_or_else(|| VeritasError::InvalidInput { + msg: "builder already consumed by build()".to_string(), + })?; serde_json::to_string(&builder.chain_proof_request()) - .map_err(|e| VeritasError::InvalidInput { - msg: e.to_string(), - }) + .map_err(|e| VeritasError::InvalidInput { msg: e.to_string() }) } /// Build the message from a ChainProof. @@ -475,28 +540,33 @@ impl MessageBuilder { /// Consumes the builder — cannot be called twice. /// Returns the message and unsigned record sets that need signing. pub fn build(&self, chain_proof: Vec) -> Result { - let builder = self - .inner - .write() - .unwrap() - .take() - .ok_or_else(|| VeritasError::InvalidInput { - msg: "builder already consumed by build()".to_string(), - })?; - let chain = msg::ChainProof::from_slice(&chain_proof) - .map_err(|e| VeritasError::InvalidInput { + let builder = + self.inner + .write() + .unwrap() + .take() + .ok_or_else(|| VeritasError::InvalidInput { + msg: "builder already consumed by build()".to_string(), + })?; + let chain = + msg::ChainProof::from_slice(&chain_proof).map_err(|e| VeritasError::InvalidInput { msg: format!("invalid chain proof: {e}"), })?; let (inner_msg, unsigned) = builder .build(chain) - .map_err(|e| VeritasError::InvalidInput { - msg: e.to_string(), - })?; + .map_err(|e| VeritasError::InvalidInput { msg: e.to_string() })?; Ok(BuildResult { - message: Arc::new(Message { inner: RwLock::new(inner_msg) }), - unsigned: unsigned.into_iter().map(|u| Arc::new(UnsignedRecordSet { - inner: RwLock::new(u), - })).collect(), + message: Arc::new(Message { + inner: RwLock::new(inner_msg), + }), + unsigned: unsigned + .into_iter() + .map(|u| { + Arc::new(UnsignedRecordSet { + inner: RwLock::new(u), + }) + }) + .collect(), }) } } 
@@ -510,8 +580,8 @@ pub struct Anchors { impl Anchors { #[uniffi::constructor] pub fn from_json(json: String) -> Result { - let inner: Vec = serde_json::from_str(&json) - .map_err(|e| VeritasError::InvalidInput { + let inner: Vec = + serde_json::from_str(&json).map_err(|e| VeritasError::InvalidInput { msg: format!("invalid anchors: {e}"), })?; Ok(Anchors { inner }) @@ -527,24 +597,65 @@ impl Anchors { /// A single SIP-7 record (for constructing/packing). #[derive(uniffi::Enum)] pub enum Record { - Seq { version: u64 }, - Txt { key: String, value: Vec }, - Addr { key: String, value: Vec }, - Blob { key: String, value: Vec }, - Sig { flags: u8, canonical: String, handle: String, sig: Vec }, - Unknown { rtype: u8, rdata: Vec }, + Seq { + version: u64, + }, + Txt { + key: String, + value: Vec, + }, + Addr { + key: String, + value: Vec, + }, + Blob { + key: String, + value: Vec, + }, + Sig { + flags: u8, + canonical: String, + handle: String, + sig: Vec, + }, + Unknown { + rtype: u8, + rdata: Vec, + }, } /// A parsed SIP-7 record (from unpacking). Includes `Malformed` for invalid rdata. 
#[derive(uniffi::Enum)] pub enum ParsedRecord { - Seq { version: u64 }, - Txt { key: String, value: Vec }, - Addr { key: String, value: Vec }, - Blob { key: String, value: Vec }, - Sig { flags: u8, canonical: String, handle: String, sig: Vec }, - Malformed { rtype: u8, rdata: Vec }, - Unknown { rtype: u8, rdata: Vec }, + Seq { + version: u64, + }, + Txt { + key: String, + value: Vec, + }, + Addr { + key: String, + value: Vec, + }, + Blob { + key: String, + value: Vec, + }, + Sig { + flags: u8, + canonical: String, + handle: String, + sig: Vec, + }, + Malformed { + rtype: u8, + rdata: Vec, + }, + Unknown { + rtype: u8, + rdata: Vec, + }, } impl<'a> From> for ParsedRecord { @@ -570,10 +681,12 @@ impl<'a> From> for ParsedRecord { sig: sig.sig.to_vec(), }, sip7::ParsedRecord::Malformed { rtype, rdata } => ParsedRecord::Malformed { - rtype, rdata: rdata.to_vec(), + rtype, + rdata: rdata.to_vec(), }, sip7::ParsedRecord::Unknown { rtype, rdata } => ParsedRecord::Unknown { - rtype, rdata: rdata.to_vec(), + rtype, + rdata: rdata.to_vec(), }, } } @@ -586,7 +699,12 @@ impl From for Record { sip7::Record::Txt { key, value } => Record::Txt { key, value }, sip7::Record::Addr { key, value } => Record::Addr { key, value }, sip7::Record::Blob { key, value } => Record::Blob { key, value }, - sip7::Record::Sig { flags, canonical, handle, sig } => Record::Sig { + sip7::Record::Sig { + flags, + canonical, + handle, + sig, + } => Record::Sig { flags, canonical: canonical.to_string(), handle: handle.to_string(), @@ -610,7 +728,12 @@ impl From for sip7::Record { sip7::Record::addr(&key, &refs) } Record::Blob { key, value } => sip7::Record::blob(&key, value), - Record::Sig { flags, canonical, handle, sig } => { + Record::Sig { + flags, + canonical, + handle, + sig, + } => { let canonical = SName::from_str(&canonical).expect("valid canonical"); let handle = if handle.is_empty() { SName::empty() @@ -635,7 +758,9 @@ impl RecordSet { /// Wrap raw wire bytes (lazy — no parsing until unpack). 
#[uniffi::constructor] pub fn new(data: Vec) -> Self { - RecordSet { inner: sip7::RecordSet::new(data) } + RecordSet { + inner: sip7::RecordSet::new(data), + } } /// Pack records into wire format. @@ -654,7 +779,8 @@ impl RecordSet { /// Parse all records. pub fn unpack(&self) -> Result, VeritasError> { - self.inner.unpack() + self.inner + .unpack() .map(|records| records.into_iter().map(Into::into).collect()) .map_err(|e| VeritasError::InvalidInput { msg: e.to_string() }) } @@ -670,7 +796,6 @@ impl RecordSet { } } - #[derive(uniffi::Object)] pub struct Veritas { inner: libveritas::Veritas, @@ -682,9 +807,7 @@ impl Veritas { pub fn new(anchors: &Anchors) -> Result { let inner = libveritas::Veritas::new() .with_anchors(anchors.inner.clone()) - .map_err(|e| VeritasError::InvalidInput { - msg: e.to_string(), - })?; + .map_err(|e| VeritasError::InvalidInput { msg: e.to_string() })?; Ok(Veritas { inner }) } @@ -720,9 +843,7 @@ impl Veritas { let inner = self .inner .verify(&ctx_guard, msg_inner.clone()) - .map_err(|e| VeritasError::VerificationFailed { - msg: e.to_string(), - })?; + .map_err(|e| VeritasError::VerificationFailed { msg: e.to_string() })?; Ok(Arc::new(VerifiedMessage { inner })) } @@ -740,9 +861,7 @@ impl Veritas { let inner = self .inner .verify_with_options(&ctx_guard, msg_inner.clone(), options) - .map_err(|e| VeritasError::VerificationFailed { - msg: e.to_string(), - })?; + .map_err(|e| VeritasError::VerificationFailed { msg: e.to_string() })?; Ok(Arc::new(VerifiedMessage { inner })) } @@ -760,18 +879,11 @@ impl VerifiedMessage { } pub fn zones(&self) -> Vec { - self.inner - .zones - .iter() - .map(zone_from_inner) - .collect() + self.inner.zones.iter().map(zone_from_inner).collect() } pub fn certificates(&self) -> Vec> { - self.inner - .certificates() - .map(|c| c.to_bytes()) - .collect() + self.inner.certificates().map(|c| c.to_bytes()).collect() } /// Get the verified message for rebroadcasting or updating. 
@@ -798,12 +910,17 @@ impl Lookup { /// Create a lookup from a list of handle name strings. #[uniffi::constructor] pub fn new(names: Vec) -> Result { - let snames: Vec = names.iter() - .map(|n| SName::from_str(n).map_err(|e| VeritasError::InvalidInput { - msg: format!("invalid name '{}': {}", n, e), - })) + let snames: Vec = names + .iter() + .map(|n| { + SName::from_str(n).map_err(|e| VeritasError::InvalidInput { + msg: format!("invalid name '{}': {}", n, e), + }) + }) .collect::>()?; - Ok(Lookup { inner: libveritas::names::Lookup::new(snames) }) + Ok(Lookup { + inner: libveritas::names::Lookup::new(snames), + }) } /// Returns the first batch of handles to look up. @@ -814,17 +931,20 @@ impl Lookup { /// Feed zones from a resolveAll response. /// Returns the next batch of handles to look up (empty = done). pub fn advance(&self, zones: Vec) -> Result, VeritasError> { - let inner_zones: Vec = zones.iter() - .map(zone_to_inner) - .collect::>()?; - Ok(self.inner.advance(&inner_zones).iter().map(|s| s.to_string()).collect()) + let inner_zones: Vec = + zones.iter().map(zone_to_inner).collect::>()?; + Ok(self + .inner + .advance(&inner_zones) + .iter() + .map(|s| s.to_string()) + .collect()) } /// Expand zone handles using the alias map accumulated during resolution. pub fn expand_zones(&self, zones: Vec) -> Result, VeritasError> { - let mut inner_zones: Vec = zones.iter() - .map(zone_to_inner) - .collect::>()?; + let mut inner_zones: Vec = + zones.iter().map(zone_to_inner).collect::>()?; self.inner.expand_zones(&mut inner_zones); Ok(inner_zones.iter().map(zone_from_inner).collect()) } @@ -834,31 +954,44 @@ impl Lookup { /// Create a .spacecert file from a subject name and certificate bytes. 
#[uniffi::export] -pub fn create_certificate_chain(subject: String, cert_bytes_list: Vec>) -> Result, VeritasError> { +pub fn create_certificate_chain( + subject: String, + cert_bytes_list: Vec>, +) -> Result, VeritasError> { let sname = SName::from_str(&subject).map_err(|e| VeritasError::InvalidInput { msg: format!("invalid subject: {e}"), })?; - let certs: Vec = cert_bytes_list.iter() - .map(|b| libveritas::cert::Certificate::from_slice(b) - .map_err(|e| VeritasError::InvalidInput { + let certs: Vec = cert_bytes_list + .iter() + .map(|b| { + libveritas::cert::Certificate::from_slice(b).map_err(|e| VeritasError::InvalidInput { msg: format!("invalid cert: {e}"), - })) + }) + }) .collect::>()?; let chain = libveritas::cert::CertificateChain::new(sname, certs); Ok(chain.to_bytes()) } #[uniffi::export] -pub fn verify_default() -> u32 { libveritas::VERIFY_DEFAULT } +pub fn verify_default() -> u32 { + libveritas::VERIFY_DEFAULT +} #[uniffi::export] -pub fn verify_dev_mode() -> u32 { libveritas::VERIFY_DEV_MODE } +pub fn verify_dev_mode() -> u32 { + libveritas::VERIFY_DEV_MODE +} #[uniffi::export] -pub fn verify_enable_snark() -> u32 { libveritas::VERIFY_ENABLE_SNARK } +pub fn verify_enable_snark() -> u32 { + libveritas::VERIFY_ENABLE_SNARK +} #[uniffi::export] -pub fn sig_primary_zone() -> u8 { sip7::SIG_PRIMARY_ZONE } +pub fn sig_primary_zone() -> u8 { + sip7::SIG_PRIMARY_ZONE +} /// Hash a message with the Spaces signed-message prefix (SHA256). /// Returns the 32-byte digest suitable for Schnorr signing/verification. 
@@ -874,16 +1007,21 @@ pub fn hash_signable_message(msg: Vec) -> Vec { /// - `signature`: 64-byte Schnorr signature /// - `pubkey`: 32-byte x-only public key #[uniffi::export] -pub fn verify_spaces_message(msg: Vec, signature: Vec, pubkey: Vec) -> Result<(), VeritasError> { - let sig: [u8; 64] = signature.try_into().map_err(|_| VeritasError::InvalidInput { - msg: "signature must be 64 bytes".to_string(), - })?; +pub fn verify_spaces_message( + msg: Vec, + signature: Vec, + pubkey: Vec, +) -> Result<(), VeritasError> { + let sig: [u8; 64] = signature + .try_into() + .map_err(|_| VeritasError::InvalidInput { + msg: "signature must be 64 bytes".to_string(), + })?; let pk: [u8; 32] = pubkey.try_into().map_err(|_| VeritasError::InvalidInput { msg: "pubkey must be 32 bytes".to_string(), })?; - libveritas::verify_spaces_message(&msg, &sig, &pk).map_err(|e| VeritasError::VerificationFailed { - msg: e.to_string(), - }) + libveritas::verify_spaces_message(&msg, &sig, &pk) + .map_err(|e| VeritasError::VerificationFailed { msg: e.to_string() }) } /// Verify a raw Schnorr signature (no prefix, caller provides the 32-byte message hash). 
@@ -892,28 +1030,34 @@ pub fn verify_spaces_message(msg: Vec, signature: Vec, pubkey: Vec) /// - `signature`: 64-byte Schnorr signature /// - `pubkey`: 32-byte x-only public key #[uniffi::export] -pub fn verify_schnorr(msg_hash: Vec, signature: Vec, pubkey: Vec) -> Result<(), VeritasError> { - let hash: [u8; 32] = msg_hash.try_into().map_err(|_| VeritasError::InvalidInput { - msg: "msg_hash must be 32 bytes".to_string(), - })?; - let sig: [u8; 64] = signature.try_into().map_err(|_| VeritasError::InvalidInput { - msg: "signature must be 64 bytes".to_string(), - })?; +pub fn verify_schnorr( + msg_hash: Vec, + signature: Vec, + pubkey: Vec, +) -> Result<(), VeritasError> { + let hash: [u8; 32] = msg_hash + .try_into() + .map_err(|_| VeritasError::InvalidInput { + msg: "msg_hash must be 32 bytes".to_string(), + })?; + let sig: [u8; 64] = signature + .try_into() + .map_err(|_| VeritasError::InvalidInput { + msg: "signature must be 64 bytes".to_string(), + })?; let pk: [u8; 32] = pubkey.try_into().map_err(|_| VeritasError::InvalidInput { msg: "pubkey must be 32 bytes".to_string(), })?; - libveritas::verify_schnorr(&hash, &sig, &pk).map_err(|e| VeritasError::VerificationFailed { - msg: e.to_string(), - }) + libveritas::verify_schnorr(&hash, &sig, &pk) + .map_err(|e| VeritasError::VerificationFailed { msg: e.to_string() }) } /// Decode stored zone bytes to a Zone record. 
#[uniffi::export] pub fn decode_zone(bytes: Vec) -> Result { - let zone = libveritas::Zone::from_slice(&bytes) - .map_err(|e| VeritasError::InvalidInput { - msg: format!("invalid zone: {e}"), - })?; + let zone = libveritas::Zone::from_slice(&bytes).map_err(|e| VeritasError::InvalidInput { + msg: format!("invalid zone: {e}"), + })?; Ok(zone_from_inner(&zone)) } @@ -928,9 +1072,7 @@ pub fn zone_to_bytes(zone: Zone) -> Result, VeritasError> { #[uniffi::export] pub fn zone_to_json(zone: Zone) -> Result { let inner = zone_to_inner(&zone)?; - serde_json::to_string(&inner).map_err(|e| VeritasError::InvalidInput { - msg: e.to_string(), - }) + serde_json::to_string(&inner).map_err(|e| VeritasError::InvalidInput { msg: e.to_string() }) } /// Compare two zones — returns true if `a` is fresher/better than `b`. @@ -938,19 +1080,18 @@ pub fn zone_to_json(zone: Zone) -> Result { pub fn zone_is_better_than(a: Zone, b: Zone) -> Result { let inner_a = zone_to_inner(&a)?; let inner_b = zone_to_inner(&b)?; - inner_a.is_better_than(&inner_b).map_err(|e| VeritasError::InvalidInput { - msg: e.to_string(), - }) + inner_a + .is_better_than(&inner_b) + .map_err(|e| VeritasError::InvalidInput { msg: e.to_string() }) } /// Decode stored certificate bytes to JSON. 
#[uniffi::export] pub fn decode_certificate(bytes: Vec) -> Result { - let cert = libveritas::cert::Certificate::from_slice(&bytes) - .map_err(|e| VeritasError::InvalidInput { + let cert = libveritas::cert::Certificate::from_slice(&bytes).map_err(|e| { + VeritasError::InvalidInput { msg: format!("invalid certificate: {e}"), - })?; - serde_json::to_string(&cert).map_err(|e| VeritasError::InvalidInput { - msg: e.to_string(), - }) + } + })?; + serde_json::to_string(&cert).map_err(|e| VeritasError::InvalidInput { msg: e.to_string() }) } diff --git a/bindings/wasm/Cargo.toml b/bindings/wasm/Cargo.toml index 6160653..6d6edf8 100644 --- a/bindings/wasm/Cargo.toml +++ b/bindings/wasm/Cargo.toml @@ -1,13 +1,15 @@ [package] name = "libveritas-wasm" -version = "0.1.0" -edition = "2024" +version.workspace = true +edition.workspace = true +authors.workspace = true +publish = false [lib] crate-type = ["cdylib"] [dependencies] -libveritas = { path = "../../veritas" } +libveritas = { workspace = true } sip7 = { workspace = true } serde = { version = "1.0" } serde_json = "1.0" diff --git a/bindings/wasm/src/lib.rs b/bindings/wasm/src/lib.rs index 05082a9..b3de2ba 100644 --- a/bindings/wasm/src/lib.rs +++ b/bindings/wasm/src/lib.rs @@ -8,16 +8,24 @@ use serde::Serialize; use spaces_nums::RootAnchor; #[wasm_bindgen(js_name = "VERIFY_DEFAULT")] -pub fn verify_default() -> u32 { libveritas::VERIFY_DEFAULT } +pub fn verify_default() -> u32 { + libveritas::VERIFY_DEFAULT +} #[wasm_bindgen(js_name = "VERIFY_DEV_MODE")] -pub fn verify_dev_mode() -> u32 { libveritas::VERIFY_DEV_MODE } +pub fn verify_dev_mode() -> u32 { + libveritas::VERIFY_DEV_MODE +} #[wasm_bindgen(js_name = "VERIFY_ENABLE_SNARK")] -pub fn verify_enable_snark() -> u32 { libveritas::VERIFY_ENABLE_SNARK } +pub fn verify_enable_snark() -> u32 { + libveritas::VERIFY_ENABLE_SNARK +} #[wasm_bindgen(js_name = "SIG_PRIMARY_ZONE")] -pub fn sig_primary_zone() -> u8 { sip7::SIG_PRIMARY_ZONE } +pub fn sig_primary_zone() -> u8 { + 
sip7::SIG_PRIMARY_ZONE +} /// Serialize through JSON to get human-readable serde output /// (hex hashes, string names, etc.) as a native JS object. @@ -45,11 +53,9 @@ fn parse_data_update(entry: &JsValue) -> Result Self { + Self::new() + } +} + #[wasm_bindgen] impl QueryContext { #[wasm_bindgen(constructor)] @@ -86,8 +98,8 @@ impl QueryContext { /// If no requests are added, all handles in the message are verified. #[wasm_bindgen(js_name = "addRequest")] pub fn add_request(&mut self, handle: &str) -> Result<(), JsError> { - let sname = SName::from_str(handle) - .map_err(|e| JsError::new(&format!("invalid handle: {e}")))?; + let sname = + SName::from_str(handle).map_err(|e| JsError::new(&format!("invalid handle: {e}")))?; self.inner.add_request(sname); Ok(()) } @@ -119,17 +131,18 @@ fn trust_set_to_js(ts: &libveritas::TrustSet) -> Result { arr.copy_from(r); roots.push(&arr); } - js_sys::Reflect::set(&obj, &"roots".into(), &roots).map_err(|_| JsError::new("failed to set roots"))?; + js_sys::Reflect::set(&obj, &"roots".into(), &roots) + .map_err(|_| JsError::new("failed to set roots"))?; Ok(obj.into()) } fn zone_from_js(val: &JsValue) -> Result { - let json = js_sys::JSON::stringify(val) - .map_err(|_| JsError::new("failed to stringify zone"))?; - let json_str = json.as_string() + let json = + js_sys::JSON::stringify(val).map_err(|_| JsError::new("failed to stringify zone"))?; + let json_str = json + .as_string() .ok_or_else(|| JsError::new("stringify returned non-string"))?; - serde_json::from_str(&json_str) - .map_err(|e| JsError::new(&format!("invalid zone: {e}"))) + serde_json::from_str(&json_str).map_err(|e| JsError::new(&format!("invalid zone: {e}"))) } /// A message containing chain proofs and handle data. 
@@ -176,16 +189,22 @@ impl Message { pub fn set_records(&mut self, canonical: &str, records_bytes: &[u8]) -> Result<(), JsError> { let sname = SName::from_str(canonical) .map_err(|e| JsError::new(&format!("invalid canonical: {e}")))?; - self.inner.set_records(&sname, sip7::RecordSet::new(records_bytes.to_vec())); + self.inner + .set_records(&sname, sip7::RecordSet::new(records_bytes.to_vec())); Ok(()) } /// Set delegate records on the message for a canonical name. #[wasm_bindgen(js_name = "setDelegateRecords")] - pub fn set_delegate_records(&mut self, canonical: &str, records_bytes: &[u8]) -> Result<(), JsError> { + pub fn set_delegate_records( + &mut self, + canonical: &str, + records_bytes: &[u8], + ) -> Result<(), JsError> { let sname = SName::from_str(canonical) .map_err(|e| JsError::new(&format!("invalid canonical: {e}")))?; - self.inner.set_delegate_records(&sname, sip7::RecordSet::new(records_bytes.to_vec())); + self.inner + .set_delegate_records(&sname, sip7::RecordSet::new(records_bytes.to_vec())); Ok(()) } } @@ -196,6 +215,12 @@ pub struct MessageBuilder { inner: Option, } +impl Default for MessageBuilder { + fn default() -> Self { + Self::new() + } +} + #[wasm_bindgen] impl MessageBuilder { /// Create an empty builder. @@ -213,7 +238,8 @@ impl MessageBuilder { } fn inner_mut(&mut self) -> Result<&mut builder::MessageBuilder, JsError> { - self.inner.as_mut() + self.inner + .as_mut() .ok_or_else(|| JsError::new("builder already consumed by build()")) } @@ -248,8 +274,8 @@ impl MessageBuilder { /// Add records for a handle (sip7 wire bytes). 
#[wasm_bindgen(js_name = "addRecords")] pub fn add_records(&mut self, handle: &str, records_bytes: &[u8]) -> Result<(), JsError> { - let sname = SName::from_str(handle) - .map_err(|e| JsError::new(&format!("invalid handle: {e}")))?; + let sname = + SName::from_str(handle).map_err(|e| JsError::new(&format!("invalid handle: {e}")))?; let records = sip7::RecordSet::new(records_bytes.to_vec()); self.inner_mut()?.add_records(sname, records); Ok(()) @@ -421,11 +447,7 @@ impl Veritas { } /// Verify a message with default options. - pub fn verify( - &self, - ctx: &QueryContext, - msg: &Message, - ) -> Result { + pub fn verify(&self, ctx: &QueryContext, msg: &Message) -> Result { let inner = self .inner .verify(&ctx.inner, msg.inner.clone()) @@ -474,12 +496,15 @@ impl VerifiedMessage { /// All certificates as serialized byte arrays. pub fn certificates(&self) -> Vec { - self.inner.certificates().map(|c| { - let bytes = c.to_bytes(); - let arr = js_sys::Uint8Array::new_with_length(bytes.len() as u32); - arr.copy_from(&bytes); - arr - }).collect() + self.inner + .certificates() + .map(|c| { + let bytes = c.to_bytes(); + let arr = js_sys::Uint8Array::new_with_length(bytes.len() as u32); + arr.copy_from(&bytes); + arr + }) + .collect() } /// Get the verified message for rebroadcasting or updating. @@ -507,10 +532,16 @@ impl Lookup { /// Create a lookup from an array of handle name strings. #[wasm_bindgen(constructor)] pub fn new(names: Vec) -> Result { - let snames: Vec = names.iter() - .map(|n| SName::from_str(n).map_err(|e| JsError::new(&format!("invalid name '{}': {}", n, e)))) + let snames: Vec = names + .iter() + .map(|n| { + SName::from_str(n) + .map_err(|e| JsError::new(&format!("invalid name '{}': {}", n, e))) + }) .collect::>()?; - Ok(Lookup { inner: libveritas::names::Lookup::new(snames) }) + Ok(Lookup { + inner: libveritas::names::Lookup::new(snames), + }) } /// Returns the first batch of handles to look up. 
@@ -525,7 +556,12 @@ impl Lookup { let inner_zones: Vec = (0..array.length()) .map(|i| zone_from_js(&array.get(i))) .collect::>()?; - Ok(self.inner.advance(&inner_zones).iter().map(|s| s.to_string()).collect()) + Ok(self + .inner + .advance(&inner_zones) + .iter() + .map(|s| s.to_string()) + .collect()) } /// Expand zone handles using the alias map accumulated during resolution. @@ -548,12 +584,18 @@ impl Lookup { /// /// Collects certificates from multiple verified messages into a single chain. #[wasm_bindgen(js_name = "createCertificateChain")] -pub fn create_certificate_chain(subject: &str, cert_bytes_list: Vec) -> Result, JsError> { - let sname = SName::from_str(subject) - .map_err(|e| JsError::new(&format!("invalid subject: {e}")))?; - let certs: Vec = cert_bytes_list.iter() - .map(|b| libveritas::cert::Certificate::from_slice(&b.to_vec()) - .map_err(|e| JsError::new(&format!("invalid cert: {e}")))) +pub fn create_certificate_chain( + subject: &str, + cert_bytes_list: Vec, +) -> Result, JsError> { + let sname = + SName::from_str(subject).map_err(|e| JsError::new(&format!("invalid subject: {e}")))?; + let certs: Vec = cert_bytes_list + .iter() + .map(|b| { + libveritas::cert::Certificate::from_slice(&b.to_vec()) + .map_err(|e| JsError::new(&format!("invalid cert: {e}"))) + }) .collect::>()?; let chain = libveritas::cert::CertificateChain::new(sname, certs); Ok(chain.to_bytes()) @@ -563,7 +605,8 @@ pub fn create_certificate_chain(subject: &str, cert_bytes_list: Vec Result { let rtype = js_sys::Reflect::get(obj, &"type".into()) - .ok().and_then(|v| v.as_string()) + .ok() + .and_then(|v| v.as_string()) .ok_or_else(|| JsError::new("record must have a 'type' field"))?; match rtype.as_str() { "seq" => { @@ -575,13 +618,16 @@ fn parse_js_record(obj: &JsValue) -> Result { u64::try_from(js_sys::BigInt::from(raw)) .map_err(|_| JsError::new("seq record: 'version' out of u64 range"))? 
} else { - return Err(JsError::new("seq record: 'version' must be a number or bigint")); + return Err(JsError::new( + "seq record: 'version' must be a number or bigint", + )); }; Ok(sip7::Record::seq(version)) } "txt" => { let key = js_sys::Reflect::get(obj, &"key".into()) - .ok().and_then(|v| v.as_string()) + .ok() + .and_then(|v| v.as_string()) .ok_or_else(|| JsError::new("txt record: 'key' must be a string"))?; let raw = js_sys::Reflect::get(obj, &"value".into()) .map_err(|_| JsError::new("txt record: 'value' is required"))?; @@ -590,18 +636,24 @@ fn parse_js_record(obj: &JsValue) -> Result { } else if js_sys::Array::is_array(&raw) { let arr = js_sys::Array::from(&raw); (0..arr.length()) - .map(|i| arr.get(i).as_string() - .ok_or_else(|| JsError::new("txt record: array values must be strings"))) + .map(|i| { + arr.get(i) + .as_string() + .ok_or_else(|| JsError::new("txt record: array values must be strings")) + }) .collect::, _>>()? } else { - return Err(JsError::new("txt record: 'value' must be a string or array of strings")); + return Err(JsError::new( + "txt record: 'value' must be a string or array of strings", + )); }; let refs: Vec<&str> = values.iter().map(|s| s.as_str()).collect(); Ok(sip7::Record::txt(&key, &refs)) } "addr" => { let key = js_sys::Reflect::get(obj, &"key".into()) - .ok().and_then(|v| v.as_string()) + .ok() + .and_then(|v| v.as_string()) .ok_or_else(|| JsError::new("addr record: 'key' must be a string"))?; let raw = js_sys::Reflect::get(obj, &"value".into()) .map_err(|_| JsError::new("addr record: 'value' is required"))?; @@ -610,18 +662,24 @@ fn parse_js_record(obj: &JsValue) -> Result { } else if js_sys::Array::is_array(&raw) { let arr = js_sys::Array::from(&raw); (0..arr.length()) - .map(|i| arr.get(i).as_string() - .ok_or_else(|| JsError::new("addr record: array values must be strings"))) + .map(|i| { + arr.get(i).as_string().ok_or_else(|| { + JsError::new("addr record: array values must be strings") + }) + }) .collect::, _>>()? 
} else { - return Err(JsError::new("addr record: 'value' must be a string or array of strings")); + return Err(JsError::new( + "addr record: 'value' must be a string or array of strings", + )); }; let refs: Vec<&str> = values.iter().map(|s| s.as_str()).collect(); Ok(sip7::Record::addr(&key, &refs)) } "blob" => { let key = js_sys::Reflect::get(obj, &"key".into()) - .ok().and_then(|v| v.as_string()) + .ok() + .and_then(|v| v.as_string()) .ok_or_else(|| JsError::new("blob record: 'key' must be a string"))?; let value = js_sys::Reflect::get(obj, &"value".into()) .map(|v| js_sys::Uint8Array::from(v).to_vec()) @@ -630,13 +688,16 @@ fn parse_js_record(obj: &JsValue) -> Result { } "sig" => { let canonical = js_sys::Reflect::get(obj, &"canonical".into()) - .ok().and_then(|v| v.as_string()) + .ok() + .and_then(|v| v.as_string()) .ok_or_else(|| JsError::new("sig record: 'canonical' must be a string"))?; let handle = js_sys::Reflect::get(obj, &"handle".into()) - .ok().and_then(|v| v.as_string()) + .ok() + .and_then(|v| v.as_string()) .unwrap_or_default(); let flags = js_sys::Reflect::get(obj, &"flags".into()) - .ok().and_then(|v| v.as_f64()) + .ok() + .and_then(|v| v.as_f64()) .unwrap_or(0.0) as u8; let sig = js_sys::Reflect::get(obj, &"sig".into()) .map(|v| js_sys::Uint8Array::from(v).to_vec()) @@ -653,8 +714,10 @@ fn parse_js_record(obj: &JsValue) -> Result { } "unknown" => { let rt = js_sys::Reflect::get(obj, &"rtype".into()) - .ok().and_then(|v| v.as_f64()) - .ok_or_else(|| JsError::new("unknown record: 'rtype' must be a number"))? as u8; + .ok() + .and_then(|v| v.as_f64()) + .ok_or_else(|| JsError::new("unknown record: 'rtype' must be a number"))? + as u8; let rdata = js_sys::Reflect::get(obj, &"rdata".into()) .map(|v| js_sys::Uint8Array::from(v).to_vec()) .map_err(|_| JsError::new("unknown record: 'rdata' must be a Uint8Array"))?; @@ -794,7 +857,9 @@ impl RecordSet { /// Wrap raw wire bytes (lazy — no parsing until unpack). 
#[wasm_bindgen(constructor)] pub fn new(data: &[u8]) -> RecordSet { - RecordSet { inner: sip7::RecordSet::new(data.to_vec()) } + RecordSet { + inner: sip7::RecordSet::new(data.to_vec()), + } } /// Pack records into wire format. @@ -817,7 +882,9 @@ impl RecordSet { /// Parse all records. pub fn unpack(&self) -> Result { - let records = self.inner.unpack() + let records = self + .inner + .unpack() .map_err(|e| JsError::new(&format!("unpack failed: {e}")))?; let array = js_sys::Array::new(); for record in &records { @@ -839,7 +906,6 @@ impl RecordSet { } } - /// Hash a message with the Spaces signed-message prefix (SHA256). /// Returns the 32-byte digest suitable for Schnorr signing/verification. #[wasm_bindgen(js_name = "hashSignableMessage")] @@ -851,25 +917,28 @@ pub fn hash_signable_message(msg: &[u8]) -> Vec { /// Verify a Schnorr signature over a message using the Spaces signed-message prefix. #[wasm_bindgen(js_name = "verifySpacesMessage")] pub fn verify_spaces_message(msg: &[u8], signature: &[u8], pubkey: &[u8]) -> Result<(), JsError> { - let sig: [u8; 64] = signature.try_into() + let sig: [u8; 64] = signature + .try_into() .map_err(|_| JsError::new("signature must be 64 bytes"))?; - let pk: [u8; 32] = pubkey.try_into() + let pk: [u8; 32] = pubkey + .try_into() .map_err(|_| JsError::new("pubkey must be 32 bytes"))?; - libveritas::verify_spaces_message(msg, &sig, &pk) - .map_err(|e| JsError::new(&e.to_string())) + libveritas::verify_spaces_message(msg, &sig, &pk).map_err(|e| JsError::new(&e.to_string())) } /// Verify a raw Schnorr signature (no prefix, caller provides the 32-byte message hash). 
#[wasm_bindgen(js_name = "verifySchnorr")] pub fn verify_schnorr(msg_hash: &[u8], signature: &[u8], pubkey: &[u8]) -> Result<(), JsError> { - let hash: [u8; 32] = msg_hash.try_into() + let hash: [u8; 32] = msg_hash + .try_into() .map_err(|_| JsError::new("msg_hash must be 32 bytes"))?; - let sig: [u8; 64] = signature.try_into() + let sig: [u8; 64] = signature + .try_into() .map_err(|_| JsError::new("signature must be 64 bytes"))?; - let pk: [u8; 32] = pubkey.try_into() + let pk: [u8; 32] = pubkey + .try_into() .map_err(|_| JsError::new("pubkey must be 32 bytes"))?; - libveritas::verify_schnorr(&hash, &sig, &pk) - .map_err(|e| JsError::new(&e.to_string())) + libveritas::verify_schnorr(&hash, &sig, &pk).map_err(|e| JsError::new(&e.to_string())) } /// Decode stored zone bytes to a plain JS object. @@ -892,7 +961,8 @@ pub fn zone_to_bytes(zone: JsValue) -> Result, JsError> { pub fn zone_is_better_than(a: JsValue, b: JsValue) -> Result { let inner_a = zone_from_js(&a)?; let inner_b = zone_from_js(&b)?; - inner_a.is_better_than(&inner_b) + inner_a + .is_better_than(&inner_b) .map_err(|e| JsError::new(&e.to_string())) } diff --git a/examples/generate_fixture.rs b/examples/generate_fixture.rs index 781435c..50b6799 100644 --- a/examples/generate_fixture.rs +++ b/examples/generate_fixture.rs @@ -3,7 +3,6 @@ /// Outputs: /// examples/fixture/anchors.json - JSON array of RootAnchors /// examples/fixture/message.bin - borsh-encoded Message - use libveritas::msg::QueryContext; use libveritas_testutil::fixture::{ChainState, FixtureRunner, single_commit_finalized}; use std::fs; @@ -41,7 +40,9 @@ fn main() { // Native verify to confirm the fixture is valid let veritas = state.veritas(); let ctx = QueryContext::new(); - let result = veritas.verify_with_options(&ctx, msg, libveritas::VERIFY_DEV_MODE).unwrap(); + let result = veritas + .verify_with_options(&ctx, msg, libveritas::VERIFY_DEV_MODE) + .unwrap(); println!("\nnative verify OK: {} zones", result.zones.len()); for z in 
&result.zones { println!(" {} -> {}", z.handle, z.sovereignty); diff --git a/methods/Cargo.toml b/methods/Cargo.toml index 36cd77c..a4520d2 100644 --- a/methods/Cargo.toml +++ b/methods/Cargo.toml @@ -1,7 +1,9 @@ [package] name = "libveritas_methods" -version = "0.1.2" -edition = "2021" +version.workspace = true +edition = "2021" # risc0 build environment is on 2021 +authors.workspace = true +publish = false [build-dependencies] risc0-build = { version = "3.0.5" } diff --git a/methods/guest/Cargo.lock b/methods/guest/Cargo.lock index ce8640a..505c51b 100644 --- a/methods/guest/Cargo.lock +++ b/methods/guest/Cargo.lock @@ -1210,8 +1210,9 @@ dependencies = [ [[package]] name = "spacedb" -version = "0.0.12" -source = "git+https://github.com/spacesprotocol/spacedb.git#92c150ea5a368c7ec894ef3530992abd2f7d6012" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a48cda82e951391df9d0a54c96f8b04e117ff39abad5359b181cb71c80798d77" dependencies = [ "borsh", "sha2", diff --git a/release-plz.toml b/release-plz.toml new file mode 100644 index 0000000..fbef184 --- /dev/null +++ b/release-plz.toml @@ -0,0 +1,86 @@ +[workspace] +# Changelog lives at repo root alongside Cargo.toml. +changelog_path = "CHANGELOG.md" +# Always regenerate the changelog from conventional commits. +changelog_update = true +# Open a single release PR per push to main. +pr_draft = false + +[changelog] +header = """# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+""" + +body = """ +## [{{ version | trim_start_matches(pat=\"v\") }}]\ + {%- if release_link -%}\ + ({{ release_link }})\ + {% endif %} - {{ timestamp | date(format=\"%Y-%m-%d\") }} +{% for group, commits in commits | group_by(attribute=\"group\") %} +### {{ group | upper_first }} +{% for commit in commits %} +- {% if commit.scope %}*({{ commit.scope }})* {% endif %}\ + {% if commit.breaking %}[**breaking**] {% endif %}\ + {{ commit.message | upper_first }}\ +{% endfor %} +{% endfor %} +""" + +commit_parsers = [ + { message = "^feat", group = "Features" }, + { message = "^fix", group = "Bug Fixes" }, + { message = "^perf", group = "Performance" }, + { message = "^refactor", group = "Refactor" }, + { message = "^docs", group = "Documentation" }, + { message = "^test", group = "Tests" }, + { message = "^build", group = "Build" }, + { message = "^ci", group = "CI" }, + { message = "^chore\\(release\\)", skip = true }, + { message = "^chore", group = "Chore" }, + { message = "^revert", group = "Revert" }, +] + +# Publishable crates — published to crates.io + GitHub release on tag. 
+[[package]] +name = "libveritas" +publish = true +git_release_enable = true +git_release_latest = false +git_tag_name = "libveritas-v{{ version }}" + +[[package]] +name = "libveritas_zk" +publish = true +git_release_enable = true +git_release_latest = false +git_tag_name = "libveritas_zk-v{{ version }}" + +# Non-publishable crates: skip versioning, changelog, and releases +[[package]] +name = "libveritas_methods" +release = false +publish = false + +[[package]] +name = "libveritas_testutil" +release = false +publish = false + +[[package]] +name = "libveritas-wasm" +release = false +publish = false + +[[package]] +name = "libveritas-uniffi" +release = false +publish = false + +[[package]] +name = "libveritas-python" +release = false +publish = false diff --git a/testutil/Cargo.toml b/testutil/Cargo.toml index ad1216d..b7b4ac0 100644 --- a/testutil/Cargo.toml +++ b/testutil/Cargo.toml @@ -1,11 +1,13 @@ [package] name = "libveritas_testutil" -version = "0.1.0" -edition = "2024" +version.workspace = true +edition.workspace = true +authors.workspace = true +publish = false [dependencies] -libveritas = { path = "../veritas" } -libveritas_zk = { path = "../zk" } +libveritas = { workspace = true } +libveritas_zk = { workspace = true } sip7 = { workspace = true } spacedb = { workspace = true } spaces_protocol = { workspace = true } diff --git a/testutil/src/fixture.rs b/testutil/src/fixture.rs index 0607e8c..85d63e0 100644 --- a/testutil/src/fixture.rs +++ b/testutil/src/fixture.rs @@ -1,20 +1,20 @@ +use crate::{TestChain, TestDelegatedSpace, TestHandleTree}; +use libveritas::cert::{HandleSubtree, NumsSubtree, SpacesSubtree}; +use libveritas::msg::{Bundle, ChainProof}; +use libveritas::{SovereigntyState, Veritas, msg}; use spacedb::subtree::SubTree; -use spaces_nums::constants::COMMITMENT_FINALITY_INTERVAL; use spaces_nums::RootAnchor; -use libveritas::{msg, SovereigntyState, Veritas}; +use spaces_nums::constants::COMMITMENT_FINALITY_INTERVAL; use 
spaces_protocol::sname::SName; -use libveritas::cert::{HandleSubtree, NumsSubtree, SpacesSubtree}; -use libveritas::msg::{Bundle, ChainProof}; -use crate::{TestChain, TestDelegatedSpace, TestHandleTree}; -#[derive(Clone,Debug)] +#[derive(Clone, Debug)] pub enum Step { Stage(&'static [&'static str]), Commit, Finalize, } -#[derive(Clone,Debug)] +#[derive(Clone, Debug)] pub struct Fixture { pub name: &'static str, pub steps: Vec, @@ -87,9 +87,9 @@ impl HandleStates { /// Expected sovereignty for a committed handle pub fn sovereignty(&self, handle: &str) -> Option { if self.staged.iter().find(|&&s| s == handle).is_some() { - return Some(SovereigntyState::Dependent) + return Some(SovereigntyState::Dependent); } - + let commit_idx = self.commit_index(handle)?; if commit_idx < self.finalized_count { Some(SovereigntyState::Sovereign) @@ -101,7 +101,10 @@ impl HandleStates { impl Fixture { pub fn new(name: &'static str) -> Self { - Self { name, steps: vec![] } + Self { + name, + steps: vec![], + } } pub fn stage(mut self, handles: &'static [&'static str]) -> Self { @@ -144,7 +147,11 @@ impl Fixture { } } - HandleStates { commits, staged, finalized_count } + HandleStates { + commits, + staged, + finalized_count, + } } } @@ -154,6 +161,12 @@ pub struct ChainState { pub anchors: Vec, } +impl Default for ChainState { + fn default() -> Self { + Self::new() + } +} + impl ChainState { pub fn new() -> Self { Self { @@ -161,15 +174,14 @@ impl ChainState { anchors: vec![], } } - + pub fn veritas(&self) -> Veritas { let mut anchors = self.anchors.clone(); if anchors.is_empty() { anchors.push(self.chain.current_root_anchor()); } anchors.reverse(); - Veritas::new() - .with_anchors(anchors).expect("valid anchors") + Veritas::new().with_anchors(anchors).expect("valid anchors") } pub fn message(&self, bundles: Vec) -> msg::Message { @@ -185,7 +197,7 @@ impl ChainState { } #[derive(Clone)] -pub struct FixtureRunner{ +pub struct FixtureRunner { pub fixture: Fixture, pub step: 
std::vec::IntoIter, pub space: TestDelegatedSpace, @@ -222,14 +234,15 @@ impl FixtureRunner { tree: HandleSubtree(c.handle_tree.clone()), handles: vec![], }; - for (_, handle) in &mut c.handles { - let signer = SName::join(&handle.name, &space_label) - .expect("join handle name"); + for handle in c.handles.values_mut() { + let signer = SName::join(&handle.name, &space_label).expect("join handle name"); // Add some off-chain data handle.set_records( - sip7::RecordSet::pack(vec![ - sip7::Record::txt("name", &[&handle.name.to_string()]), - ]).expect("pack records"), + sip7::RecordSet::pack(vec![sip7::Record::txt( + "name", + &[&handle.name.to_string()], + )]) + .expect("pack records"), &signer, ); @@ -244,19 +257,20 @@ impl FixtureRunner { } let mut empty_epoch = msg::Epoch { - tree: HandleSubtree(SubTree::empty()), + tree: HandleSubtree(SubTree::empty()), handles: vec![], }; let staging = bundle.epochs.last_mut().unwrap_or(&mut empty_epoch); - for (_, staged) in &mut self.handles.staged { - let signer = SName::join(&staged.handle.name, &space_label) - .expect("join handle name"); + for staged in self.handles.staged.values_mut() { + let signer = SName::join(&staged.handle.name, &space_label).expect("join handle name"); // add some off-chain data staged.handle.set_records( - sip7::RecordSet::pack(vec![ - sip7::Record::txt("name", &[&staged.handle.name.to_string()]), - ]).expect("pack records"), + sip7::RecordSet::pack(vec![sip7::Record::txt( + "name", + &[&staged.handle.name.to_string()], + )]) + .expect("pack records"), &signer, ); staging.handles.push(msg::Handle { @@ -292,7 +306,7 @@ impl FixtureRunner { } pub fn run(&mut self, state: &mut ChainState) { - while self.run_next(state).is_some() {} + while self.run_next(state).is_some() {} } } @@ -302,15 +316,12 @@ impl FixtureRunner { /// No commitments, just staged handles. Temp certs need no exclusion proof. 
pub fn staged_only() -> Fixture { - Fixture::new("@staged") - .stage(&["alice", "bob"]) + Fixture::new("@staged").stage(&["alice", "bob"]) } /// Single commitment, not yet finalized. Handles are Pending. pub fn single_commit_pending() -> Fixture { - Fixture::new("@pending") - .stage(&["alice", "bob"]) - .commit() + Fixture::new("@pending").stage(&["alice", "bob"]).commit() } /// Single commitment, finalized. Handles are Sovereign. No receipt needed. @@ -398,7 +409,10 @@ mod tests { assert!(states.is_committed("alice")); assert_eq!(states.commit_index("alice"), Some(0)); assert!(!states.has_pending_commit()); - assert_eq!(states.sovereignty("alice"), Some(SovereigntyState::Sovereign)); + assert_eq!( + states.sovereignty("alice"), + Some(SovereigntyState::Sovereign) + ); } #[test] @@ -411,12 +425,18 @@ mod tests { // alice is in finalized commit 0 assert_eq!(states.commit_index("alice"), Some(0)); - assert_eq!(states.sovereignty("alice"), Some(SovereigntyState::Sovereign)); + assert_eq!( + states.sovereignty("alice"), + Some(SovereigntyState::Sovereign) + ); assert!(!states.needs_receipt("alice")); // charlie is in pending commit 1 assert_eq!(states.commit_index("charlie"), Some(1)); - assert_eq!(states.sovereignty("charlie"), Some(SovereigntyState::Pending)); + assert_eq!( + states.sovereignty("charlie"), + Some(SovereigntyState::Pending) + ); assert!(states.needs_receipt("charlie")); } @@ -430,12 +450,18 @@ mod tests { // Commit 0 (finalized): alice, bob assert_eq!(states.in_commit(0), &["alice", "bob"]); - assert_eq!(states.sovereignty("alice"), Some(SovereigntyState::Sovereign)); + assert_eq!( + states.sovereignty("alice"), + Some(SovereigntyState::Sovereign) + ); assert!(!states.needs_receipt("alice")); // Commit 1 (finalized): charlie, dave assert_eq!(states.in_commit(1), &["charlie", "dave"]); - assert_eq!(states.sovereignty("charlie"), Some(SovereigntyState::Sovereign)); + assert_eq!( + states.sovereignty("charlie"), + Some(SovereigntyState::Sovereign) + ); 
assert!(states.needs_receipt("charlie")); // commit > 0 // Commit 2 (pending): eve, frank @@ -447,6 +473,9 @@ mod tests { assert_eq!(states.staged, vec!["grace", "heidi"]); assert!(states.is_staged("grace")); assert!(!states.is_committed("grace")); - assert_eq!(states.sovereignty("grace"), Some(SovereigntyState::Dependent)); + assert_eq!( + states.sovereignty("grace"), + Some(SovereigntyState::Dependent) + ); } } diff --git a/testutil/src/lib.rs b/testutil/src/lib.rs index 0cc660f..9ff326a 100644 --- a/testutil/src/lib.rs +++ b/testutil/src/lib.rs @@ -5,7 +5,7 @@ pub mod fixture; -use bitcoin::hashes::{Hash as BitcoinHash}; +use bitcoin::hashes::Hash as BitcoinHash; use bitcoin::key::Keypair; use bitcoin::key::rand::Rng; use bitcoin::secp256k1::Secp256k1; @@ -14,18 +14,21 @@ use bitcoin::{BlockHash, OutPoint, ScriptBuf, Txid}; use borsh::{BorshDeserialize, BorshSerialize}; use libveritas::cert::{HandleOut, HandleSubtree, KeyHash, NumsSubtree, Signature, SpacesSubtree}; use libveritas::msg::{self, ChainProof, Message}; -use spaces_protocol::sname::{Subname, SName}; use libveritas::{ProvableOption, SovereigntyState, Veritas, Zone, hash_signable_message}; use risc0_zkvm::{FakeReceipt, InnerReceipt, Receipt, ReceiptClaim}; use spacedb::Sha256Hasher; use spacedb::subtree::{ProofType, SubTree, ValueOrHash}; -use spaces_protocol::constants::{ChainAnchor}; +use spaces_nums::num_id::NumId; +use spaces_nums::snumeric::SNumeric; +use spaces_nums::{ + CommitmentKey, CommitmentTipKey, DelegatorKey, FullNumOut, Num, NumOut, NumOutpointKey, + RootAnchor, rolling_hash, +}; +use spaces_protocol::constants::ChainAnchor; use spaces_protocol::hasher::{KeyHasher, OutpointKey, SpaceKey}; use spaces_protocol::slabel::SLabel; +use spaces_protocol::sname::{SName, Subname}; use spaces_protocol::{Covenant, FullSpaceOut, Space, SpaceOut}; -use spaces_nums::num_id::NumId; -use spaces_nums::{CommitmentKey, FullNumOut, Num, NumOut, NumOutpointKey, CommitmentTipKey, RootAnchor, rolling_hash, 
DelegatorKey}; -use spaces_nums::snumeric::SNumeric; use std::collections::HashMap; use std::str::FromStr; // ───────────────────────────────────────────────────────────────────────────── @@ -52,7 +55,7 @@ pub fn label(s: &str) -> Subname { } pub fn sign_zone(zone: &Zone, keypair: &Keypair) -> Signature { - sign_mesage(&zone.signing_bytes(), &keypair) + sign_mesage(&zone.signing_bytes(), keypair) } pub fn gen_p2tr_spk() -> (ScriptBuf, Keypair) { @@ -227,7 +230,7 @@ impl TestNum { } pub fn id(&self) -> NumId { - self.fso.numout.num.id.clone() + self.fso.numout.num.id } pub fn outpoint_key(&self) -> NumOutpointKey { @@ -269,7 +272,7 @@ impl TestChain { pub fn chain_proof(&self, anchor: &ChainAnchor) -> ChainProof { ChainProof { - anchor: anchor.clone(), + anchor: *anchor, spaces: SpacesSubtree(self.spaces_tree.clone()), nums: NumsSubtree(self.nums_tree.clone()), } @@ -331,7 +334,7 @@ impl TestChain { self.nums_tree .insert(num.id().into(), ValueOrHash::Value(num.outpoint_bytes())) .expect("insert outpoint"); - self.nums.insert(num.id().into(), num.clone()); + self.nums.insert(num.id(), num.clone()); num } @@ -653,7 +656,7 @@ impl TestHandleTree { Message { chain: msg::ChainProof { - anchor: anchor.clone(), + anchor: *anchor, spaces: SpacesSubtree(spaces_proof), nums: NumsSubtree(nums_proof), }, @@ -715,7 +718,7 @@ impl TestHandleTree { Message { chain: msg::ChainProof { - anchor: anchor.clone(), + anchor: *anchor, spaces: SpacesSubtree(spaces_proof), nums: NumsSubtree(nums_proof), }, @@ -743,6 +746,5 @@ impl TestHandleTree { // ───────────────────────────────────────────────────────────────────────────── pub fn veritas_from_anchors(anchors: Vec) -> Veritas { - Veritas::new() - .with_anchors(anchors).expect("valid anchors") + Veritas::new().with_anchors(anchors).expect("valid anchors") } diff --git a/update-elfs.sh b/update-elfs.sh index 3a18d76..bfb4f01 100755 --- a/update-elfs.sh +++ b/update-elfs.sh @@ -59,7 +59,9 @@ cat > "${CONSTANTS_FILE}" << EOF // To update 
after changing guest programs, run: // ./update-elfs.sh +#[rustfmt::skip] pub const FOLD_ID: [u32; 8] = ${FOLD_U32}; +#[rustfmt::skip] pub const STEP_ID: [u32; 8] = ${STEP_U32}; #[cfg(feature = "elf")] diff --git a/veritas/Cargo.toml b/veritas/Cargo.toml index 107479f..0d354e7 100644 --- a/veritas/Cargo.toml +++ b/veritas/Cargo.toml @@ -1,26 +1,35 @@ [package] name = "libveritas" -version = "0.1.0" -edition = "2024" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +repository.workspace = true +homepage.workspace = true +authors.workspace = true +description = "Offline verification library for Spaces protocol certificates and zone records." +documentation = "https://docs.rs/libveritas" +readme = "../README.md" +keywords = ["spaces", "verification", "merkle", "zk"] +categories = ["cryptography", "data-structures"] [features] elf = [] [dependencies] +libveritas_zk = { workspace = true } spacedb = { workspace = true } -spaces_protocol = {workspace = true} -spaces_nums = {workspace = true} -sip7 = {workspace = true} +spaces_protocol = { workspace = true } +spaces_nums = { workspace = true } +sip7 = { workspace = true } serde = { version = "1.0", features = ["derive"] } base64 = "0.22" risc0-zkvm = { version = "3.0.5", default-features = false, features = ["std"] } borsh = { version = "1.6", features = ["derive", "std"] } hex = "0.4" serde_json = "1.0" -libveritas_zk = {path = "../zk"} [dev-dependencies] -borsh_utils = {workspace = true} -bitcoin = { version = "0.32", default-features = false, features = [ "rand-std"] } -libveritas_testutil = { path = "../testutil" } - +borsh_utils = { workspace = true } +bitcoin = { version = "0.32", default-features = false, features = ["rand-std"] } +libveritas_testutil = { path = "../testutil" } \ No newline at end of file diff --git a/veritas/elfs/fold.bin b/veritas/elfs/fold.bin index 5fc04de..e052397 100644 Binary files a/veritas/elfs/fold.bin and b/veritas/elfs/fold.bin 
differ diff --git a/veritas/elfs/step.bin b/veritas/elfs/step.bin index 744c5e9..b6f4647 100644 Binary files a/veritas/elfs/step.bin and b/veritas/elfs/step.bin differ diff --git a/veritas/src/builder.rs b/veritas/src/builder.rs index 1b66864..3fa73c5 100644 --- a/veritas/src/builder.rs +++ b/veritas/src/builder.rs @@ -1,12 +1,12 @@ -use std::collections::HashMap; -use sip7::SIG_PRIMARY_ZONE; +use crate::MessageError; use crate::cert::{Certificate, CertificateChain, ChainProofRequestUtils, Witness}; use crate::msg::{ChainProof, Message, UnsignedRecordSet}; use crate::names::NameResolver; -use spaces_protocol::sname::{NameLike, SName}; -use crate::MessageError; +use sip7::SIG_PRIMARY_ZONE; use spaces_nums::ChainProofRequest; use spaces_protocol::slabel::SLabel; +use spaces_protocol::sname::{NameLike, SName}; +use std::collections::HashMap; pub struct DataUpdateRequest { pub handle: SName, @@ -19,6 +19,12 @@ pub struct MessageBuilder { updates: Vec, } +impl Default for MessageBuilder { + fn default() -> Self { + Self::new() + } +} + impl MessageBuilder { pub fn new() -> Self { Self { @@ -81,7 +87,10 @@ impl MessageBuilder { /// Returns the message and unsigned record sets that need signing. /// Call `unsigned.signing_id()` to get the hash, sign it, /// then `unsigned.pack_sig(sig)` and `msg.set_records(canonical, signed)`. - pub fn build(self, chain: ChainProof) -> Result<(Message, Vec), MessageError> { + pub fn build( + self, + chain: ChainProof, + ) -> Result<(Message, Vec), MessageError> { let certs = dedup_root_certs(self.certs, &chain); let resolver = NameResolver::from_certificates(&certs, &chain.nums); let mut msg = Message::try_from_certificates(chain, certs)?; @@ -143,14 +152,20 @@ impl Message { /// Resolve the block height for a root certificate's receipt by looking up /// its commitment in the chain proof's nums tree. 
fn root_cert_block_height(cert: &Certificate, chain: &ChainProof) -> u32 { - let Some(space) = cert.subject.space() else { return 0 }; + let Some(space) = cert.subject.space() else { + return 0; + }; let receipt = match &cert.witness { Witness::Root { receipt } => receipt.as_ref(), _ => return 0, }; let Some(receipt) = receipt else { return 0 }; - let Ok(zkc) = receipt.journal.decode::() else { return 0 }; - chain.nums.find_commitment(&space, zkc.final_root) + let Ok(zkc) = receipt.journal.decode::() else { + return 0; + }; + chain + .nums + .find_commitment(&space, zkc.final_root) .ok() .flatten() .map(|c| c.block_height) @@ -173,7 +188,9 @@ fn dedup_root_certs(certs: Vec, chain: &ChainProof) -> Vec= height => continue, - _ => { best_roots.insert(space, (cert, height)); } + _ => { + best_roots.insert(space, (cert, height)); + } } } diff --git a/veritas/src/cert.rs b/veritas/src/cert.rs index 2e7bd45..9b5f788 100644 --- a/veritas/src/cert.rs +++ b/veritas/src/cert.rs @@ -1,18 +1,21 @@ -use std::fmt; -use std::io::{Read, Write}; -use spacedb::{Hash, NodeHasher, Sha256Hasher}; -use spacedb::subtree::{SubTree, SubtreeIter}; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64}; use borsh::{BorshDeserialize, BorshSerialize}; use risc0_zkvm::Receipt; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use spacedb::subtree::{SubTree, SubtreeIter}; +use spacedb::{Hash, NodeHasher, Sha256Hasher}; +use spaces_nums::num_id::NumId; +use spaces_nums::{ + ChainProofRequest, Commitment, CommitmentKey, CommitmentTipKey, NumKeyKind, NumOut, NumericKey, + snumeric::SNumeric, +}; +use spaces_protocol::SpaceOut; use spaces_protocol::bitcoin::ScriptBuf; use spaces_protocol::hasher::{KeyHasher, OutpointKey}; use spaces_protocol::slabel::SLabel; -use spaces_protocol::SpaceOut; -use spaces_nums::{snumeric::SNumeric, ChainProofRequest, Commitment, CommitmentKey, NumericKey, NumKeyKind, NumOut, 
CommitmentTipKey}; -use spaces_nums::num_id::NumId; -use spaces_protocol::sname::{Subname, NameLike, SName}; +use spaces_protocol::sname::{NameLike, SName, Subname}; +use std::fmt; +use std::io::{Read, Write}; /// Current certificate version. pub const CERTIFICATE_VERSION: u8 = 0; @@ -76,7 +79,10 @@ impl CertificateChain { use std::io::{Error, ErrorKind}; if bytes.len() < 7 { - return Err(Error::new(ErrorKind::InvalidData, "too short for chain header")); + return Err(Error::new( + ErrorKind::InvalidData, + "too short for chain header", + )); } if &bytes[0..4] != CHAIN_MAGIC { return Err(Error::new(ErrorKind::InvalidData, "invalid magic bytes")); @@ -100,7 +106,10 @@ impl CertificateChain { let mut certs = Vec::with_capacity(count); for _ in 0..count { if offset + 4 > bytes.len() { - return Err(Error::new(ErrorKind::UnexpectedEof, "truncated cert length")); + return Err(Error::new( + ErrorKind::UnexpectedEof, + "truncated cert length", + )); } let len = u32::from_le_bytes([ bytes[offset], @@ -120,7 +129,6 @@ impl CertificateChain { } } - /// Offline certificate for space handle ownership. /// /// Contains data not recoverable from on-chain state: @@ -148,6 +156,7 @@ pub struct Certificate { /// Witness for a certificate, containing only non-recoverable proof data. #[derive(Clone, Serialize, Deserialize)] +#[allow(clippy::large_enum_variant)] // boxing the Receipt would change the wire format pub enum Witness { /// Root certificate for a top-level space. Root { @@ -199,7 +208,10 @@ impl Certificate { pub fn is_temporary(&self) -> bool { matches!( self.witness, - Witness::Leaf { signature: Some(_), .. } + Witness::Leaf { + signature: Some(_), + .. + } ) } @@ -223,7 +235,8 @@ impl Certificate { /// Returns the NumId derived from the genesis script pubkey if this is a leaf certificate. 
pub fn num_id(&self) -> Option { - self.genesis_spk().map(|spk| NumId::from_spk::(spk.clone())) + self.genesis_spk() + .map(|spk| NumId::from_spk::(spk.clone())) } } @@ -252,7 +265,11 @@ impl ChainProofRequestUtils for ChainProofRequest { // Registry key for commitment tip let registry_key = CommitmentTipKey::from_slabel::(&space); - if !self.nums.iter().any(|k| matches!(k, NumKeyKind::CommitmentTip(r) if *r == registry_key)) { + if !self + .nums + .iter() + .any(|k| matches!(k, NumKeyKind::CommitmentTip(r) if *r == registry_key)) + { self.nums.push(NumKeyKind::CommitmentTip(registry_key)); } @@ -262,18 +279,30 @@ impl ChainProofRequestUtils for ChainProofRequest { if let Some(receipt) = receipt { if let Ok(zkc) = receipt.journal.decode::() { let ck = CommitmentKey::new::(&space, zkc.final_root); - if !self.nums.iter().any(|k| matches!(k, NumKeyKind::Commitment(c) if *c == ck)) { + if !self + .nums + .iter() + .any(|k| matches!(k, NumKeyKind::Commitment(c) if *c == ck)) + { self.nums.push(NumKeyKind::Commitment(ck)); } } } } - Witness::Leaf { genesis_spk, handles, .. } => { + Witness::Leaf { + genesis_spk, + handles, + .. 
+ } => { // Commitment key for epoch root (only if tree is non-empty) if !handles.0.is_empty() { if let Ok(root) = handles.compute_root() { let ck = CommitmentKey::new::(&space, root); - if !self.nums.iter().any(|k| matches!(k, NumKeyKind::Commitment(c) if *c == ck)) { + if !self + .nums + .iter() + .any(|k| matches!(k, NumKeyKind::Commitment(c) if *c == ck)) + { self.nums.push(NumKeyKind::Commitment(ck)); } } @@ -281,7 +310,11 @@ impl ChainProofRequestUtils for ChainProofRequest { // NumId key for key rotation lookup let num_id = NumId::from_spk::(genesis_spk.clone()); - if !self.nums.iter().any(|k| matches!(k, NumKeyKind::Id(s) if *s == num_id)) { + if !self + .nums + .iter() + .any(|k| matches!(k, NumKeyKind::Id(s) if *s == num_id)) + { self.nums.push(NumKeyKind::Id(num_id)); } } @@ -289,7 +322,7 @@ impl ChainProofRequestUtils for ChainProofRequest { } /// Build from an iterator of certificates. - fn from_certificates<'a>(certs: impl Iterator) -> Self { + fn from_certificates<'a>(certs: impl Iterator) -> Self { let mut req = Self { spaces: vec![], nums: vec![], @@ -309,7 +342,11 @@ impl ChainProofRequestUtils for ChainProofRequest { // Registry key for commitment tip let registry_key = CommitmentTipKey::from_slabel::(space); - if !self.nums.iter().any(|k| matches!(k, NumKeyKind::CommitmentTip(r) if *r == registry_key)) { + if !self + .nums + .iter() + .any(|k| matches!(k, NumKeyKind::CommitmentTip(r) if *r == registry_key)) + { self.nums.push(NumKeyKind::CommitmentTip(registry_key)); } @@ -320,7 +357,11 @@ impl ChainProofRequestUtils for ChainProofRequest { // Commitment key for subtree root if let Ok(root) = handles.compute_root() { let ck = CommitmentKey::new::(space, root); - if !self.nums.iter().any(|k| matches!(k, NumKeyKind::Commitment(c) if *c == ck)) { + if !self + .nums + .iter() + .any(|k| matches!(k, NumKeyKind::Commitment(c) if *c == ck)) + { self.nums.push(NumKeyKind::Commitment(ck)); } } @@ -329,7 +370,11 @@ impl ChainProofRequestUtils for 
ChainProofRequest { for (_, value) in handles.0.iter() { if let Ok(handle_out) = HandleOut::from_slice(value) { let num_id = NumId::from_spk::(handle_out.spk); - if !self.nums.iter().any(|k| matches!(k, NumKeyKind::Id(s) if *s == num_id)) { + if !self + .nums + .iter() + .any(|k| matches!(k, NumKeyKind::Id(s) if *s == num_id)) + { self.nums.push(NumKeyKind::Id(num_id)); } } @@ -339,7 +384,11 @@ impl ChainProofRequestUtils for ChainProofRequest { fn add_space(&mut self, space: SLabel) { if space.is_numeric() { let numeric: SNumeric = space.try_into().expect("valid numeric"); - if !self.nums.iter().any(|k| matches!(k, NumKeyKind::Num(n) if *n == numeric)) { + if !self + .nums + .iter() + .any(|k| matches!(k, NumKeyKind::Num(n) if *n == numeric)) + { self.nums.push(NumKeyKind::Num(numeric)); } return; @@ -355,7 +404,7 @@ impl ChainProofRequestUtils for ChainProofRequest { } fn add_numeric(&mut self, numeric: SNumeric) { - self.nums.push(NumKeyKind::Num(numeric)); + self.nums.push(NumKeyKind::Num(numeric)); } } @@ -457,16 +506,22 @@ impl HandleSubtree { &mut self.0 } - pub fn contains_subspace(&self, label: &Subname, genesis_spk: &ScriptBuf) -> Result { + pub fn contains_subspace( + &self, + label: &Subname, + genesis_spk: &ScriptBuf, + ) -> Result { let key = Sha256Hasher::hash(label.as_slabel().as_ref()); if !self.0.contains(&key)? 
{ return Ok(false); } - let matches = self.0.iter() - .any(|(k, v)| *k == key && HandleOut::from_slice(v) - .is_ok_and(|h| h.spk.as_bytes() == genesis_spk.as_bytes())); + let matches = self.0.iter().any(|(k, v)| { + *k == key + && HandleOut::from_slice(v) + .is_ok_and(|h| h.spk.as_bytes() == genesis_spk.as_bytes()) + }); Ok(matches) } } @@ -588,7 +643,11 @@ impl NumsSubtree { /// - `Ok(true)` if the commitment tip for this space matches state_root /// - `Ok(false)` if the commitment tip exists but doesn't match /// - `Err` if the tip cannot be proven - pub fn is_latest_commitment(&self, space: &SLabel, state_root: Hash) -> Result { + pub fn is_latest_commitment( + &self, + space: &SLabel, + state_root: Hash, + ) -> Result { let key: Hash = CommitmentTipKey::from_slabel::(space).into(); // Find the commitment tip entry @@ -629,7 +688,7 @@ impl NumsSubtree { } } - let numeric : Hash = NumericKey::from_numeric::(numeric).into(); + let numeric: Hash = NumericKey::from_numeric::(numeric).into(); // Not found in UTXOs - verify the num provably doesn't exist. // If contains() returns true, the proof is incomplete (has key but missing UTXO). @@ -672,7 +731,11 @@ impl NumsSubtree { Ok(None) } - pub fn find_commitment(&self, space: &SLabel, commitment_root: Hash) -> Result, SubtreeError> { + pub fn find_commitment( + &self, + space: &SLabel, + commitment_root: Hash, + ) -> Result, SubtreeError> { let ck = CommitmentKey::new::(space, commitment_root); let key: Hash = ck.into(); @@ -680,11 +743,15 @@ impl NumsSubtree { if !self.0.contains(&key)? 
{ return Ok(None); } - let (_, data) = self.0.iter().find(|(k, _)| **k == key) + let (_, data) = self + .0 + .iter() + .find(|(k, _)| **k == key) .expect("commitment must be found after checking with contains"); - let v: Commitment = borsh::from_slice(data) - .map_err(|e| SubtreeError::DecodeFailed { reason: e.to_string() })?; + let v: Commitment = borsh::from_slice(data).map_err(|e| SubtreeError::DecodeFailed { + reason: e.to_string(), + })?; Ok(Some(v)) } @@ -740,7 +807,7 @@ impl Iterator for SpacesIter<'_> { if OutpointKey::is_valid(k) { let result = borsh::from_slice::(v.as_slice()) .ok() - .map(|raw| SpacesValue::UTXO(raw)); + .map(SpacesValue::UTXO); return (*k, result.unwrap_or(SpacesValue::Unknown(v.clone()))); } (*k, SpacesValue::Unknown(v.clone())) @@ -748,7 +815,6 @@ impl Iterator for SpacesIter<'_> { } } - // Serde implementations for subtree types (uses SubTreeEncoder for wire format) impl Serialize for SpacesSubtree { @@ -787,9 +853,13 @@ impl<'de> Deserialize<'de> for HandleSubtree { } } -fn serialize_subtree(subtree: &SubTree, serializer: S) -> Result { +fn serialize_subtree( + subtree: &SubTree, + serializer: S, +) -> Result { use serde::ser::Error; - let buf = subtree.to_vec() + let buf = subtree + .to_vec() .map_err(|e| S::Error::custom(format!("SubTree encode error: {}", e)))?; if serializer.is_human_readable() { @@ -800,19 +870,21 @@ fn serialize_subtree(subtree: &SubTree, serializer: } } -fn deserialize_subtree<'de, D: Deserializer<'de>>(deserializer: D) -> Result, D::Error> { +fn deserialize_subtree<'de, D: Deserializer<'de>>( + deserializer: D, +) -> Result, D::Error> { use serde::de::Error; let buf = if deserializer.is_human_readable() { let encoded = ::deserialize(deserializer)?; - BASE64.decode(&encoded) + BASE64 + .decode(&encoded) .map_err(|e| D::Error::custom(format!("base64 decode error: {}", e)))? } else { as Deserialize>::deserialize(deserializer)? 
}; - SubTree::from_slice(&buf) - .map_err(|e| D::Error::custom(format!("SubTreeEncoder error: {}", e))) + SubTree::from_slice(&buf).map_err(|e| D::Error::custom(format!("SubTreeEncoder error: {}", e))) } // Manual Borsh implementations for Certificate and CertificateWitness @@ -831,7 +903,11 @@ impl BorshDeserialize for Certificate { let version = u8::deserialize_reader(reader)?; let subject = SName::deserialize_reader(reader)?; let witness = Witness::deserialize_reader(reader)?; - Ok(Certificate { version, subject, witness }) + Ok(Certificate { + version, + subject, + witness, + }) } } @@ -842,7 +918,11 @@ impl BorshSerialize for Witness { BorshSerialize::serialize(&0u8, writer)?; BorshSerialize::serialize(receipt, writer) } - Witness::Leaf { genesis_spk, handles, signature } => { + Witness::Leaf { + genesis_spk, + handles, + signature, + } => { BorshSerialize::serialize(&1u8, writer)?; BorshSerialize::serialize(&genesis_spk.as_bytes().to_vec(), writer)?; BorshSerialize::serialize(handles, writer)?; @@ -865,7 +945,11 @@ impl BorshDeserialize for Witness { let genesis_spk = ScriptBuf::from_bytes(spk_bytes); let handles = HandleSubtree::deserialize_reader(reader)?; let signature = Option::::deserialize_reader(reader)?; - Ok(Witness::Leaf { genesis_spk, handles, signature }) + Ok(Witness::Leaf { + genesis_spk, + handles, + signature, + }) } _ => Err(std::io::Error::new( std::io::ErrorKind::InvalidData, @@ -908,4 +992,4 @@ impl From for SubtreeError { reason: e.to_string(), } } -} \ No newline at end of file +} diff --git a/veritas/src/constants.rs b/veritas/src/constants.rs index 82b7019..87be798 100644 --- a/veritas/src/constants.rs +++ b/veritas/src/constants.rs @@ -5,8 +5,10 @@ // To update after changing guest programs, run: // ./update-elfs.sh -pub const FOLD_ID: [u32; 8] = [2137388158, 139300334, 1332819426, 2098328572, 332487338, 683648994, 3447504890, 2081197365]; -pub const STEP_ID: [u32; 8] = [3974921952, 1965078643, 1566874199, 1666253710, 1661334525, 
217836664, 127468841, 245176993]; +#[rustfmt::skip] +pub const FOLD_ID: [u32; 8] = [3538164873, 3494660837, 1605885420, 2756930862, 1952720968, 91802116, 3635727049, 436347682]; +#[rustfmt::skip] +pub const STEP_ID: [u32; 8] = [2719979593, 62333512, 1158600685, 3512173834, 1442236244, 869560259, 553115519, 3467999922]; #[cfg(feature = "elf")] pub const FOLD_ELF: &[u8] = include_bytes!("../elfs/fold.bin"); diff --git a/veritas/src/lib.rs b/veritas/src/lib.rs index dcbe2d2..45a040e 100644 --- a/veritas/src/lib.rs +++ b/veritas/src/lib.rs @@ -1,26 +1,49 @@ -use crate::cert::{Certificate, KeyHash, Witness, Signature}; +//! Offline verification library for the [Spaces protocol](https://spacesprotocol.org). +//! +//! `libveritas` verifies space handle ownership and zone records against on-chain +//! anchors using ZK receipts and Merkle proofs. It is the verifier counterpart to +//! the Spaces fabric / relay infrastructure. +//! +//! # Quick start +//! +//! ```ignore +//! use libveritas::{Veritas, msg::QueryContext}; +//! +//! let veritas = Veritas::new().with_anchors(anchors)?; +//! let result = veritas.verify(&QueryContext::new(), message)?; +//! for zone in &result.zones { +//! // ... +//! } +//! ``` +//! +//! # Features +//! +//! - `elf` — embed the prover ELF binaries (`FOLD_ELF`, `STEP_ELF`) alongside +//! the image IDs. Verifiers only need the image IDs and can skip this feature. 
+ +use crate::cert::{Certificate, KeyHash, Signature, Witness}; use borsh::{BorshDeserialize, BorshSerialize}; use libveritas_zk::guest::CommitmentKind; use risc0_zkvm::{Receipt, VerifierContext}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use spacedb::subtree::SubTree; use spacedb::{Hash, NodeHasher, Sha256Hasher}; -use spaces_protocol::bitcoin::hashes::{Hash as HashUtil, sha256, HashEngine}; +use spaces_nums::RootAnchor; +use spaces_nums::constants::COMMITMENT_FINALITY_INTERVAL; +use spaces_protocol::bitcoin::ScriptBuf; +use spaces_protocol::bitcoin::hashes::{Hash as HashUtil, HashEngine, sha256}; use spaces_protocol::bitcoin::secp256k1::{self, XOnlyPublicKey}; -use spaces_protocol::bitcoin::{ScriptBuf}; -use spaces_protocol::sname::{SName}; use spaces_protocol::constants::SPACES_SIGNED_MSG_PREFIX; use spaces_protocol::slabel::SLabel; -use spaces_nums::constants::COMMITMENT_FINALITY_INTERVAL; +use spaces_protocol::sname::SName; use std::collections::HashSet; use std::fmt; use std::io::{Read, Write}; -use spacedb::subtree::SubTree; -use spaces_nums::RootAnchor; +pub mod builder; pub mod cert; -pub mod msg; pub mod constants; -pub mod builder; +pub mod msg; pub mod names; pub use sip7; @@ -186,7 +209,6 @@ pub struct Zone { pub num_id: Option, } - /// Information about a space's commitment state. 
#[derive(Clone, Serialize, Deserialize)] pub struct CommitmentInfo { @@ -203,7 +225,8 @@ pub struct CommitmentInfo { impl CommitmentInfo { pub fn empty() -> Self { let empty_root = SubTree::::empty() - .compute_root().expect("valid"); + .compute_root() + .expect("valid"); Self { onchain: spaces_nums::Commitment { state_root: empty_root, @@ -290,7 +313,10 @@ impl BorshDeserialize for CommitmentInfo { fn deserialize_reader(reader: &mut R) -> std::io::Result { let onchain = spaces_nums::Commitment::deserialize_reader(reader)?; let receipt_hash = Option::::deserialize_reader(reader)?; - Ok(CommitmentInfo { onchain, receipt_hash }) + Ok(CommitmentInfo { + onchain, + receipt_hash, + }) } } @@ -352,7 +378,8 @@ impl BorshDeserialize for Zone { let fallback_bytes: Vec = Vec::deserialize_reader(reader)?; let records_bytes: Vec = Vec::deserialize_reader(reader)?; let delegate: ProvableOption = ProvableOption::deserialize_reader(reader)?; - let commitment: ProvableOption = ProvableOption::deserialize_reader(reader)?; + let commitment: ProvableOption = + ProvableOption::deserialize_reader(reader)?; let script_pubkey = ScriptBuf::from_bytes(spk_bytes); let num_id = Option::::deserialize_reader(reader)?; @@ -415,9 +442,12 @@ pub fn hash_signable_message(msg: &[u8]) -> secp256k1::Message { /// - `msg`: the raw message bytes (will be prefixed and hashed internally) /// - `signature`: 64-byte Schnorr signature /// - `pubkey`: 32-byte x-only public key -pub fn verify_spaces_message(msg: &[u8], signature: &[u8; 64], pubkey: &[u8; 32]) -> Result<(), SignatureError> { - let xonly = XOnlyPublicKey::from_slice(pubkey) - .map_err(|_| SignatureError::InvalidPublicKey)?; +pub fn verify_spaces_message( + msg: &[u8], + signature: &[u8; 64], + pubkey: &[u8; 32], +) -> Result<(), SignatureError> { + let xonly = XOnlyPublicKey::from_slice(pubkey).map_err(|_| SignatureError::InvalidPublicKey)?; let sig = secp256k1::schnorr::Signature::from_slice(signature) .map_err(|_| 
SignatureError::InvalidSignature)?; let hashed = hash_signable_message(msg); @@ -431,9 +461,12 @@ pub fn verify_spaces_message(msg: &[u8], signature: &[u8; 64], pubkey: &[u8; 32] /// - `msg_hash`: 32-byte SHA256 hash of the message /// - `signature`: 64-byte Schnorr signature /// - `pubkey`: 32-byte x-only public key -pub fn verify_schnorr(msg_hash: &[u8; 32], signature: &[u8; 64], pubkey: &[u8; 32]) -> Result<(), SignatureError> { - let xonly = XOnlyPublicKey::from_slice(pubkey) - .map_err(|_| SignatureError::InvalidPublicKey)?; +pub fn verify_schnorr( + msg_hash: &[u8; 32], + signature: &[u8; 64], + pubkey: &[u8; 32], +) -> Result<(), SignatureError> { + let xonly = XOnlyPublicKey::from_slice(pubkey).map_err(|_| SignatureError::InvalidPublicKey)?; let sig = secp256k1::schnorr::Signature::from_slice(signature) .map_err(|_| SignatureError::InvalidSignature)?; let msg = secp256k1::Message::from_digest(*msg_hash); @@ -482,7 +515,11 @@ impl Zone { /// /// The message is the borsh-serialized zone data (with anchor zeroed), /// prefixed with the spaces signed message prefix and hashed with SHA256. - pub fn verify_signature(&self, signature: &Signature, signer: &ScriptBuf) -> Result<(), SignatureError> { + pub fn verify_signature( + &self, + signature: &Signature, + signer: &ScriptBuf, + ) -> Result<(), SignatureError> { let script_bytes = signer.as_bytes(); if script_bytes.len() != secp256k1::constants::SCHNORR_PUBLIC_KEY_SIZE + 2 { return Err(SignatureError::InvalidPublicKey); @@ -499,7 +536,6 @@ impl Zone { .map_err(|_| SignatureError::VerificationFailed) } - /// Returns true if self is fresher/better than other. 
/// /// Comparison order: @@ -520,10 +556,10 @@ impl Zone { // Higher commitment height = newer committed state match (&self.commitment, &other.commitment) { - (ProvableOption::Exists { value: a }, ProvableOption::Exists { value: b }) => { - if a.onchain.block_height != b.onchain.block_height { - return Ok(a.onchain.block_height > b.onchain.block_height); - } + (ProvableOption::Exists { value: a }, ProvableOption::Exists { value: b }) + if a.onchain.block_height != b.onchain.block_height => + { + return Ok(a.onchain.block_height > b.onchain.block_height); } (ProvableOption::Exists { .. }, _) => return Ok(true), (_, ProvableOption::Exists { .. }) => return Ok(false), @@ -534,17 +570,25 @@ impl Zone { // Delegate knowledge match (&self.delegate, &other.delegate) { - (ProvableOption::Exists { value: a }, ProvableOption::Exists { value: b }) => { - if !a.records.is_empty() || !b.records.is_empty() { - if a.records.is_empty() { return Ok(false); } - if b.records.is_empty() { return Ok(true); } - if records_is_better(&a.records, &b.records) { - return Ok(true); - } + (ProvableOption::Exists { value: a }, ProvableOption::Exists { value: b }) + if (!a.records.is_empty() || !b.records.is_empty()) => + { + if a.records.is_empty() { + return Ok(false); + } + if b.records.is_empty() { + return Ok(true); + } + if records_is_better(&a.records, &b.records) { + return Ok(true); } } - (ProvableOption::Exists { .. }, ProvableOption::Empty | ProvableOption::Unknown) => return Ok(true), - (ProvableOption::Empty | ProvableOption::Unknown, ProvableOption::Exists { .. }) => return Ok(false), + (ProvableOption::Exists { .. }, ProvableOption::Empty | ProvableOption::Unknown) => { + return Ok(true); + } + (ProvableOption::Empty | ProvableOption::Unknown, ProvableOption::Exists { .. 
}) => { + return Ok(false); + } (ProvableOption::Empty, ProvableOption::Unknown) => return Ok(true), (ProvableOption::Unknown, ProvableOption::Empty) => return Ok(false), _ => {} @@ -552,8 +596,12 @@ impl Zone { // Higher records seq = newer owner-signed records if !self.records.is_empty() || !other.records.is_empty() { - if self.records.is_empty() { return Ok(false); } - if other.records.is_empty() { return Ok(true); } + if self.records.is_empty() { + return Ok(false); + } + if other.records.is_empty() { + return Ok(true); + } if records_is_better(&self.records, &other.records) { return Ok(true); } @@ -570,16 +618,15 @@ impl Zone { /// Copy receipt_hash from other if commitment roots match. /// Avoids re-verifying ZK receipts for commitments we've already verified. pub fn update_receipt_cache(&mut self, other: &Self) { - if let ( - ProvableOption::Exists { value: mine }, - ProvableOption::Exists { value: theirs }, - ) = (&mut self.commitment, &other.commitment) { + if let (ProvableOption::Exists { value: mine }, ProvableOption::Exists { value: theirs }) = + (&mut self.commitment, &other.commitment) + { if mine.onchain.state_root == theirs.onchain.state_root && mine.receipt_hash.is_none() { mine.receipt_hash = theirs.receipt_hash; } } } - + /// Returns true if the zone has a commitment that requires ZK verification. 
/// /// Returns false if: @@ -593,9 +640,7 @@ impl Zone { if value.receipt_hash.is_some() { return None; } - if value.onchain.prev_root.is_none() { - return None; - } + value.onchain.prev_root?; Some(value) } _ => None, @@ -603,7 +648,12 @@ impl Zone { } } -fn verify_receipt(ci: &mut CommitmentInfo, space: &SLabel, receipt: &Receipt, options: u32) -> Result<(), MessageError> { +fn verify_receipt( + ci: &mut CommitmentInfo, + space: &SLabel, + receipt: &Receipt, + options: u32, +) -> Result<(), MessageError> { let space_str = space.to_string(); let zkc = decode_journal(receipt, space)?; verify_zk_journal_matches_onchain(space, &zkc, &ci.onchain)?; @@ -690,6 +740,12 @@ impl fmt::Display for AnchorError { impl std::error::Error for AnchorError {} +impl Default for Veritas { + fn default() -> Self { + Self::new() + } +} + impl Veritas { pub fn new() -> Self { Veritas { @@ -738,7 +794,11 @@ impl Veritas { } /// Verify a message with default options. - pub fn verify(&self, ctx: &msg::QueryContext, msg: crate::msg::Message) -> Result { + pub fn verify( + &self, + ctx: &msg::QueryContext, + msg: crate::msg::Message, + ) -> Result { self.verify_with_options(ctx, msg, VERIFY_DEFAULT) } @@ -783,7 +843,6 @@ impl Veritas { }) } - fn verify_bundle( &self, ctx: &msg::QueryContext, @@ -805,19 +864,23 @@ impl Veritas { (Some(cached), Some(zone)) => { zone.update_receipt_cache(cached); if zone.is_better_than(cached).unwrap_or(false) { - receipt_verified = maybe_verify_receipt(zone, bundle.receipt.as_ref(), &space, options)?; + receipt_verified = + maybe_verify_receipt(zone, bundle.receipt.as_ref(), &space, options)?; zone } else { - *cached + cached } } - (Some(cached), None) => *cached, + (Some(cached), None) => cached, (None, Some(zone)) => { - receipt_verified = maybe_verify_receipt(zone, bundle.receipt.as_ref(), &space, options)?; + receipt_verified = + maybe_verify_receipt(zone, bundle.receipt.as_ref(), &space, options)?; zone } (None, None) => { - return 
Err(MessageError::ParentZoneRequired { space: space.to_string() }); + return Err(MessageError::ParentZoneRequired { + space: space.to_string(), + }); } }; @@ -834,7 +897,11 @@ impl Veritas { let verified_bundle = if wants_root { Some(msg::Bundle { subject: space, - receipt: if receipt_verified { bundle.receipt } else { None }, + receipt: if receipt_verified { + bundle.receipt + } else { + None + }, epochs: vec![], records: bundle.records, delegate_records: bundle.delegate_records, @@ -850,11 +917,14 @@ impl Veritas { let mut verified_epochs: Vec = Vec::new(); for epoch in bundle.epochs { - let root = epoch.tree.compute_root() - .map_err(|e| MessageError::HandleProofMalformed { - handle: format!("*@{}", space), - reason: e.to_string(), - })?; + let root = + epoch + .tree + .compute_root() + .map_err(|e| MessageError::HandleProofMalformed { + handle: format!("*@{}", space), + reason: e.to_string(), + })?; if checked.contains(&root) { return Err(MessageError::DuplicateEpoch { @@ -868,15 +938,21 @@ impl Veritas { let sovereignty = if epoch.tree.0.is_empty() { SovereigntyState::Dependent } else { - let onchain = chain.nums.find_commitment(&space, root) - .map_err(|e| MessageError::NumsProofMalformed { reason: e.to_string() })? + let onchain = chain + .nums + .find_commitment(&space, root) + .map_err(|e| MessageError::NumsProofMalformed { + reason: e.to_string(), + })? 
.ok_or_else(|| MessageError::CommitmentNotFound { space: space.to_string(), root, })?; if onchain.block_height > verified_tip.onchain.block_height { - return Err(MessageError::EpochExceedsTip { space: space.to_string() }); + return Err(MessageError::EpochExceedsTip { + space: space.to_string(), + }); } self.sovereignty_for(onchain.block_height) @@ -885,10 +961,11 @@ impl Veritas { let mut verified_handles: Vec = Vec::new(); for handle in epoch.handles { - let subject = SName::join(&handle.name, &space) - .map_err(|_| MessageError::InvalidSubject { + let subject = SName::join(&handle.name, &space).map_err(|_| { + MessageError::InvalidSubject { subject: format!("{}@{}", handle.name, space), - })?; + } + })?; if !ctx.wants(&subject) { continue; @@ -899,12 +976,25 @@ impl Veritas { return Err(MessageError::TemporaryRequiresTip { handle: subject.to_string(), tip: verified_tip.onchain.state_root, - got: root + got: root, }); } - verify_temporary_handle(chain.anchor.height, &handle, &subject, &epoch.tree, target_zone)? + verify_temporary_handle( + chain.anchor.height, + &handle, + &subject, + &epoch.tree, + target_zone, + )? } else { - verify_final_handle(chain.anchor.height, &handle, &subject, &epoch.tree, &chain.nums, sovereignty)? + verify_final_handle( + chain.anchor.height, + &handle, + &subject, + &epoch.tree, + &chain.nums, + sovereignty, + )? 
}; push_best_zone(ctx, &mut zones, zone); @@ -923,7 +1013,11 @@ impl Veritas { let verified_bundle = if wants_root || !verified_epochs.is_empty() { Some(msg::Bundle { subject: space, - receipt: if receipt_verified { bundle.receipt } else { None }, + receipt: if receipt_verified { + bundle.receipt + } else { + None + }, epochs: verified_epochs, records: bundle.records, delegate_records: bundle.delegate_records, @@ -935,10 +1029,7 @@ impl Veritas { Ok((zones, verified_bundle)) } - fn check_msg_anchor( - &self, - msg: &crate::msg::Message, - ) -> Result { + fn check_msg_anchor(&self, msg: &crate::msg::Message) -> Result { let height = msg.chain.anchor.height; if height < self.oldest_anchor { @@ -954,7 +1045,8 @@ impl Veritas { }); } - let anchor = self.find_by_anchor(height) + let anchor = self + .find_by_anchor(height) .ok_or(MessageError::NoAnchorAtHeight { anchor: height })? .clone(); @@ -974,12 +1066,14 @@ impl Veritas { msg: &crate::msg::Message, anchor: &RootAnchor, ) -> Result<(), MessageError> { - let spaces_root = msg.chain.spaces - .compute_root() - .map_err(|_| MessageError::SpacesRootMismatch { - expected: anchor.spaces_root, - got: [0u8; 32], - })?; + let spaces_root = + msg.chain + .spaces + .compute_root() + .map_err(|_| MessageError::SpacesRootMismatch { + expected: anchor.spaces_root, + got: [0u8; 32], + })?; if spaces_root != anchor.spaces_root { return Err(MessageError::SpacesRootMismatch { @@ -989,12 +1083,14 @@ impl Veritas { } if let Some(expected) = anchor.nums_root { - let nums_root = msg.chain.nums - .compute_root() - .map_err(|_| MessageError::NumsRootMismatch { - expected: Some(expected), - got: [0u8; 32], - })?; + let nums_root = + msg.chain + .nums + .compute_root() + .map_err(|_| MessageError::NumsRootMismatch { + expected: Some(expected), + got: [0u8; 32], + })?; if nums_root != expected { return Err(MessageError::NumsRootMismatch { @@ -1025,28 +1121,44 @@ impl Veritas { } /// Extract parent zone from chain proofs and set sovereignty 
based on commitment finality. - fn extract_parent_zone(&self, chain: &msg::ChainProof, bundle: &msg::Bundle) -> Result, MessageError> { + fn extract_parent_zone( + &self, + chain: &msg::ChainProof, + bundle: &msg::Bundle, + ) -> Result, MessageError> { let mut num_id = None; let (spk, records) = if !bundle.subject.is_numeric() { let Some(spaceout) = chain.spaces.find_space(&bundle.subject) else { - return Err(MessageError::SpaceNotFound { space: bundle.subject.to_string() }) + return Err(MessageError::SpaceNotFound { + space: bundle.subject.to_string(), + }); }; let Some(space) = spaceout.space else { - return Err(MessageError::SpaceNotFound { space: bundle.subject.to_string() }); + return Err(MessageError::SpaceNotFound { + space: bundle.subject.to_string(), + }); }; - let data = space.data() + let data = space + .data() .filter(|d| !d.is_empty()) .map(|d| sip7::RecordSet::new(d.to_vec())) .unwrap_or_default(); (spaceout.script_pubkey, data) } else { - let Some(numout) = chain.nums + let Some(numout) = chain + .nums .find_numeric(&bundle.subject.clone().try_into().expect("numeric")) - .ok().flatten() else { - return Err(MessageError::NumericNotFound { numeric: bundle.subject.to_string() }) + .ok() + .flatten() + else { + return Err(MessageError::NumericNotFound { + numeric: bundle.subject.to_string(), + }); }; num_id = Some(numout.num.id); - let data = numout.num.data + let data = numout + .num + .data .filter(|d| !d.is_empty()) .map(|d| sip7::RecordSet::new(d.to_vec())) .unwrap_or_default(); @@ -1071,11 +1183,12 @@ impl Veritas { // Verify records signature if present if let Some(records) = &bundle.records { - msg::verify_records(records, &z.script_pubkey, &z.canonical) - .map_err(|e| MessageError::RecordsInvalid { + msg::verify_records(records, &z.script_pubkey, &z.canonical).map_err(|e| { + MessageError::RecordsInvalid { handle: z.handle.to_string(), reason: e.to_string(), - })?; + } + })?; z.records = records.clone(); } @@ -1096,7 +1209,9 @@ impl Veritas { 
z.delegate = ProvableOption::Exists { value: Delegate { script_pubkey: delegate.script_pubkey, - fallback_records: delegate.num.data + fallback_records: delegate + .num + .data .filter(|d| !d.is_empty()) .map(|d| sip7::RecordSet::new(d.to_vec())) .unwrap_or_default(), @@ -1118,7 +1233,7 @@ impl Veritas { value: CommitmentInfo { onchain: commitment, receipt_hash: None, - } + }, }; } } @@ -1138,32 +1253,38 @@ fn verify_temporary_handle( parent_zone: &Zone, ) -> Result { // Empty tree = nothing exists, otherwise check exclusion - let exists = !epoch_tree.0.is_empty() && epoch_tree - .contains_subspace(&handle.name, &handle.genesis_spk) - .map_err(|e| MessageError::HandleProofMalformed { - handle: subject.to_string(), - reason: e.to_string(), - })?; + let exists = !epoch_tree.0.is_empty() + && epoch_tree + .contains_subspace(&handle.name, &handle.genesis_spk) + .map_err(|e| MessageError::HandleProofMalformed { + handle: subject.to_string(), + reason: e.to_string(), + })?; if exists { - return Err(MessageError::HandleAlreadyExists { handle: subject.to_string() }); + return Err(MessageError::HandleAlreadyExists { + handle: subject.to_string(), + }); } let signer = match &parent_zone.delegate { ProvableOption::Exists { value: delegate } => &delegate.script_pubkey, ProvableOption::Empty => &parent_zone.script_pubkey, ProvableOption::Unknown => { - return Err(MessageError::ParentDelegateUnknown { handle: subject.to_string() }); + return Err(MessageError::ParentDelegateUnknown { + handle: subject.to_string(), + }); } }; let mut verified_records = sip7::RecordSet::default(); if let Some(records) = &handle.records { - msg::verify_records(records, &handle.genesis_spk, &subject) - .map_err(|e| MessageError::RecordsInvalid { + msg::verify_records(records, &handle.genesis_spk, subject).map_err(|e| { + MessageError::RecordsInvalid { handle: subject.to_string(), reason: e.to_string(), - })?; + } + })?; verified_records = records.clone(); } @@ -1182,13 +1303,11 @@ fn 
verify_temporary_handle( num_id, }; - zone.verify_signature( - handle.signature.as_ref().unwrap(), - signer, - ).map_err(|e| MessageError::SignatureInvalid { - handle: zone.handle.to_string(), - reason: e.to_string(), - })?; + zone.verify_signature(handle.signature.as_ref().unwrap(), signer) + .map_err(|e| MessageError::SignatureInvalid { + handle: zone.handle.to_string(), + reason: e.to_string(), + })?; Ok(zone) } @@ -1203,7 +1322,9 @@ fn verify_final_handle( sovereignty: SovereigntyState, ) -> Result { if epoch_tree.0.is_empty() { - return Err(MessageError::FinalCertRequiresTree { handle: subject.to_string() }); + return Err(MessageError::FinalCertRequiresTree { + handle: subject.to_string(), + }); } let included = epoch_tree @@ -1214,34 +1335,44 @@ fn verify_final_handle( })?; if !included { - return Err(MessageError::HandleNotFound { handle: subject.to_string() }); + return Err(MessageError::HandleNotFound { + handle: subject.to_string(), + }); } // Key rotation lookup - let numout = nums - .find_num(&handle.genesis_spk) - .map_err(|e| MessageError::NumsProofMalformed { reason: e.to_string() })?; + let numout = + nums.find_num(&handle.genesis_spk) + .map_err(|e| MessageError::NumsProofMalformed { + reason: e.to_string(), + })?; let (num_id, spk, onchain_data, alias) = match numout { Some(numout) => ( numout.num.id, numout.script_pubkey, - numout.num.data + numout + .num + .data .filter(|d| !d.is_empty()) .map(|d| sip7::RecordSet::new(d.to_vec())) .unwrap_or_default(), - Some(numout.num.name.to_slabel()) + Some(numout.num.name.to_slabel()), + ), + None => ( + NumId::from_spk::(handle.genesis_spk.clone()), + handle.genesis_spk.clone(), + sip7::RecordSet::default(), + None, ), - None => (NumId::from_spk::(handle.genesis_spk.clone()), handle.genesis_spk.clone(), sip7::RecordSet::default(), None), }; let mut verified_records = sip7::RecordSet::default(); if let Some(records) = &handle.records { - msg::verify_records(records, &spk, &subject) - .map_err(|e| 
MessageError::RecordsInvalid { - handle: subject.to_string(), - reason: e.to_string(), - })?; + msg::verify_records(records, &spk, subject).map_err(|e| MessageError::RecordsInvalid { + handle: subject.to_string(), + reason: e.to_string(), + })?; verified_records = records.clone(); } @@ -1272,7 +1403,11 @@ pub enum MessageError { /// No anchor exists at this height NoAnchorAtHeight { anchor: u32 }, /// Anchor hash doesn't match our known anchor at this height - AnchorHashMismatch { height: u32, expected: Hash, got: Hash }, + AnchorHashMismatch { + height: u32, + expected: Hash, + got: Hash, + }, /// Duplicate space in message bundles DuplicateSpace { space: String }, /// Receipt journal could not be decoded @@ -1302,7 +1437,11 @@ pub enum MessageError { /// Subject name is invalid InvalidSubject { subject: String }, /// Temporary certificate must prove against the tip state - TemporaryRequiresTip { handle: String, tip: Hash, got: Hash }, + TemporaryRequiresTip { + handle: String, + tip: Hash, + got: Hash, + }, /// Handle already exists when exclusion proof expected HandleAlreadyExists { handle: String }, /// Parent delegate is unknown, cannot verify signature @@ -1335,11 +1474,17 @@ impl fmt::Display for MessageError { Self::NoAnchorAtHeight { anchor } => { write!(f, "no anchor at height {}", anchor) } - Self::AnchorHashMismatch { height, expected, got } => { + Self::AnchorHashMismatch { + height, + expected, + got, + } => { write!( f, "anchor hash mismatch at {}: expected {}, got {}", - height, hex::encode(expected), hex::encode(got) + height, + hex::encode(expected), + hex::encode(got) ) } Self::DuplicateSpace { space } => { @@ -1355,7 +1500,8 @@ impl fmt::Display for MessageError { write!( f, "spaces root mismatch: expected {}, got {}", - hex::encode(expected), hex::encode(got) + hex::encode(expected), + hex::encode(got) ) } Self::NumsRootMismatch { expected, got } => { @@ -1373,7 +1519,12 @@ impl fmt::Display for MessageError { write!(f, "numeric space {} not 
found in proof", numeric) } Self::CommitmentNotFound { space, root } => { - write!(f, "commitment {} not found for {}", hex::encode(root), space) + write!( + f, + "commitment {} not found for {}", + hex::encode(root), + space + ) } Self::ReceiptRequired { space } => { write!(f, "receipt required for {}", space) @@ -1395,8 +1546,11 @@ impl fmt::Display for MessageError { } Self::TemporaryRequiresTip { handle, tip, got } => { write!( - f, "Temporary handle {} verifies against {} but requires tip {}", - handle, hex::encode(got), hex::encode(tip) + f, + "Temporary handle {} verifies against {} but requires tip {}", + handle, + hex::encode(got), + hex::encode(tip) ) } Self::HandleAlreadyExists { handle } => { @@ -1412,7 +1566,11 @@ impl fmt::Display for MessageError { write!(f, "records invalid for {}: {}", handle, reason) } Self::FinalCertRequiresTree { handle } => { - write!(f, "final certificate requires non-empty tree for {}", handle) + write!( + f, + "final certificate requires non-empty tree for {}", + handle + ) } Self::HandleNotFound { handle } => { write!(f, "handle {} not found", handle) @@ -1432,7 +1590,6 @@ impl fmt::Display for MessageError { impl std::error::Error for MessageError {} - /// Push the better zone: if cached exists and is better, push cached; otherwise push the new zone. 
fn push_best_zone(ctx: &msg::QueryContext, zones: &mut Vec, zone: Zone) { let Some(cached) = ctx.get_zone(&zone.canonical) else { @@ -1457,7 +1614,9 @@ fn maybe_verify_receipt( let Some(ci) = zone.requires_receipt() else { return Ok(false); }; - let receipt = receipt.ok_or_else(|| MessageError::ReceiptRequired { space: space.to_string() })?; + let receipt = receipt.ok_or_else(|| MessageError::ReceiptRequired { + space: space.to_string(), + })?; verify_receipt(ci, space, receipt, options)?; Ok(true) } @@ -1467,16 +1626,16 @@ fn decode_journal( receipt: &risc0_zkvm::Receipt, space: &SLabel, ) -> Result { - receipt.journal.decode().map_err(|e| MessageError::MalformedReceipt { - space: space.to_string(), - reason: e.to_string(), - }) + receipt + .journal + .decode() + .map_err(|e| MessageError::MalformedReceipt { + space: space.to_string(), + reason: e.to_string(), + }) } -fn serialize_option_hash( - hash: &Option, - serializer: S, -) -> Result +fn serialize_option_hash(hash: &Option, serializer: S) -> Result where S: Serializer, { @@ -1512,23 +1671,31 @@ where } } -fn verify_zk_journal_matches_onchain(space: &SLabel, zk: &libveritas_zk::guest::Commitment, onchain: &spaces_nums::Commitment) -> Result<(), MessageError> { +fn verify_zk_journal_matches_onchain( + space: &SLabel, + zk: &libveritas_zk::guest::Commitment, + onchain: &spaces_nums::Commitment, +) -> Result<(), MessageError> { let space_str = space.to_string(); if zk.policy_fold != constants::FOLD_ID || zk.policy_step != constants::STEP_ID { return Err(MessageError::ReceiptPolicyMismatch { space: space_str }); } if zk.final_root != onchain.state_root { - return Err(MessageError::CommitmentReceiptMismatch { space: space_str.clone(), field: "state_root" }); + return Err(MessageError::CommitmentReceiptMismatch { + space: space_str.clone(), + field: "state_root", + }); } if zk.rolling_hash != onchain.rolling_hash { - return Err(MessageError::CommitmentReceiptMismatch { space: space_str, field: "rolling_hash" }); + 
return Err(MessageError::CommitmentReceiptMismatch { + space: space_str, + field: "rolling_hash", + }); } Ok(()) } // Retrieve parent zone without zk verification fn hash_receipt(receipt: &Receipt) -> Hash { - Sha256Hasher::hash( - &borsh::to_vec(receipt).unwrap_or_default() - ) + Sha256Hasher::hash(&borsh::to_vec(receipt).unwrap_or_default()) } diff --git a/veritas/src/msg.rs b/veritas/src/msg.rs index 359b6d8..f312aa5 100644 --- a/veritas/src/msg.rs +++ b/veritas/src/msg.rs @@ -1,15 +1,15 @@ +use crate::cert::{Certificate, HandleSubtree, NumsSubtree, Signature, SpacesSubtree, Witness}; +use crate::{MessageError, Zone}; use borsh::{BorshDeserialize, BorshSerialize}; use risc0_zkvm::Receipt; use serde::{Deserialize, Serialize}; +use sip7::{Record, RecordSet}; use spaces_protocol::bitcoin::{ScriptBuf, secp256k1}; use spaces_protocol::constants::ChainAnchor; use spaces_protocol::slabel::SLabel; +use spaces_protocol::sname::{NameLike, SName, Subname}; use std::collections::HashMap; use std::io::{Read, Write}; -use sip7::{Record, RecordSet}; -use crate::{MessageError, Zone}; -use crate::cert::{Certificate, HandleSubtree, NumsSubtree, Signature, SpacesSubtree, Witness}; -use spaces_protocol::sname::{Subname, NameLike, SName}; /// Context for a verification query. /// @@ -22,6 +22,12 @@ pub struct QueryContext { pub zones: Vec, } +impl Default for QueryContext { + fn default() -> Self { + Self::new() + } +} + impl QueryContext { /// Create an empty context (verify all, no prior zones). pub fn new() -> Self { @@ -48,7 +54,11 @@ impl QueryContext { /// Add a zone to the context. Replaces if handle already exists. 
pub fn add_zone(&mut self, zone: Zone) { - if let Some(existing) = self.zones.iter_mut().find(|z| z.canonical == zone.canonical) { + if let Some(existing) = self + .zones + .iter_mut() + .find(|z| z.canonical == zone.canonical) + { *existing = zone; } else { self.zones.push(zone); @@ -107,13 +117,14 @@ impl Message { fn set_records_inner(&mut self, canonical: &SName, data: sip7::RecordSet, delegate: bool) { let (space, subspace) = match canonical.label_count() { 1 => (canonical.space().unwrap(), None), - 2 => (canonical.space().unwrap(), Some(canonical.subspace().unwrap())), + 2 => ( + canonical.space().unwrap(), + Some(canonical.subspace().unwrap()), + ), _ => return, }; - let Some(bundle) = self.spaces - .iter_mut() - .find(|b| b.subject == space) else { + let Some(bundle) = self.spaces.iter_mut().find(|b| b.subject == space) else { return; }; @@ -121,7 +132,7 @@ impl Message { None => match delegate { true => bundle.delegate_records = Some(data), false => bundle.records = Some(data), - } + }, Some(name) => { if let Some(handle) = bundle .epochs @@ -175,14 +186,21 @@ impl Message { let root = leaf.subject.space().unwrap(); let (genesis_spk, handles, signature) = match leaf.witness { Witness::Root { .. 
} => continue, - Witness::Leaf { genesis_spk, handles, signature } => - (genesis_spk, handles, signature), + Witness::Leaf { + genesis_spk, + handles, + signature, + } => (genesis_spk, handles, signature), }; let Some(bundle) = bundles.get_mut(&root) else { continue; }; let epoch_root = handles.compute_root().expect("todo bubble error"); - match bundle.epochs.iter_mut().find(|e| e.tree.compute_root().unwrap() == epoch_root) { + match bundle + .epochs + .iter_mut() + .find(|e| e.tree.compute_root().unwrap() == epoch_root) + { Some(e) => { let subtree = std::mem::replace(&mut e.tree, HandleSubtree::empty()); e.tree = subtree.merge(handles).expect("todo bubble error"); @@ -195,17 +213,14 @@ impl Message { } None => bundle.epochs.push(Epoch { tree: handles, - handles: vec![ - Handle { - name: leaf.subject.subspace().unwrap(), - genesis_spk, - records: None, - signature, - } - ], - }) + handles: vec![Handle { + name: leaf.subject.subspace().unwrap(), + genesis_spk, + records: None, + signature, + }], + }), }; - } msg.spaces = bundles.into_values().collect(); Ok(msg) @@ -287,7 +302,9 @@ pub fn verify_records( expected_canonical: &SName, ) -> Result<(), crate::SignatureError> { let signable = records.signable(); - let sig_data = signable.sig.ok_or_else(|| crate::SignatureError::InvalidSignature)?; + let sig_data = signable + .sig + .ok_or(crate::SignatureError::InvalidSignature)?; use secp256k1::XOnlyPublicKey; @@ -309,9 +326,6 @@ pub fn verify_records( .map_err(|_| crate::SignatureError::VerificationFailed) } - - - /// An unsigned record set pending signature. pub struct UnsignedRecordSet { /// Original handle name (e.g., `example.alice@bitcoin`). 
@@ -338,7 +352,9 @@ impl UnsignedRecordSet { self.handle.clone(), vec![0u8; 64], self.flags, - ).pack().expect("valid sig"); + ) + .pack() + .expect("valid sig"); buf.extend(&dummy); let full = RecordSet::new(buf); full.signable().bytes.to_vec() @@ -358,7 +374,9 @@ impl UnsignedRecordSet { self.handle.clone(), signature, self.flags, - ).pack().expect("valid sig"); + ) + .pack() + .expect("valid sig"); buf.extend(r); RecordSet::new(buf) } @@ -403,7 +421,11 @@ impl BorshDeserialize for ChainProof { let anchor = ChainAnchor::deserialize_reader(reader)?; let spaces = SpacesSubtree::deserialize_reader(reader)?; let nums = NumsSubtree::deserialize_reader(reader)?; - Ok(ChainProof { anchor, spaces, nums }) + Ok(ChainProof { + anchor, + spaces, + nums, + }) } } @@ -414,7 +436,10 @@ impl BorshSerialize for Bundle { BorshSerialize::serialize(&self.epochs, writer)?; let records_bytes: Option> = self.records.as_ref().map(|d| d.as_slice().to_vec()); BorshSerialize::serialize(&records_bytes, writer)?; - let delegate_bytes: Option> = self.delegate_records.as_ref().map(|d| d.as_slice().to_vec()); + let delegate_bytes: Option> = self + .delegate_records + .as_ref() + .map(|d| d.as_slice().to_vec()); BorshSerialize::serialize(&delegate_bytes, writer) } } @@ -475,4 +500,4 @@ impl BorshDeserialize for Handle { signature, }) } -} \ No newline at end of file +} diff --git a/veritas/src/names.rs b/veritas/src/names.rs index 0225b72..d6df3ab 100644 --- a/veritas/src/names.rs +++ b/veritas/src/names.rs @@ -1,11 +1,10 @@ -use std::collections::HashMap; -use std::str::FromStr; -use std::sync::Mutex; -use crate::cert::{Certificate, NumsSubtree}; use crate::Zone; +use crate::cert::{Certificate, NumsSubtree}; use spaces_protocol::slabel::SLabel; use spaces_protocol::sname::{NameLike, SName, Subname}; - +use std::collections::HashMap; +use std::str::FromStr; +use std::sync::Mutex; /// Bidirectional name resolver for space handle hierarchies. 
/// @@ -27,7 +26,8 @@ pub struct NameResolver { impl NameResolver { fn from_aliases(aliases: HashMap) -> Self { - let reverse = aliases.iter() + let reverse = aliases + .iter() .map(|(handle, numeric)| (numeric.clone(), handle.clone())) .collect(); Self { aliases, reverse } @@ -40,9 +40,15 @@ impl NameResolver { pub fn from_certificates(certs: &[Certificate], nums: &NumsSubtree) -> Self { let mut aliases = HashMap::new(); for cert in certs { - let Some(genesis_spk) = cert.genesis_spk() else { continue }; - if cert.subject.space().is_none() { continue }; - let Ok(Some(numout)) = nums.find_num(genesis_spk) else { continue }; + let Some(genesis_spk) = cert.genesis_spk() else { + continue; + }; + if cert.subject.space().is_none() { + continue; + }; + let Ok(Some(numout)) = nums.find_num(genesis_spk) else { + continue; + }; aliases.insert(cert.subject.clone(), numout.num.name.to_slabel()); } Self::from_aliases(aliases) @@ -73,7 +79,9 @@ impl NameResolver { } let labels: Vec<&[u8]> = name.iter().collect(); - let Some(space) = name.space() else { return name.clone() }; + let Some(space) = name.space() else { + return name.clone(); + }; let mut current = match build_2label(labels[count - 2], &space) { Some(n) => n, @@ -105,12 +113,16 @@ impl NameResolver { return name.clone(); } - let Some(space) = name.space() else { return name.clone() }; + let Some(space) = name.space() else { + return name.clone(); + }; if !space.is_numeric() { return name.clone(); } - let Some(subspace) = name.subspace() else { return name.clone() }; + let Some(subspace) = name.subspace() else { + return name.clone(); + }; let sub_str = subspace.to_string(); // Resolve the numeric space to a human-readable parent handle, @@ -159,7 +171,8 @@ impl LookupEntry { if count == 0 { return None; } - let labels: Vec = name.iter() + let labels: Vec = name + .iter() .map(|l| std::str::from_utf8(l).unwrap_or("").to_string()) .collect(); let space = name.space()?; @@ -211,9 +224,7 @@ pub struct Lookup { impl 
Lookup { pub fn new(names: Vec) -> Self { - let entries = names.iter() - .filter_map(|n| LookupEntry::new(n)) - .collect(); + let entries = names.iter().filter_map(LookupEntry::new).collect(); Self { state: Mutex::new(LookupState { entries, @@ -225,7 +236,9 @@ impl Lookup { /// Returns the first batch of handles to look up. pub fn start(&self) -> Vec { let state = self.state.lock().unwrap(); - state.entries.iter() + state + .entries + .iter() .filter_map(|e| e.current_handle()) .collect() } @@ -237,8 +250,14 @@ impl Lookup { for zone in zones { if let Some(alias) = &zone.alias { - state.resolver.aliases.insert(zone.canonical.clone(), alias.clone()); - state.resolver.reverse.insert(alias.clone(), zone.canonical.clone()); + state + .resolver + .aliases + .insert(zone.canonical.clone(), alias.clone()); + state + .resolver + .reverse + .insert(alias.clone(), zone.canonical.clone()); } } @@ -246,15 +265,21 @@ impl Lookup { if entry.done { continue; } - let Some(handle) = entry.current_handle() else { continue }; - let Some(zone) = zones.iter().find(|z| z.canonical == handle) else { continue }; + let Some(handle) = entry.current_handle() else { + continue; + }; + let Some(zone) = zones.iter().find(|z| z.canonical == handle) else { + continue; + }; match &zone.alias { Some(alias) if entry.cursor > 0 => entry.advance(alias.clone()), _ => entry.done = true, } } - state.entries.iter() + state + .entries + .iter() .filter(|e| !e.done) .filter_map(|e| e.current_handle()) .collect() @@ -271,12 +296,12 @@ impl Lookup { mod tests { use super::*; use crate::cert::{HandleSubtree, KeyHash, Witness}; + use spacedb::NodeHasher; use spacedb::Sha256Hasher; use spacedb::subtree::{SubTree, ValueOrHash}; - use spacedb::NodeHasher; - use spaces_nums::{Num, NumOut}; use spaces_nums::num_id::NumId; use spaces_nums::snumeric::SNumeric; + use spaces_nums::{Num, NumOut}; use spaces_protocol::bitcoin::ScriptBuf; use std::str::FromStr; @@ -427,7 +452,10 @@ mod tests { let flat = 
SName::from_str("pancakes#822-88-22").unwrap(); let expanded = flattener.expand(&flat); - assert_eq!(expanded, SName::from_str("pancakes.nested1.alice@bitcoin").unwrap()); + assert_eq!( + expanded, + SName::from_str("pancakes.nested1.alice@bitcoin").unwrap() + ); } #[test] @@ -442,9 +470,7 @@ mod tests { #[test] fn lookup_2_labels_resolves_immediately() { - let lookup = Lookup::new(vec![ - SName::from_str("alice@bitcoin").unwrap(), - ]); + let lookup = Lookup::new(vec![SName::from_str("alice@bitcoin").unwrap()]); let batch = lookup.start(); assert_eq!(batch, vec![SName::from_str("alice@bitcoin").unwrap()]); @@ -457,9 +483,7 @@ mod tests { #[test] fn lookup_3_labels() { // nested1.alice@bitcoin requires: alice@bitcoin → #800-12-12 → nested1#800-12-12 - let lookup = Lookup::new(vec![ - SName::from_str("nested1.alice@bitcoin").unwrap(), - ]); + let lookup = Lookup::new(vec![SName::from_str("nested1.alice@bitcoin").unwrap()]); let batch = lookup.start(); assert_eq!(batch, vec![SName::from_str("alice@bitcoin").unwrap()]); @@ -486,7 +510,10 @@ mod tests { let next = lookup.advance(&zones); assert_eq!(next, vec![SName::from_str("nested1#800-12-12").unwrap()]); - let zones2 = vec![make_zone("nested1#800-12-12", Some(SNumeric::new(822, 88, 22)))]; + let zones2 = vec![make_zone( + "nested1#800-12-12", + Some(SNumeric::new(822, 88, 22)), + )]; let next2 = lookup.advance(&zones2); assert_eq!(next2, vec![SName::from_str("pancakes#822-88-22").unwrap()]); @@ -524,9 +551,7 @@ mod tests { #[test] fn lookup_single_label() { - let lookup = Lookup::new(vec![ - SName::from_str("@bitcoin").unwrap(), - ]); + let lookup = Lookup::new(vec![SName::from_str("@bitcoin").unwrap()]); let names = lookup.start(); assert_eq!(names[0], SName::from_str("@bitcoin").unwrap()); @@ -535,19 +560,18 @@ mod tests { #[test] fn lookup_expand_zones_at_end() { - let lookup = Lookup::new(vec![ - SName::from_str("nested1.alice@bitcoin").unwrap(), - ]); + let lookup = 
Lookup::new(vec![SName::from_str("nested1.alice@bitcoin").unwrap()]); let _ = lookup.start(); let zones = vec![make_zone("alice@bitcoin", Some(SNumeric::new(800, 12, 12)))]; let _ = lookup.advance(&zones); // Now expand a zone with numeric handle - let mut zones_to_expand = vec![ - make_zone("nested1#800-12-12", None), - ]; + let mut zones_to_expand = vec![make_zone("nested1#800-12-12", None)]; lookup.expand_zones(&mut zones_to_expand); - assert_eq!(zones_to_expand[0].handle, SName::from_str("nested1.alice@bitcoin").unwrap()); + assert_eq!( + zones_to_expand[0].handle, + SName::from_str("nested1.alice@bitcoin").unwrap() + ); } } diff --git a/veritas/tests/fixture_tests.rs b/veritas/tests/fixture_tests.rs index eae055d..cc3e710 100644 --- a/veritas/tests/fixture_tests.rs +++ b/veritas/tests/fixture_tests.rs @@ -1,9 +1,9 @@ -use spacedb::subtree::{ProofType}; use libveritas::cert::{NumsSubtree, SpacesSubtree}; +use libveritas::msg::QueryContext; use libveritas::{ProvableOption, SovereigntyState}; -use libveritas::msg::{QueryContext}; +use libveritas_testutil::fixture::{ChainState, FixtureRunner, kitchen_sink}; +use spacedb::subtree::ProofType; use spaces_protocol::sname::NameLike; -use libveritas_testutil::fixture::{kitchen_sink, ChainState, FixtureRunner}; #[test] fn test_space_not_found_in_chain_proof() { @@ -15,12 +15,19 @@ fn test_space_not_found_in_chain_proof() { // omit space from chain proof msg.chain.spaces = SpacesSubtree( - msg.chain.spaces.0 - .prove(&[[0u8;32]], ProofType::Standard).expect("proving failed") + msg.chain + .spaces + .0 + .prove(&[[0u8; 32]], ProofType::Standard) + .expect("proving failed"), ); let veritas = state.veritas(); let ctx = QueryContext::new(); - assert!(veritas.verify_with_options(&ctx, msg, libveritas::VERIFY_DEV_MODE).is_err()); + assert!( + veritas + .verify_with_options(&ctx, msg, libveritas::VERIFY_DEV_MODE) + .is_err() + ); } #[test] @@ -32,33 +39,39 @@ fn test_no_delegate_info_provided() { let initial_bundle = 
runner.build_bundle(); let mut msg = state.message(vec![initial_bundle.clone()]); msg.chain.nums = NumsSubtree( - msg.chain.nums.0 - .prove(&[[64u8;32]], ProofType::Standard).expect("proving failed") + msg.chain + .nums + .0 + .prove(&[[64u8; 32]], ProofType::Standard) + .expect("proving failed"), ); let veritas = state.veritas(); let ctx = QueryContext::new(); - let res = veritas.verify_with_options(&ctx, msg, libveritas::VERIFY_DEV_MODE).expect("valid"); + let res = veritas + .verify_with_options(&ctx, msg, libveritas::VERIFY_DEV_MODE) + .expect("valid"); assert_eq!(res.zones.len(), 1, "expected 1 zones"); let zone = res.zones.first().unwrap(); assert!(matches!(zone.delegate, ProvableOption::Unknown)); - assert!(matches!(zone.sovereignty, SovereigntyState::Sovereign)); - assert!(!matches!(zone.commitment, ProvableOption::Exists {..})); + assert!(matches!(zone.sovereignty, SovereigntyState::Sovereign)); + assert!(!matches!(zone.commitment, ProvableOption::Exists { .. })); // Now create the message without omitting chain proofs let msg = state.message(vec![initial_bundle]); let mut ctx = QueryContext::new(); ctx.add_zone(zone.clone()); - let res = veritas.verify_with_options(&ctx, msg, libveritas::VERIFY_DEV_MODE).expect("valid"); + let res = veritas + .verify_with_options(&ctx, msg, libveritas::VERIFY_DEV_MODE) + .expect("valid"); assert_eq!(res.zones.len(), 1, "expected 1 zones"); let zone = res.zones.first().unwrap(); assert!(matches!(zone.delegate, ProvableOption::Exists { .. 
})); - assert!(matches!(zone.sovereignty, SovereigntyState::Sovereign)); - assert!(matches!(zone.commitment, ProvableOption::Empty)); + assert!(matches!(zone.sovereignty, SovereigntyState::Sovereign)); + assert!(matches!(zone.commitment, ProvableOption::Empty)); } - #[test] fn test_kitchen_sink() { let mut state = ChainState::new(); @@ -67,24 +80,36 @@ fn test_kitchen_sink() { let mut runner = FixtureRunner::new(&mut state, fixture); runner.run(&mut state); - let latest_root = runner.handles.handle_tree.compute_root().expect("compute root"); + let latest_root = runner + .handles + .handle_tree + .compute_root() + .expect("compute root"); let bundle = runner.build_bundle(); let msg = state.message(vec![bundle]); let ctx = QueryContext::new(); let veritas = state.veritas(); - let res = veritas.verify_with_options(&ctx, msg, libveritas::VERIFY_DEV_MODE).expect("valid"); + let res = veritas + .verify_with_options(&ctx, msg, libveritas::VERIFY_DEV_MODE) + .expect("valid"); assert_eq!( states.staged.len(), - res.zones.iter().filter(|z| z.sovereignty == SovereigntyState::Dependent).count() + res.zones + .iter() + .filter(|z| z.sovereignty == SovereigntyState::Dependent) + .count() ); - let parent_zone = res.zones.iter().find(|z| z.handle.is_single_label()) + let parent_zone = res + .zones + .iter() + .find(|z| z.handle.is_single_label()) .expect("missing parent"); - let ProvableOption::Exists { value : commitment } = &parent_zone.commitment else { + let ProvableOption::Exists { value: commitment } = &parent_zone.commitment else { panic!("commit should exist"); }; @@ -95,9 +120,9 @@ fn test_kitchen_sink() { if zone.handle.is_single_label() { continue; } - let expected = states.sovereignty( - &zone.handle.subspace().unwrap().to_string() - ).expect("handle exists"); + let expected = states + .sovereignty(&zone.handle.subspace().unwrap().to_string()) + .expect("handle exists"); assert_eq!(expected, zone.sovereignty); } diff --git a/veritas/tests/integration_tests.rs 
b/veritas/tests/integration_tests.rs index fd5232c..508d166 100644 --- a/veritas/tests/integration_tests.rs +++ b/veritas/tests/integration_tests.rs @@ -1,27 +1,32 @@ -use bitcoin::hashes::{Hash as BitcoinHash}; +use bitcoin::hashes::Hash as BitcoinHash; use bitcoin::key::Keypair; use bitcoin::key::rand::Rng; use bitcoin::secp256k1::Secp256k1; use bitcoin::secp256k1::rand; use bitcoin::{BlockHash, OutPoint, ScriptBuf, Txid}; use borsh::{BorshDeserialize, BorshSerialize}; -use libveritas::cert::{Certificate, HandleOut, HandleSubtree, KeyHash, NumsSubtree, Signature, SpacesSubtree, Witness}; +use libveritas::cert::{ + Certificate, HandleOut, HandleSubtree, KeyHash, NumsSubtree, Signature, SpacesSubtree, Witness, +}; +use libveritas::msg::{self, Message, QueryContext}; +use libveritas::{ProvableOption, SovereigntyState, Veritas, Zone, hash_signable_message}; +use risc0_zkvm::{FakeReceipt, InnerReceipt, Receipt, ReceiptClaim}; use spacedb::Sha256Hasher; use spacedb::subtree::{ProofType, SubTree, ValueOrHash}; +use spaces_nums::constants::COMMITMENT_FINALITY_INTERVAL; +use spaces_nums::num_id::NumId; +use spaces_nums::snumeric::SNumeric; +use spaces_nums::{ + CommitmentKey, CommitmentTipKey, DelegatorKey, FullNumOut, Num, NumOut, NumOutpointKey, + RootAnchor, rolling_hash, +}; +use spaces_protocol::constants::ChainAnchor; use spaces_protocol::hasher::{KeyHasher, OutpointKey, SpaceKey}; use spaces_protocol::slabel::SLabel; +use spaces_protocol::sname::{SName, Subname}; use spaces_protocol::{Covenant, FullSpaceOut, Space, SpaceOut}; -use spaces_nums::num_id::NumId; -use spaces_nums::{rolling_hash, CommitmentKey, FullNumOut, Num, NumOut, NumOutpointKey, CommitmentTipKey, RootAnchor, DelegatorKey}; -use spaces_nums::snumeric::SNumeric; use std::collections::HashMap; use std::str::FromStr; -use risc0_zkvm::{FakeReceipt, InnerReceipt, Receipt, ReceiptClaim}; -use spaces_protocol::constants::ChainAnchor; -use spaces_nums::constants::COMMITMENT_FINALITY_INTERVAL; -use 
libveritas::{hash_signable_message, ProvableOption, SovereigntyState, Veritas, Zone}; -use libveritas::msg::{self, Message, QueryContext}; -use spaces_protocol::sname::{Subname, SName}; fn sname(s: &str) -> SName { SName::from_str(s).unwrap() @@ -52,8 +57,8 @@ pub struct EncodableOutpoint( ); fn gen_p2tr_spk() -> (ScriptBuf, Keypair) { - use bitcoin::script::Builder; use bitcoin::opcodes::all::OP_PUSHNUM_1; + use bitcoin::script::Builder; let secp = Secp256k1::new(); let (secret_key, public_key) = secp.generate_keypair(&mut rand::thread_rng()); @@ -172,7 +177,7 @@ impl TestNum { } pub fn id(&self) -> NumId { - self.fso.numout.num.id.clone() + self.fso.numout.num.id } pub fn outpoint_key(&self) -> NumOutpointKey { @@ -211,12 +216,18 @@ pub struct StagedHandle { } pub struct TestCommitmentBundle { - root: [u8;32], + root: [u8; 32], handles: HashMap, handle_tree: SubTree, receipt: Option, } +impl Default for TestChain { + fn default() -> Self { + Self::new() + } +} + impl TestChain { pub fn new() -> Self { Self { @@ -259,8 +270,8 @@ impl TestChain { let spaces_root = self.spaces_tree.compute_root().expect("spaces root"); let nums_root = self.nums_tree.compute_root().expect("nums root"); - let block_hash = BlockHash - ::from_byte_array(rolling_hash::(spaces_root, nums_root)); + let block_hash = + BlockHash::from_byte_array(rolling_hash::(spaces_root, nums_root)); RootAnchor { spaces_root, @@ -288,7 +299,7 @@ impl TestChain { self.nums_tree .insert(num.id().into(), ValueOrHash::Value(num.outpoint_bytes())) .expect("insert outpoint"); - self.nums.insert(num.id().into(), num.clone()); + self.nums.insert(num.id(), num.clone()); num } @@ -308,9 +319,12 @@ impl TestChain { TestDelegatedSpace { space, ptr: num } } - pub fn insert_commitment(&mut self, ds: &TestDelegatedSpace, root: [u8; 32]) -> spaces_nums::Commitment { - let prev_finalized = self - .rollback_to_finalized_commitment(&ds.space.label()); + pub fn insert_commitment( + &mut self, + ds: &TestDelegatedSpace, + 
root: [u8; 32], + ) -> spaces_nums::Commitment { + let prev_finalized = self.rollback_to_finalized_commitment(&ds.space.label()); let commitment = match prev_finalized { None => spaces_nums::Commitment { @@ -324,17 +338,22 @@ impl TestChain { prev_root: Some(prev.state_root), rolling_hash: rolling_hash::(prev.rolling_hash, root), block_height: self.block_height, - } + }, }; let commitment_key = CommitmentKey::new::(&ds.space.label(), root); let commitment_bytes = borsh::to_vec(&commitment).expect("valid"); - self.nums_tree.insert(commitment_key.into(), ValueOrHash::Value(commitment_bytes)) + self.nums_tree + .insert(commitment_key.into(), ValueOrHash::Value(commitment_bytes)) .expect("insert commitment"); let registry_key = CommitmentTipKey::from_slabel::(&ds.space.label()); - self.nums_tree.update(registry_key.into(), ValueOrHash::Value(commitment.state_root.to_vec())) + self.nums_tree + .update( + registry_key.into(), + ValueOrHash::Value(commitment.state_root.to_vec()), + ) .expect("insert registry"); commitment @@ -374,7 +393,10 @@ impl TestChain { }) } - pub fn rollback_to_finalized_commitment(&mut self, space: &SLabel) -> Option { + pub fn rollback_to_finalized_commitment( + &mut self, + space: &SLabel, + ) -> Option { let commitment = self.get_commitment(space, None)?; if commitment.is_finalized(self.block_height) { return Some(commitment); @@ -393,7 +415,11 @@ impl TestChain { let finalized = self.get_commitment(space, Some(prev_root))?; // update tip pointer - self.nums_tree.update(registry_key.into(), ValueOrHash::Value(finalized.state_root.to_vec())) + self.nums_tree + .update( + registry_key.into(), + ValueOrHash::Value(finalized.state_root.to_vec()), + ) .expect("update"); Some(finalized) @@ -403,7 +429,7 @@ impl TestChain { pub struct TestHandle { pub name: Subname, pub genesis_spk: ScriptBuf, - pub keypair: Keypair + pub keypair: Keypair, } impl TestHandleTree { @@ -421,7 +447,10 @@ impl TestHandleTree { let label = label(name); let label_hash = 
KeyHash::hash(label.as_slabel().as_ref()); assert!( - !self.handle_tree.contains(&label_hash).expect("complete tree"), + !self + .handle_tree + .contains(&label_hash) + .expect("complete tree"), "already exists" ); assert!(!self.staged.contains_key(&label), "already staged"); @@ -450,10 +479,7 @@ impl TestHandleTree { }; let signature = sign_zone(&zone, &self.ds.ptr.keypair); - let staged = StagedHandle { - handle, - signature, - }; + let staged = StagedHandle { handle, signature }; self.staged.insert(staged.handle.name.clone(), staged); } @@ -491,16 +517,17 @@ impl TestHandleTree { kind: libveritas_zk::guest::CommitmentKind::Fold, }; - // Serialize using risc0 serde format (u32 words → le bytes), // matching what a real guest would write via env::commit() let words = risc0_zkvm::serde::to_vec(&commitment).expect("serialize commitment"); let journal_bytes: Vec = words.iter().flat_map(|w| w.to_le_bytes()).collect(); - let receipt_claim = ReceiptClaim::ok(libveritas::constants::FOLD_ID, journal_bytes.clone()); - Some( - Receipt::new(InnerReceipt::Fake(FakeReceipt::new(receipt_claim)), journal_bytes) - ) + let receipt_claim = + ReceiptClaim::ok(libveritas::constants::FOLD_ID, journal_bytes.clone()); + Some(Receipt::new( + InnerReceipt::Fake(FakeReceipt::new(receipt_claim)), + journal_bytes, + )) } else { None }; @@ -535,10 +562,8 @@ impl TestHandleTree { ]; // --- Nums tree keys --- - let mut nums_keys: Vec<[u8; 32]> = vec![ - self.ds.ptr.outpoint_key().into(), - self.ds.ptr.id().into(), - ]; + let mut nums_keys: Vec<[u8; 32]> = + vec![self.ds.ptr.outpoint_key().into(), self.ds.ptr.id().into()]; // Registry key (commitment tip pointer) nums_keys.push(CommitmentTipKey::from_slabel::(&self.space).into()); @@ -590,7 +615,7 @@ impl TestHandleTree { // --- Build message --- Message { chain: msg::ChainProof { - anchor: anchor.clone(), + anchor: *anchor, spaces: SpacesSubtree(spaces_proof), nums: NumsSubtree(nums_proof), }, @@ -619,7 +644,9 @@ impl TestHandleTree { anchor: 
&ChainAnchor, ) -> Message { let tcb = &self.commitments[commitment_idx]; - let staged = self.staged.get(&label(handle_name)) + let staged = self + .staged + .get(&label(handle_name)) .expect("handle must be staged"); // --- Spaces tree keys --- @@ -629,10 +656,8 @@ impl TestHandleTree { ]; // --- Nums tree keys --- - let mut nums_keys: Vec<[u8; 32]> = vec![ - self.ds.ptr.outpoint_key().into(), - self.ds.ptr.id().into(), - ]; + let mut nums_keys: Vec<[u8; 32]> = + vec![self.ds.ptr.outpoint_key().into(), self.ds.ptr.id().into()]; nums_keys.push(CommitmentTipKey::from_slabel::(&self.space).into()); nums_keys.push(CommitmentKey::new::(&self.space, tcb.root).into()); @@ -645,19 +670,22 @@ impl TestHandleTree { let handle_keys: Vec<[u8; 32]> = vec![handle_key]; // --- Create proved subtrees --- - let spaces_proof = chain.spaces_tree + let spaces_proof = chain + .spaces_tree .prove(&spaces_keys, ProofType::Standard) .expect("prove spaces"); - let nums_proof = chain.nums_tree + let nums_proof = chain + .nums_tree .prove(&nums_keys, ProofType::Standard) .expect("prove nums"); - let handles_proof = tcb.handle_tree + let handles_proof = tcb + .handle_tree .prove(&handle_keys, ProofType::Standard) .expect("prove handles exclusion"); Message { chain: msg::ChainProof { - anchor: anchor.clone(), + anchor: *anchor, spaces: SpacesSubtree(spaces_proof), nums: NumsSubtree(nums_proof), }, @@ -731,30 +759,31 @@ impl Fixture { fn veritas(&self) -> Veritas { let anchors = vec![self.latest_anchor.clone(), self.finalized_anchor.clone()]; - Veritas::new() - .with_anchors(anchors).expect("valid anchors") + Veritas::new().with_anchors(anchors).expect("valid anchors") } /// Message proving commitment 0 (finalized) against the finalized anchor. 
fn finalized_message(&self, handles: &[&str]) -> Message { self.handles.build_message( - &self.finalized_chain, 0, handles, + &self.finalized_chain, + 0, + handles, &self.finalized_anchor.block, ) } /// Message proving commitment 1 (pending) against the latest anchor. fn pending_message(&self, handles: &[&str]) -> Message { - self.handles.build_message( - &self.latest_chain, 1, handles, - &self.latest_anchor.block, - ) + self.handles + .build_message(&self.latest_chain, 1, handles, &self.latest_anchor.block) } /// Temporary certificate message for a staged handle (not yet committed). fn temporary_message(&self, handle_name: &str) -> Message { self.handles.build_temporary_message( - &self.latest_chain, 1, handle_name, + &self.latest_chain, + 1, + handle_name, &self.latest_anchor.block, ) } @@ -766,7 +795,9 @@ fn verify_root_finalized() { let veritas = f.veritas(); let ctx = QueryContext::new(); - let result = veritas.verify_with_options(&ctx,f.finalized_message(&[]), libveritas::VERIFY_DEV_MODE).expect("verify"); + let result = veritas + .verify_with_options(&ctx, f.finalized_message(&[]), libveritas::VERIFY_DEV_MODE) + .expect("verify"); assert_eq!(result.zones.len(), 1); let zone = &result.zones[0]; @@ -786,15 +817,35 @@ fn verify_leaf_finalized() { let veritas = f.veritas(); let ctx = QueryContext::new(); - let result = veritas.verify_with_options(&ctx,f.finalized_message(&["alice"]), libveritas::VERIFY_DEV_MODE).expect("verify"); + let result = veritas + .verify_with_options( + &ctx, + f.finalized_message(&["alice"]), + libveritas::VERIFY_DEV_MODE, + ) + .expect("verify"); // Should have root zone + alice zone assert_eq!(result.zones.len(), 2); - let alice = result.zones.iter().find(|z| z.handle == sname("alice@bitcoin")).expect("alice"); + let alice = result + .zones + .iter() + .find(|z| z.handle == sname("alice@bitcoin")) + .expect("alice"); assert!(matches!(alice.sovereignty, SovereigntyState::Sovereign)); - let result = 
veritas.verify_with_options(&ctx,f.finalized_message(&["bob"]), libveritas::VERIFY_DEV_MODE).expect("verify"); - let bob = result.zones.iter().find(|z| z.handle == sname("bob@bitcoin")).expect("bob"); + let result = veritas + .verify_with_options( + &ctx, + f.finalized_message(&["bob"]), + libveritas::VERIFY_DEV_MODE, + ) + .expect("verify"); + let bob = result + .zones + .iter() + .find(|z| z.handle == sname("bob@bitcoin")) + .expect("bob"); assert!(matches!(bob.sovereignty, SovereigntyState::Sovereign)); } @@ -804,7 +855,9 @@ fn verify_root_pending() { let veritas = f.veritas(); let ctx = QueryContext::new(); - let result = veritas.verify_with_options(&ctx,f.pending_message(&[]), libveritas::VERIFY_DEV_MODE).expect("verify"); + let result = veritas + .verify_with_options(&ctx, f.pending_message(&[]), libveritas::VERIFY_DEV_MODE) + .expect("verify"); assert_eq!(result.zones.len(), 1); let zone = &result.zones[0]; @@ -822,8 +875,18 @@ fn verify_leaf_pending() { let veritas = f.veritas(); let ctx = QueryContext::new(); - let result = veritas.verify_with_options(&ctx,f.pending_message(&["charlie"]), libveritas::VERIFY_DEV_MODE).expect("verify"); - let charlie = result.zones.iter().find(|z| z.handle == sname("charlie@bitcoin")).expect("charlie"); + let result = veritas + .verify_with_options( + &ctx, + f.pending_message(&["charlie"]), + libveritas::VERIFY_DEV_MODE, + ) + .expect("verify"); + let charlie = result + .zones + .iter() + .find(|z| z.handle == sname("charlie@bitcoin")) + .expect("charlie"); assert!(matches!(charlie.sovereignty, SovereigntyState::Pending)); } @@ -834,8 +897,18 @@ fn verify_leaf_across_anchors() { let ctx = QueryContext::new(); // alice was committed in commitment 0, verified against the latest anchor - let result = veritas.verify_with_options(&ctx,f.pending_message(&["alice"]), libveritas::VERIFY_DEV_MODE).expect("verify"); - let alice = result.zones.iter().find(|z| z.handle == sname("alice@bitcoin")).expect("alice"); + let result = veritas 
+ .verify_with_options( + &ctx, + f.pending_message(&["alice"]), + libveritas::VERIFY_DEV_MODE, + ) + .expect("verify"); + let alice = result + .zones + .iter() + .find(|z| z.handle == sname("alice@bitcoin")) + .expect("alice"); assert_eq!(alice.handle, sname("alice@bitcoin")); } @@ -846,8 +919,18 @@ fn verify_leaf_temporary() { let ctx = QueryContext::new(); // "staged" is in staged but not committed — uses delegate's signature - let result = veritas.verify_with_options(&ctx,f.temporary_message("staged"), libveritas::VERIFY_DEV_MODE).expect("verify"); - let staged = result.zones.iter().find(|z| z.handle == sname("staged@bitcoin")).expect("staged"); + let result = veritas + .verify_with_options( + &ctx, + f.temporary_message("staged"), + libveritas::VERIFY_DEV_MODE, + ) + .expect("verify"); + let staged = result + .zones + .iter() + .find(|z| z.handle == sname("staged@bitcoin")) + .expect("staged"); assert_eq!(staged.handle, sname("staged@bitcoin")); assert!(matches!(staged.sovereignty, SovereigntyState::Dependent)); } @@ -861,7 +944,13 @@ fn verify_with_request_filter() { let mut ctx = QueryContext::new(); ctx.add_request(sname("alice@bitcoin")); - let result = veritas.verify_with_options(&ctx,f.finalized_message(&["alice", "bob"]), libveritas::VERIFY_DEV_MODE).expect("verify"); + let result = veritas + .verify_with_options( + &ctx, + f.finalized_message(&["alice", "bob"]), + libveritas::VERIFY_DEV_MODE, + ) + .expect("verify"); // Should only return alice (root not requested, bob not requested) assert_eq!(result.zones.len(), 1); @@ -875,15 +964,27 @@ fn verify_with_cached_parent_zone() { // First verify to get parent zone let ctx = QueryContext::new(); - let result = veritas.verify_with_options(&ctx,f.finalized_message(&[]), libveritas::VERIFY_DEV_MODE).expect("verify"); + let result = veritas + .verify_with_options(&ctx, f.finalized_message(&[]), libveritas::VERIFY_DEV_MODE) + .expect("verify"); let parent_zone = result.zones[0].clone(); // Now verify with 
cached parent let ctx = QueryContext::from_zones(vec![parent_zone]); - let result = veritas.verify_with_options(&ctx,f.finalized_message(&["alice"]), libveritas::VERIFY_DEV_MODE).expect("verify"); + let result = veritas + .verify_with_options( + &ctx, + f.finalized_message(&["alice"]), + libveritas::VERIFY_DEV_MODE, + ) + .expect("verify"); // Should succeed and include alice - let alice = result.zones.iter().find(|z| z.handle == sname("alice@bitcoin")).expect("alice"); + let alice = result + .zones + .iter() + .find(|z| z.handle == sname("alice@bitcoin")) + .expect("alice"); assert_eq!(alice.handle, sname("alice@bitcoin")); } @@ -908,10 +1009,20 @@ fn verify_uses_better_cached_zone() { }; let ctx = QueryContext::from_zones(vec![cached_zone.clone()]); - let result = veritas.verify_with_options(&ctx,f.finalized_message(&["alice"]), libveritas::VERIFY_DEV_MODE).expect("verify"); + let result = veritas + .verify_with_options( + &ctx, + f.finalized_message(&["alice"]), + libveritas::VERIFY_DEV_MODE, + ) + .expect("verify"); // Should return the newly verified zone (better anchor) - let alice = result.zones.iter().find(|z| z.handle == sname("alice@bitcoin")).expect("alice"); + let alice = result + .zones + .iter() + .find(|z| z.handle == sname("alice@bitcoin")) + .expect("alice"); assert!(alice.anchor > 0); assert!(matches!(alice.sovereignty, SovereigntyState::Sovereign)); } @@ -923,7 +1034,13 @@ fn certificate_iterator() { let ctx = QueryContext::new(); // Verify root + two leaves - let result = veritas.verify_with_options(&ctx,f.finalized_message(&["alice", "bob"]), libveritas::VERIFY_DEV_MODE).expect("verify"); + let result = veritas + .verify_with_options( + &ctx, + f.finalized_message(&["alice", "bob"]), + libveritas::VERIFY_DEV_MODE, + ) + .expect("verify"); let certs: Vec = result.certificates().collect(); @@ -935,10 +1052,16 @@ fn certificate_iterator() { assert!(matches!(certs[0].witness, Witness::Root { .. 
})); // Then leaves - let alice_cert = certs.iter().find(|c| c.subject == sname("alice@bitcoin")).expect("alice cert"); + let alice_cert = certs + .iter() + .find(|c| c.subject == sname("alice@bitcoin")) + .expect("alice cert"); assert!(matches!(alice_cert.witness, Witness::Leaf { .. })); - let bob_cert = certs.iter().find(|c| c.subject == sname("bob@bitcoin")).expect("bob cert"); + let bob_cert = certs + .iter() + .find(|c| c.subject == sname("bob@bitcoin")) + .expect("bob cert"); assert!(matches!(bob_cert.witness, Witness::Leaf { .. })); } @@ -951,7 +1074,13 @@ fn certificate_iterator_leaves_only() { let mut ctx = QueryContext::new(); ctx.add_request(sname("alice@bitcoin")); - let result = veritas.verify_with_options(&ctx,f.finalized_message(&["alice"]), libveritas::VERIFY_DEV_MODE).expect("verify"); + let result = veritas + .verify_with_options( + &ctx, + f.finalized_message(&["alice"]), + libveritas::VERIFY_DEV_MODE, + ) + .expect("verify"); let certs: Vec = result.certificates().collect(); diff --git a/zk/Cargo.toml b/zk/Cargo.toml index a600f59..f229d79 100644 --- a/zk/Cargo.toml +++ b/zk/Cargo.toml @@ -1,9 +1,18 @@ [package] name = "libveritas_zk" -version = "0.1.0" -edition = "2024" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +repository.workspace = true +homepage.workspace = true +authors.workspace = true +description = "ZK guest types and helpers for libveritas." 
+documentation = "https://docs.rs/libveritas_zk" +keywords = ["spaces", "zk", "risc0"] +categories = ["cryptography"] [dependencies] spacedb = { workspace = true } borsh = { version = "1.6", default-features = false, features = ["derive"] } -serde = { version = "1.0", default-features = false, features = ["derive", "alloc"] } +serde = { version = "1.0", default-features = false, features = ["derive", "alloc"] } \ No newline at end of file diff --git a/zk/src/guest.rs b/zk/src/guest.rs index f36bff0..90df0b4 100644 --- a/zk/src/guest.rs +++ b/zk/src/guest.rs @@ -1,9 +1,11 @@ +use crate::BatchReader; use alloc::vec::Vec; use borsh::{BorshDeserialize, BorshSerialize}; use serde::{Deserialize, Serialize}; -use spacedb::{Hash, Sha256Hasher, subtree::{SubTree, ValueOrHash}, VerifyError, NodeHasher}; -use crate::BatchReader; - +use spacedb::{ + Hash, NodeHasher, Sha256Hasher, VerifyError, + subtree::{SubTree, ValueOrHash}, +}; #[derive(Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct Commitment { @@ -29,7 +31,12 @@ pub enum GuestError { pub type Result = core::result::Result; -pub fn run(subtree: Vec, input: Vec, policy_step: [u32; 8], policy_fold: [u32; 8]) -> Result { +pub fn run( + subtree: Vec, + input: Vec, + policy_step: [u32; 8], + policy_fold: [u32; 8], +) -> Result { let mut subtree: SubTree = borsh::from_slice(&subtree).expect("decoding subtree error"); @@ -37,19 +44,26 @@ pub fn run(subtree: Vec, input: Vec, policy_step: [u32; 8], policy_fold: let reader = BatchReader(&input); for entry in reader.iter() { - subtree.insert( - entry.handle.try_into().expect("32 byte subspace hash slice"), - ValueOrHash::Hash(entry.value_hash.try_into().expect("32 byte value hash slice")), - ) + subtree + .insert( + entry + .handle + .try_into() + .expect("32 byte subspace hash slice"), + ValueOrHash::Hash( + entry + .value_hash + .try_into() + .expect("32 byte value hash slice"), + ), + ) .map_err(|e| match e { - spacedb::Error::Verify(e) => { - match 
e { - VerifyError::IncompleteProof => GuestError::IncompleteSubTree, - VerifyError::KeyNotFound => GuestError::IncompleteSubTree, - VerifyError::RootMismatch => GuestError::IncompleteSubTree, - VerifyError::KeyExists => GuestError::KeyExists, - } - } + spacedb::Error::Verify(e) => match e { + VerifyError::IncompleteProof => GuestError::IncompleteSubTree, + VerifyError::KeyNotFound => GuestError::IncompleteSubTree, + VerifyError::RootMismatch => GuestError::IncompleteSubTree, + VerifyError::KeyExists => GuestError::KeyExists, + }, _ => { unreachable!("expected verify error") } diff --git a/zk/src/lib.rs b/zk/src/lib.rs index 050b892..05bc0ec 100644 --- a/zk/src/lib.rs +++ b/zk/src/lib.rs @@ -1,3 +1,8 @@ +//! ZK guest types and helpers for [`libveritas`](https://docs.rs/libveritas). +//! +//! Defines the [`guest::Commitment`] proven by the RISC Zero guest programs +//! and a [`BatchReader`] for reading the host-prepared input batches. + extern crate alloc; extern crate core; @@ -18,11 +23,9 @@ impl<'a> BatchReader<'a> { pub fn new(data: &'a [u8]) -> Self { BatchReader(data) } - + pub fn iter(&self) -> BodyIterator<'a> { - BodyIterator { - data: &self.0, - } + BodyIterator { data: self.0 } } }