From 770ed19662bd344afaf32a21d87c490162bf96de Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Wed, 29 Apr 2026 11:23:40 -0700 Subject: [PATCH 01/25] move rust code into qdk --- Cargo.lock | 48 ++++---- Cargo.toml | 2 +- source/qdk_package/.gitignore | 1 - source/{pip => qdk_package}/Cargo.toml | 6 +- source/{pip => qdk_package}/MANIFEST.in | 0 source/qdk_package/README.md | 110 ++++++++++++++++++ source/qdk_package/pyproject.toml | 25 +++- .../src/displayable_output.rs | 0 .../src/displayable_output/tests.rs | 0 source/{pip => qdk_package}/src/fs.rs | 0 .../src/generic_estimator.rs | 0 .../src/generic_estimator/code.rs | 0 .../src/generic_estimator/counts.rs | 0 .../src/generic_estimator/factory.rs | 0 .../src/generic_estimator/factory/dispatch.rs | 0 .../generic_estimator/factory/round_based.rs | 0 .../src/generic_estimator/tests.rs | 0 .../src/generic_estimator/utils.rs | 0 source/{pip => qdk_package}/src/interop.rs | 0 .../{pip => qdk_package}/src/interpreter.rs | 6 +- .../src/interpreter/data_interop.rs | 2 +- source/{pip => qdk_package}/src/lib.rs | 0 .../src/noisy_simulator.rs | 0 .../src/qir_simulation.rs | 10 +- .../src/qir_simulation/correlated_noise.rs | 0 .../qir_simulation/correlated_noise/tests.rs | 0 .../src/qir_simulation/cpu_simulators.rs | 0 .../qir_simulation/cpu_simulators/tests.rs | 0 .../tests/clifford_noiseless.rs | 0 .../cpu_simulators/tests/clifford_noisy.rs | 0 .../tests/full_state_noiseless.rs | 0 .../cpu_simulators/tests/full_state_noisy.rs | 0 .../cpu_simulators/tests/test_utils.rs | 0 .../src/qir_simulation/gpu_full_state.rs | 2 +- source/{pip => qdk_package}/src/qre.rs | 0 .../src/state_header_template.html | 0 .../src/state_row_template.html | 0 37 files changed, 168 insertions(+), 44 deletions(-) rename source/{pip => qdk_package}/Cargo.toml (95%) rename source/{pip => qdk_package}/MANIFEST.in (100%) rename source/{pip => qdk_package}/src/displayable_output.rs (100%) rename source/{pip => qdk_package}/src/displayable_output/tests.rs 
(100%) rename source/{pip => qdk_package}/src/fs.rs (100%) rename source/{pip => qdk_package}/src/generic_estimator.rs (100%) rename source/{pip => qdk_package}/src/generic_estimator/code.rs (100%) rename source/{pip => qdk_package}/src/generic_estimator/counts.rs (100%) rename source/{pip => qdk_package}/src/generic_estimator/factory.rs (100%) rename source/{pip => qdk_package}/src/generic_estimator/factory/dispatch.rs (100%) rename source/{pip => qdk_package}/src/generic_estimator/factory/round_based.rs (100%) rename source/{pip => qdk_package}/src/generic_estimator/tests.rs (100%) rename source/{pip => qdk_package}/src/generic_estimator/utils.rs (100%) rename source/{pip => qdk_package}/src/interop.rs (100%) rename source/{pip => qdk_package}/src/interpreter.rs (99%) rename source/{pip => qdk_package}/src/interpreter/data_interop.rs (99%) rename source/{pip => qdk_package}/src/lib.rs (100%) rename source/{pip => qdk_package}/src/noisy_simulator.rs (100%) rename source/{pip => qdk_package}/src/qir_simulation.rs (99%) rename source/{pip => qdk_package}/src/qir_simulation/correlated_noise.rs (100%) rename source/{pip => qdk_package}/src/qir_simulation/correlated_noise/tests.rs (100%) rename source/{pip => qdk_package}/src/qir_simulation/cpu_simulators.rs (100%) rename source/{pip => qdk_package}/src/qir_simulation/cpu_simulators/tests.rs (100%) rename source/{pip => qdk_package}/src/qir_simulation/cpu_simulators/tests/clifford_noiseless.rs (100%) rename source/{pip => qdk_package}/src/qir_simulation/cpu_simulators/tests/clifford_noisy.rs (100%) rename source/{pip => qdk_package}/src/qir_simulation/cpu_simulators/tests/full_state_noiseless.rs (100%) rename source/{pip => qdk_package}/src/qir_simulation/cpu_simulators/tests/full_state_noisy.rs (100%) rename source/{pip => qdk_package}/src/qir_simulation/cpu_simulators/tests/test_utils.rs (100%) rename source/{pip => qdk_package}/src/qir_simulation/gpu_full_state.rs (99%) rename source/{pip => qdk_package}/src/qre.rs 
(100%) rename source/{pip => qdk_package}/src/state_header_template.html (100%) rename source/{pip => qdk_package}/src/state_row_template.html (100%) diff --git a/Cargo.lock b/Cargo.lock index 432d8283f7..d72676991b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2081,6 +2081,30 @@ dependencies = [ "cc", ] +[[package]] +name = "qdk" +version = "0.0.0" +dependencies = [ + "allocator", + "expect-test", + "memchr", + "miette", + "noisy_simulator", + "num-bigint", + "num-complex", + "num-traits", + "pyo3", + "qdk_simulators", + "qre", + "qsc", + "rand 0.8.5", + "rayon", + "resource_estimator", + "rustc-hash", + "serde", + "serde_json", +] + [[package]] name = "qdk_simulators" version = "0.0.0" @@ -2515,30 +2539,6 @@ dependencies = [ "wasm-bindgen-futures", ] -[[package]] -name = "qsharp" -version = "0.0.0" -dependencies = [ - "allocator", - "expect-test", - "memchr", - "miette", - "noisy_simulator", - "num-bigint", - "num-complex", - "num-traits", - "pyo3", - "qdk_simulators", - "qre", - "qsc", - "rand 0.8.5", - "rayon", - "resource_estimator", - "rustc-hash", - "serde", - "serde_json", -] - [[package]] name = "qsls" version = "0.0.0" diff --git a/Cargo.toml b/Cargo.toml index 688b27457b..04de7dfc9a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,7 +27,7 @@ members = [ "source/index_map", "source/language_service", "source/simulators", - "source/pip", + "source/qdk_package", "source/qre", "source/resource_estimator", "source/samples_test", diff --git a/source/qdk_package/.gitignore b/source/qdk_package/.gitignore index 7a560a45da..6b5bae3b7c 100644 --- a/source/qdk_package/.gitignore +++ b/source/qdk_package/.gitignore @@ -1,4 +1,3 @@ /build/ -/src/ /*.egg-info/ /doc/ diff --git a/source/pip/Cargo.toml b/source/qdk_package/Cargo.toml similarity index 95% rename from source/pip/Cargo.toml rename to source/qdk_package/Cargo.toml index 7d576bbcc9..9a36669c4c 100644 --- a/source/pip/Cargo.toml +++ b/source/qdk_package/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "qsharp" 
-description = "Q# Python Bindings" +name = "qdk" +description = "QDK Python Bindings" version.workspace = true authors.workspace = true @@ -59,4 +59,4 @@ crate-type = ["cdylib"] doctest = false [package.metadata.maturin] -name = "qsharp._native" +name = "qdk._native" diff --git a/source/pip/MANIFEST.in b/source/qdk_package/MANIFEST.in similarity index 100% rename from source/pip/MANIFEST.in rename to source/qdk_package/MANIFEST.in diff --git a/source/qdk_package/README.md b/source/qdk_package/README.md index 7ffda46294..66535a03f2 100644 --- a/source/qdk_package/README.md +++ b/source/qdk_package/README.md @@ -101,3 +101,113 @@ For convenience, the following helpers and types are also importable directly fr This library sends telemetry. Minimal anonymous data is collected to help measure feature usage and performance. All telemetry events can be seen in the source file [telemetry_events.py](https://github.com/microsoft/qdk/tree/main/source/pip/qsharp/telemetry_events.py). + +## Target Package Structure (Migration WIP) + +The `qsharp` package (pip/) is being deprecated. All implementation is moving into `qdk` (qdk_package/). The `qsharp` package will become a thin deprecation shim that depends on `qdk`. 
+ +``` +qdk_package/ +├── Cargo.toml +├── pyproject.toml +├── MANIFEST.in +├── README.md +├── test_requirements.txt +│ +├── src/ # Rust source for _native +│ └── *.rs +│ +├── qdk/ +│ ├── __init__.py # Same public API as today +│ │ +│ │── # ——— Moved from pip/qsharp/ (implementation modules) ——— +│ ├── _native.pyd/.so # Built by maturin (module-name = "qdk._native") +│ ├── _qsharp.py # Core interpreter +│ ├── _simulation.py # QIR simulation +│ ├── _ipython.py # %%qsharp cell magic +│ ├── _http.py # fetch_github() +│ ├── _fs.py # File system callbacks +│ ├── _adaptive_pass.py +│ ├── _adaptive_bytecode.py +│ ├── telemetry.py +│ ├── telemetry_events.py +│ │ +│ ├── code/ +│ │ └── __init__.py # Dynamic Q# callables namespace +│ │ +│ ├── estimator/ # Direct module — no re-export shim needed +│ │ └── __init__.py +│ │ +│ ├── openqasm/ # Direct module — no re-export shim needed +│ │ └── __init__.py +│ │ +│ ├── utils/ +│ │ └── __init__.py # dump_operation +│ │ +│ ├── noisy_simulator/ +│ │ └── __init__.py +│ │ +│ ├── qiskit/ # Lifted out of interop/ +│ │ ├── __init__.py # QSharpBackend, NeutralAtomBackend, etc. 
+│ │ ├── backends/__init__.py +│ │ ├── passes/__init__.py +│ │ ├── jobs/__init__.py +│ │ └── execution/__init__.py +│ │ +│ ├── cirq/ # Lifted out of interop/ +│ │ └── __init__.py # NeutralAtomSampler +│ │ +│ ├── _device/ +│ │ ├── __init__.py +│ │ └── _atom/ +│ │ └── __init__.py # NeutralAtomDevice +│ │ +│ ├── qre/ +│ │ ├── __init__.py +│ │ ├── application/__init__.py +│ │ ├── models/__init__.py +│ │ │ ├── qubits/__init__.py +│ │ │ ├── qec/__init__.py +│ │ │ └── factories/__init__.py +│ │ ├── interop/__init__.py +│ │ ├── property_keys.py # Merged with custom_property helpers +│ │ └── instruction_ids.py +│ │ +│ ├── applications/ +│ │ ├── __init__.py +│ │ └── magnets/ +│ │ ├── __init__.py +│ │ ├── utilities/__init__.py +│ │ ├── trotter/__init__.py +│ │ ├── models/__init__.py +│ │ └── geometry/__init__.py +│ │ +│ │── # ——— Remaining re-export modules (to revisit later) ——— +│ ├── qsharp.py # Re-exports full qsharp-like API from qdk._qsharp +│ ├── simulation.py # Re-exports NeutralAtomDevice, NoiseConfig +│ │ +│ │── # ——— Unchanged ——— +│ ├── widgets.py # from qsharp_widgets import * (external) +│ │ +│ └── azure/ # Unchanged — re-exports from azure.quantum +│ ├── __init__.py +│ ├── job.py +│ ├── qiskit.py +│ ├── cirq.py +│ ├── argument_types.py +│ └── target/ +│ ├── __init__.py +│ └── rigetti.py +│ +└── tests/ + ├── conftest.py + ├── mocks.py + ├── test_reexports.py + ├── test_extras.py + ├── test_integration/ + │ ├── test_*.py + │ ├── utils.py + │ └── resources/ + └── benchmarks/ + └── bench_qre.py +``` diff --git a/source/qdk_package/pyproject.toml b/source/qdk_package/pyproject.toml index edb2e19ec2..a9dcf666fb 100644 --- a/source/qdk_package/pyproject.toml +++ b/source/qdk_package/pyproject.toml @@ -1,6 +1,6 @@ [build-system] -requires = ["setuptools>=64", "wheel"] -build-backend = "setuptools.build_meta" +requires = ["maturin ~= 1.10.2"] +build-backend = "maturin" [project] name = "qdk" @@ -10,7 +10,22 @@ readme = "README.md" authors = [ { name = "Microsoft" } ] 
license = { file = "LICENSE.txt" } requires-python = ">=3.10" -dependencies = ["qsharp==0.0.0", "pyqir>=0.12.3,<0.13"] +dependencies = ["pyqir>=0.12.3,<0.13"] +classifiers = [ + "License :: OSI Approved :: MIT License", + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python", + "Programming Language :: Rust", + "Operating System :: MacOS", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX :: Linux", +] [project.optional-dependencies] jupyter = ["qsharp-widgets==0.0.0", "qsharp-jupyterlab==0.0.0"] @@ -30,6 +45,6 @@ all = [ "qsharp-jupyterlab==0.0.0", ] -[tool.setuptools.packages.find] -where = ["."] +[tool.maturin] +module-name = "qdk._native" diff --git a/source/pip/src/displayable_output.rs b/source/qdk_package/src/displayable_output.rs similarity index 100% rename from source/pip/src/displayable_output.rs rename to source/qdk_package/src/displayable_output.rs diff --git a/source/pip/src/displayable_output/tests.rs b/source/qdk_package/src/displayable_output/tests.rs similarity index 100% rename from source/pip/src/displayable_output/tests.rs rename to source/qdk_package/src/displayable_output/tests.rs diff --git a/source/pip/src/fs.rs b/source/qdk_package/src/fs.rs similarity index 100% rename from source/pip/src/fs.rs rename to source/qdk_package/src/fs.rs diff --git a/source/pip/src/generic_estimator.rs b/source/qdk_package/src/generic_estimator.rs similarity index 100% rename from source/pip/src/generic_estimator.rs rename to source/qdk_package/src/generic_estimator.rs diff --git a/source/pip/src/generic_estimator/code.rs b/source/qdk_package/src/generic_estimator/code.rs similarity index 100% rename from source/pip/src/generic_estimator/code.rs rename to 
source/qdk_package/src/generic_estimator/code.rs diff --git a/source/pip/src/generic_estimator/counts.rs b/source/qdk_package/src/generic_estimator/counts.rs similarity index 100% rename from source/pip/src/generic_estimator/counts.rs rename to source/qdk_package/src/generic_estimator/counts.rs diff --git a/source/pip/src/generic_estimator/factory.rs b/source/qdk_package/src/generic_estimator/factory.rs similarity index 100% rename from source/pip/src/generic_estimator/factory.rs rename to source/qdk_package/src/generic_estimator/factory.rs diff --git a/source/pip/src/generic_estimator/factory/dispatch.rs b/source/qdk_package/src/generic_estimator/factory/dispatch.rs similarity index 100% rename from source/pip/src/generic_estimator/factory/dispatch.rs rename to source/qdk_package/src/generic_estimator/factory/dispatch.rs diff --git a/source/pip/src/generic_estimator/factory/round_based.rs b/source/qdk_package/src/generic_estimator/factory/round_based.rs similarity index 100% rename from source/pip/src/generic_estimator/factory/round_based.rs rename to source/qdk_package/src/generic_estimator/factory/round_based.rs diff --git a/source/pip/src/generic_estimator/tests.rs b/source/qdk_package/src/generic_estimator/tests.rs similarity index 100% rename from source/pip/src/generic_estimator/tests.rs rename to source/qdk_package/src/generic_estimator/tests.rs diff --git a/source/pip/src/generic_estimator/utils.rs b/source/qdk_package/src/generic_estimator/utils.rs similarity index 100% rename from source/pip/src/generic_estimator/utils.rs rename to source/qdk_package/src/generic_estimator/utils.rs diff --git a/source/pip/src/interop.rs b/source/qdk_package/src/interop.rs similarity index 100% rename from source/pip/src/interop.rs rename to source/qdk_package/src/interop.rs diff --git a/source/pip/src/interpreter.rs b/source/qdk_package/src/interpreter.rs similarity index 99% rename from source/pip/src/interpreter.rs rename to source/qdk_package/src/interpreter.rs index 
9fff6f80d2..ec7112740e 100644 --- a/source/pip/src/interpreter.rs +++ b/source/qdk_package/src/interpreter.rs @@ -153,7 +153,7 @@ fn _native<'a>(py: Python<'a>, m: &Bound<'a, PyModule>) -> PyResult<()> { // This ordering must match the _native.pyi file. #[derive(Clone, Copy, Default, PartialEq)] -#[pyclass(eq, eq_int, from_py_object, module = "qsharp._native")] +#[pyclass(eq, eq_int, from_py_object, module = "qdk._native")] #[allow(non_camel_case_types)] /// A Q# target profile. /// @@ -260,7 +260,7 @@ impl From for Profile { // This ordering must match the _native.pyi file. #[derive(Clone, Copy, Default, PartialEq)] -#[pyclass(eq, eq_int, from_py_object, module = "qsharp._native")] +#[pyclass(eq, eq_int, from_py_object, module = "qdk._native")] #[allow(non_camel_case_types)] /// Represents the output semantics for OpenQASM 3 compilation. /// Each has implications on the output of the compilation @@ -324,7 +324,7 @@ impl From for qsc::openqasm::OutputSemantics { // This ordering must match the _native.pyi file. #[derive(Clone, Copy, Default, PartialEq)] -#[pyclass(eq, eq_int, from_py_object, module = "qsharp._native")] +#[pyclass(eq, eq_int, from_py_object, module = "qdk._native")] #[allow(non_camel_case_types)] /// Represents the type of compilation output to create pub enum ProgramType { diff --git a/source/pip/src/interpreter/data_interop.rs b/source/qdk_package/src/interpreter/data_interop.rs similarity index 99% rename from source/pip/src/interpreter/data_interop.rs rename to source/qdk_package/src/interpreter/data_interop.rs index 8d052c44c3..f2ec59a090 100644 --- a/source/pip/src/interpreter/data_interop.rs +++ b/source/qdk_package/src/interpreter/data_interop.rs @@ -25,7 +25,7 @@ use std::rc::Rc; /// Instances of this enum represent a Q# type. This is used /// to send the definitions of Q# UDTs defined by the user to Python -/// and creating equivalent Python dataclasses in `qsharp.code.*`. +/// and creating equivalent Python dataclasses in `qdk.code.*`. 
#[pyclass(from_py_object)] #[derive(Clone)] pub(super) enum TypeIR { diff --git a/source/pip/src/lib.rs b/source/qdk_package/src/lib.rs similarity index 100% rename from source/pip/src/lib.rs rename to source/qdk_package/src/lib.rs diff --git a/source/pip/src/noisy_simulator.rs b/source/qdk_package/src/noisy_simulator.rs similarity index 100% rename from source/pip/src/noisy_simulator.rs rename to source/qdk_package/src/noisy_simulator.rs diff --git a/source/pip/src/qir_simulation.rs b/source/qdk_package/src/qir_simulation.rs similarity index 99% rename from source/pip/src/qir_simulation.rs rename to source/qdk_package/src/qir_simulation.rs index 6e41c2da97..78cf7b5bae 100644 --- a/source/pip/src/qir_simulation.rs +++ b/source/qdk_package/src/qir_simulation.rs @@ -72,7 +72,7 @@ pub enum QirInstructionId { } #[derive(Debug)] -#[pyclass(module = "qsharp._native")] +#[pyclass(module = "qdk._native")] #[derive(FromPyObject)] pub enum QirInstruction { OneQubitGate(QirInstructionId, u32), @@ -89,7 +89,7 @@ pub enum QirInstruction { } #[derive(Debug)] -#[pyclass(module = "qsharp._native")] +#[pyclass(module = "qdk._native")] pub struct NoiseConfig { #[pyo3(get)] pub i: Py, @@ -312,7 +312,7 @@ pub(crate) fn unbind_noise_config( } #[derive(Clone, Copy, Debug)] -#[pyclass(from_py_object, module = "qsharp._native")] +#[pyclass(from_py_object, module = "qdk._native")] pub struct IdleNoiseParams { #[pyo3(get, set)] pub s_probability: Probability, @@ -354,7 +354,7 @@ impl From for IdleNoiseParams { } #[derive(Clone, Debug)] -#[pyclass(from_py_object, module = "qsharp._native")] +#[pyclass(from_py_object, module = "qdk._native")] pub struct NoiseTable { qubits: u32, pauli_noise: FxHashMap, @@ -669,7 +669,7 @@ impl From> for NoiseTable } #[derive(Debug, Default)] -#[pyclass(from_py_object, module = "qsharp._native")] +#[pyclass(from_py_object, module = "qdk._native")] pub struct NoiseIntrinsicsTable { next_id: u32, table: FxHashMap)>, diff --git 
a/source/pip/src/qir_simulation/correlated_noise.rs b/source/qdk_package/src/qir_simulation/correlated_noise.rs similarity index 100% rename from source/pip/src/qir_simulation/correlated_noise.rs rename to source/qdk_package/src/qir_simulation/correlated_noise.rs diff --git a/source/pip/src/qir_simulation/correlated_noise/tests.rs b/source/qdk_package/src/qir_simulation/correlated_noise/tests.rs similarity index 100% rename from source/pip/src/qir_simulation/correlated_noise/tests.rs rename to source/qdk_package/src/qir_simulation/correlated_noise/tests.rs diff --git a/source/pip/src/qir_simulation/cpu_simulators.rs b/source/qdk_package/src/qir_simulation/cpu_simulators.rs similarity index 100% rename from source/pip/src/qir_simulation/cpu_simulators.rs rename to source/qdk_package/src/qir_simulation/cpu_simulators.rs diff --git a/source/pip/src/qir_simulation/cpu_simulators/tests.rs b/source/qdk_package/src/qir_simulation/cpu_simulators/tests.rs similarity index 100% rename from source/pip/src/qir_simulation/cpu_simulators/tests.rs rename to source/qdk_package/src/qir_simulation/cpu_simulators/tests.rs diff --git a/source/pip/src/qir_simulation/cpu_simulators/tests/clifford_noiseless.rs b/source/qdk_package/src/qir_simulation/cpu_simulators/tests/clifford_noiseless.rs similarity index 100% rename from source/pip/src/qir_simulation/cpu_simulators/tests/clifford_noiseless.rs rename to source/qdk_package/src/qir_simulation/cpu_simulators/tests/clifford_noiseless.rs diff --git a/source/pip/src/qir_simulation/cpu_simulators/tests/clifford_noisy.rs b/source/qdk_package/src/qir_simulation/cpu_simulators/tests/clifford_noisy.rs similarity index 100% rename from source/pip/src/qir_simulation/cpu_simulators/tests/clifford_noisy.rs rename to source/qdk_package/src/qir_simulation/cpu_simulators/tests/clifford_noisy.rs diff --git a/source/pip/src/qir_simulation/cpu_simulators/tests/full_state_noiseless.rs 
b/source/qdk_package/src/qir_simulation/cpu_simulators/tests/full_state_noiseless.rs similarity index 100% rename from source/pip/src/qir_simulation/cpu_simulators/tests/full_state_noiseless.rs rename to source/qdk_package/src/qir_simulation/cpu_simulators/tests/full_state_noiseless.rs diff --git a/source/pip/src/qir_simulation/cpu_simulators/tests/full_state_noisy.rs b/source/qdk_package/src/qir_simulation/cpu_simulators/tests/full_state_noisy.rs similarity index 100% rename from source/pip/src/qir_simulation/cpu_simulators/tests/full_state_noisy.rs rename to source/qdk_package/src/qir_simulation/cpu_simulators/tests/full_state_noisy.rs diff --git a/source/pip/src/qir_simulation/cpu_simulators/tests/test_utils.rs b/source/qdk_package/src/qir_simulation/cpu_simulators/tests/test_utils.rs similarity index 100% rename from source/pip/src/qir_simulation/cpu_simulators/tests/test_utils.rs rename to source/qdk_package/src/qir_simulation/cpu_simulators/tests/test_utils.rs diff --git a/source/pip/src/qir_simulation/gpu_full_state.rs b/source/qdk_package/src/qir_simulation/gpu_full_state.rs similarity index 99% rename from source/pip/src/qir_simulation/gpu_full_state.rs rename to source/qdk_package/src/qir_simulation/gpu_full_state.rs index d548c96a0b..df7d0ec629 100644 --- a/source/pip/src/qir_simulation/gpu_full_state.rs +++ b/source/qdk_package/src/qir_simulation/gpu_full_state.rs @@ -96,7 +96,7 @@ pub fn run_parallel_shots<'py>( type NativeGpuContext = gpu_context::GpuContext; #[derive(Debug)] -#[pyclass(module = "qsharp._native")] +#[pyclass(module = "qdk._native")] pub struct GpuContext { native_context: Mutex, last_set_result_count: usize, // Needed to format results diff --git a/source/pip/src/qre.rs b/source/qdk_package/src/qre.rs similarity index 100% rename from source/pip/src/qre.rs rename to source/qdk_package/src/qre.rs diff --git a/source/pip/src/state_header_template.html b/source/qdk_package/src/state_header_template.html similarity index 100% rename from 
source/pip/src/state_header_template.html rename to source/qdk_package/src/state_header_template.html diff --git a/source/pip/src/state_row_template.html b/source/qdk_package/src/state_row_template.html similarity index 100% rename from source/pip/src/state_row_template.html rename to source/qdk_package/src/state_row_template.html From 98ab35482695ee18f3ab4263a4cd3bdd29674f66 Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Wed, 29 Apr 2026 11:46:32 -0700 Subject: [PATCH 02/25] add python defs to qdk --- .../qdk/.data/qsharp_codemirror.js | 95 + source/qdk_package/qdk/__init__.py | 6 +- source/qdk_package/qdk/_adaptive_bytecode.py | 132 ++ source/qdk_package/qdk/_adaptive_pass.py | 983 ++++++++++ source/qdk_package/qdk/_device/__init__.py | 8 + .../qdk_package/qdk/_device/_atom/__init__.py | 299 +++ .../qdk_package/qdk/_device/_atom/_decomp.py | 510 +++++ .../qdk/_device/_atom/_optimize.py | 315 ++++ .../qdk_package/qdk/_device/_atom/_reorder.py | 114 ++ .../qdk/_device/_atom/_scheduler.py | 938 +++++++++ .../qdk_package/qdk/_device/_atom/_trace.py | 76 + .../qdk_package/qdk/_device/_atom/_utils.py | 92 + .../qdk/_device/_atom/_validate.py | 45 + source/qdk_package/qdk/_device/_device.py | 139 ++ source/qdk_package/qdk/_fs.py | 90 + source/qdk_package/qdk/_http.py | 30 + source/qdk_package/qdk/_ipython.py | 88 + source/qdk_package/qdk/_native.pyi | 1140 +++++++++++ source/qdk_package/qdk/_qsharp.py | 1181 ++++++++++++ source/qdk_package/qdk/_simulation.py | 727 +++++++ .../qdk_package/qdk/applications/__init__.py | 23 - .../qdk_package/qdk/applications/magnets.py | 27 - .../qdk/applications/magnets/__init__.py | 14 + .../applications/magnets/geometry/__init__.py | 22 + .../applications/magnets/geometry/complete.py | 150 ++ .../magnets/geometry/lattice1d.py | 123 ++ .../magnets/geometry/lattice2d.py | 187 ++ .../applications/magnets/models/__init__.py | 12 + .../qdk/applications/magnets/models/model.py | 230 +++ .../applications/magnets/trotter/__init__.py | 22 + 
.../applications/magnets/trotter/trotter.py | 372 ++++ .../magnets/utilities/__init__.py | 26 + .../magnets/utilities/hypergraph.py | 317 ++++ .../applications/magnets/utilities/pauli.py | 270 +++ source/qdk_package/qdk/cirq.py | 38 - source/qdk_package/qdk/cirq/__init__.py | 33 + source/qdk_package/qdk/cirq/_neutral_atom.py | 172 ++ source/qdk_package/qdk/cirq/_result.py | 310 +++ source/qdk_package/qdk/code/__init__.py | 6 + source/qdk_package/qdk/code/__init__.pyi | 4 + source/qdk_package/qdk/estimator.py | 22 - source/qdk_package/qdk/estimator/__init__.py | 36 + .../qdk_package/qdk/estimator/_estimator.py | 1180 ++++++++++++ .../qdk/noisy_simulator/__init__.py | 18 + .../qdk/noisy_simulator/_noisy_simulator.py | 10 + .../qdk/noisy_simulator/_noisy_simulator.pyi | 242 +++ source/qdk_package/qdk/openqasm.py | 21 - source/qdk_package/qdk/openqasm/__init__.py | 20 + source/qdk_package/qdk/openqasm/_circuit.py | 114 ++ source/qdk_package/qdk/openqasm/_compile.py | 100 + source/qdk_package/qdk/openqasm/_estimate.py | 107 ++ source/qdk_package/qdk/openqasm/_import.py | 72 + source/qdk_package/qdk/openqasm/_ipython.py | 24 + source/qdk_package/qdk/openqasm/_run.py | 195 ++ source/qdk_package/qdk/openqasm/_utils.py | 40 + source/qdk_package/qdk/qiskit.py | 43 - source/qdk_package/qdk/qiskit/__init__.py | 108 ++ .../qdk/qiskit/backends/__init__.py | 10 + .../qdk/qiskit/backends/backend_base.py | 614 ++++++ .../qdk/qiskit/backends/compilation.py | 36 + .../qdk_package/qdk/qiskit/backends/errors.py | 29 + .../qiskit/backends/neutral_atom_backend.py | 288 +++ .../qiskit/backends/neutral_atom_target.py | 44 + .../qdk/qiskit/backends/qirtarget.py | 191 ++ .../qdk/qiskit/backends/qsharp_backend.py | 233 +++ .../qdk/qiskit/backends/re_backend.py | 194 ++ .../qdk/qiskit/execution/__init__.py | 4 + .../qdk/qiskit/execution/default.py | 10 + .../qdk_package/qdk/qiskit/jobs/__init__.py | 5 + source/qdk_package/qdk/qiskit/jobs/qsjob.py | 194 ++ 
.../qdk_package/qdk/qiskit/jobs/qsjobset.py | 150 ++ .../qdk_package/qdk/qiskit/passes/__init__.py | 4 + .../qdk/qiskit/passes/remove_delay.py | 22 + source/qdk_package/qdk/qre/__init__.py | 102 +- source/qdk_package/qdk/qre/_application.py | 172 ++ source/qdk_package/qdk/qre/_architecture.py | 244 +++ source/qdk_package/qdk/qre/_enumeration.py | 242 +++ source/qdk_package/qdk/qre/_estimation.py | 218 +++ source/qdk_package/qdk/qre/_instruction.py | 473 +++++ .../qdk_package/qdk/qre/_isa_enumeration.py | 428 +++++ source/qdk_package/qdk/qre/_qre.py | 36 + source/qdk_package/qdk/qre/_qre.pyi | 1679 +++++++++++++++++ source/qdk_package/qdk/qre/_results.py | 418 ++++ source/qdk_package/qdk/qre/_trace.py | 195 ++ source/qdk_package/qdk/qre/application.py | 26 - .../qdk/qre/application/__init__.py | 14 + .../qdk_package/qdk/qre/application/_cirq.py | 58 + .../qdk/qre/application/_openqasm.py | 68 + .../qdk_package/qdk/qre/application/_qir.py | 42 + .../qdk/qre/application/_qsharp.py | 60 + source/qdk_package/qdk/qre/instruction_ids.py | 25 +- .../qdk_package/qdk/qre/instruction_ids.pyi | 99 + source/qdk_package/qdk/qre/interop.py | 27 - .../qdk_package/qdk/qre/interop/__init__.py | 35 + source/qdk_package/qdk/qre/interop/_cirq.py | 822 ++++++++ source/qdk_package/qdk/qre/interop/_qir.py | 136 ++ source/qdk_package/qdk/qre/interop/_qsharp.py | 155 ++ source/qdk_package/qdk/qre/models.py | 27 - source/qdk_package/qdk/qre/models/__init__.py | 23 + .../qdk/qre/models/factories/__init__.py | 8 + .../qdk/qre/models/factories/_litinski.py | 395 ++++ .../qdk/qre/models/factories/_round_based.py | 461 +++++ .../qdk/qre/models/factories/_utils.py | 90 + .../qdk/qre/models/qec/__init__.py | 13 + .../qdk/qre/models/qec/_surface_code.py | 154 ++ .../qdk/qre/models/qec/_three_aux.py | 120 ++ .../qdk_package/qdk/qre/models/qec/_yoked.py | 243 +++ .../qdk/qre/models/qubits/__init__.py | 7 + .../qdk/qre/models/qubits/_gate_based.py | 139 ++ .../qdk/qre/models/qubits/_msft.py | 70 + 
source/qdk_package/qdk/qre/property_keys.py | 25 +- source/qdk_package/qdk/qre/property_keys.pyi | 23 + source/qdk_package/qdk/qsharp.py | 4 +- source/qdk_package/qdk/simulation.py | 4 +- source/qdk_package/qdk/telemetry.py | 310 +++ source/qdk_package/qdk/telemetry_events.py | 357 ++++ source/qdk_package/qdk/utils/__init__.py | 8 + source/qdk_package/qdk/utils/_utils.py | 50 + 118 files changed, 21726 insertions(+), 325 deletions(-) create mode 100644 source/qdk_package/qdk/.data/qsharp_codemirror.js create mode 100644 source/qdk_package/qdk/_adaptive_bytecode.py create mode 100644 source/qdk_package/qdk/_adaptive_pass.py create mode 100644 source/qdk_package/qdk/_device/__init__.py create mode 100644 source/qdk_package/qdk/_device/_atom/__init__.py create mode 100644 source/qdk_package/qdk/_device/_atom/_decomp.py create mode 100644 source/qdk_package/qdk/_device/_atom/_optimize.py create mode 100644 source/qdk_package/qdk/_device/_atom/_reorder.py create mode 100644 source/qdk_package/qdk/_device/_atom/_scheduler.py create mode 100644 source/qdk_package/qdk/_device/_atom/_trace.py create mode 100644 source/qdk_package/qdk/_device/_atom/_utils.py create mode 100644 source/qdk_package/qdk/_device/_atom/_validate.py create mode 100644 source/qdk_package/qdk/_device/_device.py create mode 100644 source/qdk_package/qdk/_fs.py create mode 100644 source/qdk_package/qdk/_http.py create mode 100644 source/qdk_package/qdk/_ipython.py create mode 100644 source/qdk_package/qdk/_native.pyi create mode 100644 source/qdk_package/qdk/_qsharp.py create mode 100644 source/qdk_package/qdk/_simulation.py delete mode 100644 source/qdk_package/qdk/applications/magnets.py create mode 100644 source/qdk_package/qdk/applications/magnets/__init__.py create mode 100644 source/qdk_package/qdk/applications/magnets/geometry/__init__.py create mode 100644 source/qdk_package/qdk/applications/magnets/geometry/complete.py create mode 100644 
source/qdk_package/qdk/applications/magnets/geometry/lattice1d.py create mode 100644 source/qdk_package/qdk/applications/magnets/geometry/lattice2d.py create mode 100644 source/qdk_package/qdk/applications/magnets/models/__init__.py create mode 100644 source/qdk_package/qdk/applications/magnets/models/model.py create mode 100644 source/qdk_package/qdk/applications/magnets/trotter/__init__.py create mode 100644 source/qdk_package/qdk/applications/magnets/trotter/trotter.py create mode 100644 source/qdk_package/qdk/applications/magnets/utilities/__init__.py create mode 100644 source/qdk_package/qdk/applications/magnets/utilities/hypergraph.py create mode 100644 source/qdk_package/qdk/applications/magnets/utilities/pauli.py delete mode 100644 source/qdk_package/qdk/cirq.py create mode 100644 source/qdk_package/qdk/cirq/__init__.py create mode 100644 source/qdk_package/qdk/cirq/_neutral_atom.py create mode 100644 source/qdk_package/qdk/cirq/_result.py create mode 100644 source/qdk_package/qdk/code/__init__.py create mode 100644 source/qdk_package/qdk/code/__init__.pyi delete mode 100644 source/qdk_package/qdk/estimator.py create mode 100644 source/qdk_package/qdk/estimator/__init__.py create mode 100644 source/qdk_package/qdk/estimator/_estimator.py create mode 100644 source/qdk_package/qdk/noisy_simulator/__init__.py create mode 100644 source/qdk_package/qdk/noisy_simulator/_noisy_simulator.py create mode 100644 source/qdk_package/qdk/noisy_simulator/_noisy_simulator.pyi delete mode 100644 source/qdk_package/qdk/openqasm.py create mode 100644 source/qdk_package/qdk/openqasm/__init__.py create mode 100644 source/qdk_package/qdk/openqasm/_circuit.py create mode 100644 source/qdk_package/qdk/openqasm/_compile.py create mode 100644 source/qdk_package/qdk/openqasm/_estimate.py create mode 100644 source/qdk_package/qdk/openqasm/_import.py create mode 100644 source/qdk_package/qdk/openqasm/_ipython.py create mode 100644 source/qdk_package/qdk/openqasm/_run.py create mode 
100644 source/qdk_package/qdk/openqasm/_utils.py delete mode 100644 source/qdk_package/qdk/qiskit.py create mode 100644 source/qdk_package/qdk/qiskit/__init__.py create mode 100644 source/qdk_package/qdk/qiskit/backends/__init__.py create mode 100644 source/qdk_package/qdk/qiskit/backends/backend_base.py create mode 100644 source/qdk_package/qdk/qiskit/backends/compilation.py create mode 100644 source/qdk_package/qdk/qiskit/backends/errors.py create mode 100644 source/qdk_package/qdk/qiskit/backends/neutral_atom_backend.py create mode 100644 source/qdk_package/qdk/qiskit/backends/neutral_atom_target.py create mode 100644 source/qdk_package/qdk/qiskit/backends/qirtarget.py create mode 100644 source/qdk_package/qdk/qiskit/backends/qsharp_backend.py create mode 100644 source/qdk_package/qdk/qiskit/backends/re_backend.py create mode 100644 source/qdk_package/qdk/qiskit/execution/__init__.py create mode 100644 source/qdk_package/qdk/qiskit/execution/default.py create mode 100644 source/qdk_package/qdk/qiskit/jobs/__init__.py create mode 100644 source/qdk_package/qdk/qiskit/jobs/qsjob.py create mode 100644 source/qdk_package/qdk/qiskit/jobs/qsjobset.py create mode 100644 source/qdk_package/qdk/qiskit/passes/__init__.py create mode 100644 source/qdk_package/qdk/qiskit/passes/remove_delay.py create mode 100644 source/qdk_package/qdk/qre/_application.py create mode 100644 source/qdk_package/qdk/qre/_architecture.py create mode 100644 source/qdk_package/qdk/qre/_enumeration.py create mode 100644 source/qdk_package/qdk/qre/_estimation.py create mode 100644 source/qdk_package/qdk/qre/_instruction.py create mode 100644 source/qdk_package/qdk/qre/_isa_enumeration.py create mode 100644 source/qdk_package/qdk/qre/_qre.py create mode 100644 source/qdk_package/qdk/qre/_qre.pyi create mode 100644 source/qdk_package/qdk/qre/_results.py create mode 100644 source/qdk_package/qdk/qre/_trace.py delete mode 100644 source/qdk_package/qdk/qre/application.py create mode 100644 
source/qdk_package/qdk/qre/application/__init__.py create mode 100644 source/qdk_package/qdk/qre/application/_cirq.py create mode 100644 source/qdk_package/qdk/qre/application/_openqasm.py create mode 100644 source/qdk_package/qdk/qre/application/_qir.py create mode 100644 source/qdk_package/qdk/qre/application/_qsharp.py create mode 100644 source/qdk_package/qdk/qre/instruction_ids.pyi delete mode 100644 source/qdk_package/qdk/qre/interop.py create mode 100644 source/qdk_package/qdk/qre/interop/__init__.py create mode 100644 source/qdk_package/qdk/qre/interop/_cirq.py create mode 100644 source/qdk_package/qdk/qre/interop/_qir.py create mode 100644 source/qdk_package/qdk/qre/interop/_qsharp.py delete mode 100644 source/qdk_package/qdk/qre/models.py create mode 100644 source/qdk_package/qdk/qre/models/__init__.py create mode 100644 source/qdk_package/qdk/qre/models/factories/__init__.py create mode 100644 source/qdk_package/qdk/qre/models/factories/_litinski.py create mode 100644 source/qdk_package/qdk/qre/models/factories/_round_based.py create mode 100644 source/qdk_package/qdk/qre/models/factories/_utils.py create mode 100644 source/qdk_package/qdk/qre/models/qec/__init__.py create mode 100644 source/qdk_package/qdk/qre/models/qec/_surface_code.py create mode 100644 source/qdk_package/qdk/qre/models/qec/_three_aux.py create mode 100644 source/qdk_package/qdk/qre/models/qec/_yoked.py create mode 100644 source/qdk_package/qdk/qre/models/qubits/__init__.py create mode 100644 source/qdk_package/qdk/qre/models/qubits/_gate_based.py create mode 100644 source/qdk_package/qdk/qre/models/qubits/_msft.py create mode 100644 source/qdk_package/qdk/qre/property_keys.pyi create mode 100644 source/qdk_package/qdk/telemetry.py create mode 100644 source/qdk_package/qdk/telemetry_events.py create mode 100644 source/qdk_package/qdk/utils/__init__.py create mode 100644 source/qdk_package/qdk/utils/_utils.py diff --git a/source/qdk_package/qdk/.data/qsharp_codemirror.js 
b/source/qdk_package/qdk/.data/qsharp_codemirror.js new file mode 100644 index 0000000000..8078c1a8cb --- /dev/null +++ b/source/qdk_package/qdk/.data/qsharp_codemirror.js @@ -0,0 +1,95 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +// This file provides CodeMirror syntax highlighting for Q# magic cells +// in classic Jupyter Notebooks. It does nothing in other (Jupyter Notebook 7, +// VS Code, Azure Notebooks, etc.) environments. + +// Detect the prerequisites and do nothing if they don't exist. +if (window.require && window.CodeMirror && window.Jupyter) { + // The simple mode plugin for CodeMirror is not loaded by default, so require it. + window.require(["codemirror/addon/mode/simple"], function defineMode() { + let rules = [ + { + token: "comment", + regex: /(\/\/).*/, + beginWord: false, + }, + { + token: "string", + regex: String.raw`^\"(?:[^\"\\]|\\[\s\S])*(?:\"|$)`, + beginWord: false, + }, + { + token: "keyword", + regex: String.raw`(namespace|open|as|operation|function|body|adjoint|newtype|controlled|internal)\b`, + beginWord: true, + }, + { + token: "keyword", + regex: String.raw`(if|elif|else|repeat|until|fixup|for|in|return|fail|within|apply)\b`, + beginWord: true, + }, + { + token: "keyword", + regex: String.raw`(Adjoint|Controlled|Adj|Ctl|is|self|auto|distribute|invert|intrinsic)\b`, + beginWord: true, + }, + { + token: "keyword", + regex: String.raw`(let|set|use|borrow|mutable)\b`, + beginWord: true, + }, + { + token: "operatorKeyword", + regex: String.raw`(not|and|or)\b|(w/)`, + beginWord: true, + }, + { + token: "operatorKeyword", + regex: String.raw`(=)|(!)|(<)|(>)|(\+)|(-)|(\*)|(/)|(\^)|(%)|(\|)|(&&&)|(~~~)|(\.\.\.)|(\.\.)|(\?)`, + beginWord: false, + }, + { + token: "meta", + regex: String.raw`(Int|BigInt|Double|Bool|Qubit|Pauli|Result|Range|String|Unit)\b`, + beginWord: true, + }, + { + token: "atom", + regex: String.raw`(true|false|Pauli(I|X|Y|Z)|One|Zero)\b`, + beginWord: true, + }, + ]; + let simpleRules 
= []; + for (let rule of rules) { + simpleRules.push({ + token: rule.token, + regex: new RegExp(rule.regex, "g"), + sol: rule.beginWord, + }); + if (rule.beginWord) { + // Need an additional rule due to the fact that CodeMirror simple mode doesn't work with ^ token + simpleRules.push({ + token: rule.token, + regex: new RegExp(String.raw`\W` + rule.regex, "g"), + sol: false, + }); + } + } + + // Register the mode defined above with CodeMirror + window.CodeMirror.defineSimpleMode("qsharp", { start: simpleRules }); + window.CodeMirror.defineMIME("text/x-qsharp", "qsharp"); + + // Tell Jupyter to associate %%qsharp magic cells with the qsharp mode + window.Jupyter.CodeCell.options_default.highlight_modes["qsharp"] = { + reg: [/^%%qsharp/], + }; + + // Force re-highlighting of all cells the first time this code runs + for (const cell of window.Jupyter.notebook.get_cells()) { + cell.auto_highlight(); + } + }); +} diff --git a/source/qdk_package/qdk/__init__.py b/source/qdk_package/qdk/__init__.py index dfe5b0d012..67674a3649 100644 --- a/source/qdk_package/qdk/__init__.py +++ b/source/qdk_package/qdk/__init__.py @@ -16,13 +16,13 @@ """ -from qsharp.telemetry_events import on_qdk_import +from .telemetry_events import on_qdk_import on_qdk_import() # Some common utilities are lifted to the qdk root. -from qsharp import code -from qsharp import ( +from . import code +from ._qsharp import ( set_quantum_seed, set_classical_seed, dump_machine, diff --git a/source/qdk_package/qdk/_adaptive_bytecode.py b/source/qdk_package/qdk/_adaptive_bytecode.py new file mode 100644 index 0000000000..876a0a196f --- /dev/null +++ b/source/qdk_package/qdk/_adaptive_bytecode.py @@ -0,0 +1,132 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Shared opcode constants for the Adaptive Profile QIR bytecode interpreter. + +These constants define the bytecode encoding used by the Python AdaptiveProfilePass +(emitter) and the Rust GPU receiver. 
Values must stay in sync with the Rust +``bytecode.rs`` module and the WGSL interpreter. + +Opcode word layout:: + + bits [7:0] = primary opcode + bits [15:8] = sub-opcode / condition code + bits [23:16] = flags + +Compose via bitwise OR: ``opcode | (sub << 8) | flag`` +Example: ``OP_ICMP | (ICMP_SLE << 8) | FLAG_SRC1_IMM`` +""" + +# ── Flags (pre-shifted to bit 16+) ────────────────────────────────────────── +FLAG_DST_IMM = 1 << 18 # dst field is an immediate value, not a register +FLAG_SRC0_IMM = 1 << 16 # src0 field is an immediate value, not a register +FLAG_SRC1_IMM = 1 << 17 # src1 field is an immediate value, not a register +FLAG_AUX0_IMM = 1 << 19 # aux0 field is an immediate value, not a register +FLAG_AUX1_IMM = 1 << 20 # aux1 field is an immediate value, not a register +FLAG_AUX2_IMM = 1 << 21 # aux2 field is an immediate value, not a register +FLAG_AUX3_IMM = 1 << 22 # aux3 field is an immediate value, not a register + +FLAG_FLOAT = 1 << 23 # operation uses float semantics + + +# ── Control Flow ───────────────────────────────────────────────────────────── +OP_NOP = 0x00 +OP_RET = 0x02 +OP_JUMP = 0x04 +OP_BRANCH = 0x05 +OP_SWITCH = 0x06 +OP_CALL = 0x07 +OP_CALL_RETURN = 0x08 + +# ── Quantum ────────────────────────────────────────────────────────────────── +OP_QUANTUM_GATE = 0x10 +OP_MEASURE = 0x11 +OP_RESET = 0x12 +OP_READ_RESULT = 0x13 +OP_RECORD_OUTPUT = 0x14 + +# ── Integer Arithmetic ─────────────────────────────────────────────────────── +OP_ADD = 0x20 +OP_SUB = 0x21 +OP_MUL = 0x22 +OP_UDIV = 0x23 +OP_SDIV = 0x24 +OP_UREM = 0x25 +OP_SREM = 0x26 + +# ── Bitwise / Shift ───────────────────────────────────────────────────────── +OP_AND = 0x28 +OP_OR = 0x29 +OP_XOR = 0x2A +OP_SHL = 0x2B +OP_LSHR = 0x2C +OP_ASHR = 0x2D + +# ── Comparison ─────────────────────────────────────────────────────────────── +OP_ICMP = 0x30 +OP_FCMP = 0x31 + +# ── Float Arithmetic ───────────────────────────────────────────────────────── +OP_FADD = 0x38 +OP_FSUB = 0x39 
+OP_FMUL = 0x3A +OP_FDIV = 0x3B + +# ── Type Conversion ────────────────────────────────────────────────────────── +OP_ZEXT = 0x40 +OP_SEXT = 0x41 +OP_TRUNC = 0x42 +OP_FPEXT = 0x43 +OP_FPTRUNC = 0x44 +OP_INTTOPTR = 0x45 +OP_FPTOSI = 0x46 +OP_SITOFP = 0x47 + +# ── SSA / Data Movement ───────────────────────────────────────────────────── +OP_PHI = 0x50 +OP_SELECT = 0x51 +OP_MOV = 0x52 +OP_CONST = 0x53 + +# ── ICmp condition codes (sub-opcode, placed in bits[15:8] via << 8) ───────── +# Reference: https://llvm.org/docs/LangRef.html#icmp-instruction +ICMP_EQ = 0 +ICMP_NE = 1 +ICMP_SLT = 2 +ICMP_SLE = 3 +ICMP_SGT = 4 +ICMP_SGE = 5 +ICMP_ULT = 6 +ICMP_ULE = 7 +ICMP_UGT = 8 +ICMP_UGE = 9 + +# ── FCmp condition codes ───────────────────────────────────────────────────── +# Reference: https://llvm.org/docs/LangRef.html#fcmp-instruction +FCMP_FALSE = 0 +FCMP_OEQ = 1 +FCMP_OGT = 2 +FCMP_OGE = 3 +FCMP_OLT = 4 +FCMP_OLE = 5 +FCMP_ONE = 6 +FCMP_ORD = 7 +FCMP_UNO = 8 +FCMP_UEQ = 9 +FCMP_UGT = 10 +FCMP_UGE = 11 +FCMP_ULT = 12 +FCMP_ULE = 13 +FCMP_UNE = 14 +FCMP_TRUE = 15 + +# ── Register type tags ─────────────────────────────────────────────────────── +REG_TYPE_BOOL = 0 +REG_TYPE_I32 = 1 +REG_TYPE_I64 = 2 +REG_TYPE_F32 = 3 +REG_TYPE_F64 = 4 +REG_TYPE_PTR = 5 + +# ── Sentinel values ────────────────────────────────────────────────────────── +VOID_RETURN = 0xFFFFFFFF # Function does not have a return value. diff --git a/source/qdk_package/qdk/_adaptive_pass.py b/source/qdk_package/qdk/_adaptive_pass.py new file mode 100644 index 0000000000..a3cd815de6 --- /dev/null +++ b/source/qdk_package/qdk/_adaptive_pass.py @@ -0,0 +1,983 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""AdaptiveProfilePass: walks Adaptive Profile QIR and emits the intermediate +format consumed by Rust. 
+ +Unlike ``AggregateGatesPass`` (which subclasses ``pyqir.QirModuleVisitor`` and +only dispatches CALL instructions), this pass iterates basic blocks and +instructions directly so it can handle *all* LLVM IR opcodes required by the +Adaptive Profile specification. +""" + +from __future__ import annotations +from dataclasses import dataclass, astuple +import pyqir +import struct +from typing import Any, Dict, List, Optional, Tuple, TypeAlias, cast +from ._adaptive_bytecode import * + +# --------------------------------------------------------------------------- +# Gate name → OpID mapping (must match shader_types.rs OpID enum) +# --------------------------------------------------------------------------- + +GATE_MAP: Dict[str, int] = { + "reset": 1, + "x": 2, + "y": 3, + "z": 4, + "h": 5, + "s": 6, + "s__adj": 7, + "t": 8, + "t__adj": 9, + "sx": 10, + "sx__adj": 11, + "rx": 12, + "ry": 13, + "rz": 14, + "cnot": 15, + "cx": 15, + "cz": 16, + "cy": 29, + "rxx": 17, + "ryy": 18, + "rzz": 19, + "ccx": 20, + "m": 21, + "mz": 21, + "mresetz": 22, + "swap": 24, +} + +# Gates that take a result ID as a second argument +MEASURE_GATES = {"m", "mz", "mresetz"} + +# Gates that reset a qubit (single qubit argument, no result) +RESET_GATES = {"reset"} + +# Rotation gates that take an angle parameter as first argument +ROTATION_GATES = {"rx", "ry", "rz", "rxx", "ryy", "rzz"} + +# --------------------------------------------------------------------------- +# ICmp / FCmp predicate mappings +# --------------------------------------------------------------------------- + +ICMP_MAP = { + pyqir.IntPredicate.EQ: ICMP_EQ, + pyqir.IntPredicate.NE: ICMP_NE, + pyqir.IntPredicate.SLT: ICMP_SLT, + pyqir.IntPredicate.SLE: ICMP_SLE, + pyqir.IntPredicate.SGT: ICMP_SGT, + pyqir.IntPredicate.SGE: ICMP_SGE, + pyqir.IntPredicate.ULT: ICMP_ULT, + pyqir.IntPredicate.ULE: ICMP_ULE, + pyqir.IntPredicate.UGT: ICMP_UGT, + pyqir.IntPredicate.UGE: ICMP_UGE, +} + +FCMP_MAP = { + pyqir.FloatPredicate.FALSE: 
FCMP_FALSE, + pyqir.FloatPredicate.OEQ: FCMP_OEQ, + pyqir.FloatPredicate.OGT: FCMP_OGT, + pyqir.FloatPredicate.OGE: FCMP_OGE, + pyqir.FloatPredicate.OLT: FCMP_OLT, + pyqir.FloatPredicate.OLE: FCMP_OLE, + pyqir.FloatPredicate.ONE: FCMP_ONE, + pyqir.FloatPredicate.ORD: FCMP_ORD, + pyqir.FloatPredicate.UNO: FCMP_UNO, + pyqir.FloatPredicate.UEQ: FCMP_UEQ, + pyqir.FloatPredicate.UGT: FCMP_UGT, + pyqir.FloatPredicate.UGE: FCMP_UGE, + pyqir.FloatPredicate.ULT: FCMP_ULT, + pyqir.FloatPredicate.ULE: FCMP_ULE, + pyqir.FloatPredicate.UNE: FCMP_UNE, + pyqir.FloatPredicate.TRUE: FCMP_TRUE, +} + + +@dataclass +class AdaptiveProgram: + num_qubits: int + num_results: int + num_registers: int + entry_block: int + blocks: List[Block] + instructions: List[Instruction] + quantum_ops: List[QuantumOp] + functions: List[Function] + phi_entries: List[PhiNodeEntry] + switch_cases: List[SwitchCase] + call_args: List[CallArg] + labels: List[Label] + register_types: List[RegisterType] + + def as_dict(self): + """ + Transforms the program to a dictionary, and each of + the helper dataclasses to a tuple. This format is intended + to be used in the FFI between Python and Rust. 
+ """ + return { + "num_qubits": self.num_qubits, + "num_results": self.num_results, + "num_registers": self.num_registers, + "entry_block": self.entry_block, + "blocks": [astuple(x) for x in self.blocks], + "instructions": [astuple(x) for x in self.instructions], + "quantum_ops": [astuple(x) for x in self.quantum_ops], + "functions": [astuple(x) for x in self.functions], + "phi_entries": [astuple(x) for x in self.phi_entries], + "switch_cases": [astuple(x) for x in self.switch_cases], + "call_args": self.call_args, + "labels": self.labels, + "register_types": self.register_types, + } + + +@dataclass +class Block: + block_id: int + instr_offset: int + instr_count: int + + +@dataclass +class Instruction: + opcode: int + dst: int + src0: int + src1: int + aux0: int + aux1: int + aux2: int + aux3: int + + +@dataclass +class QuantumOp: + op_id: int + q1: int + q2: int + q3: int + angle: float + + +@dataclass +class Function: + func_entry_block: int + num_params: int + param_base: int + + +@dataclass +class PhiNodeEntry: + block_id: int + val_reg: int + + +@dataclass +class SwitchCase: + case_val: int + target_block: int + + +# OpID for correlated noise (must match shader_types.rs OpID::CorrelatedNoise) +CORRELATED_NOISE_OP_ID = 131 + +CallArg: TypeAlias = int +Label: TypeAlias = str +RegisterType: TypeAlias = int + + +@dataclass +class IntOperand: + val: int = 0 + + def __post_init__(self): + # Mask to u32 range so negative Python ints become their + # two's-complement u32 representation (e.g. -7 → 0xFFFFFFF9). 
+ self.val = self.val & 0xFFFFFFFF + + +class FloatOperand: + def __init__(self, val: float = 0.0) -> None: + self.val: int = encode_float_as_bits(val) + + +@dataclass +class Reg: + val: int # index in the registers table + + +def is_immediate(arg) -> bool: + return isinstance(arg, (IntOperand, FloatOperand)) + + +def prepare_immediate_flags( + *, dst=None, src0=None, src1=None, aux0=None, aux1=None, aux2=None, aux3=None +): + flags = 0 + if is_immediate(dst): + flags |= FLAG_DST_IMM + if is_immediate(src0): + flags |= FLAG_SRC0_IMM + if is_immediate(src1): + flags |= FLAG_SRC1_IMM + if is_immediate(aux0): + flags |= FLAG_AUX0_IMM + if is_immediate(aux1): + flags |= FLAG_AUX1_IMM + if is_immediate(aux2): + flags |= FLAG_AUX2_IMM + if is_immediate(aux3): + flags |= FLAG_AUX3_IMM + return flags + + +def unwrap_operands( + dst, src0, src1, aux0, aux1, aux2, aux3 +) -> Tuple[int, int, int, int, int, int, int]: + if not isinstance(dst, int): + dst = dst.val + if not isinstance(src0, int): + src0 = src0.val + if not isinstance(src1, int): + src1 = src1.val + if not isinstance(aux0, int): + aux0 = aux0.val + if not isinstance(aux1, int): + aux1 = aux1.val + if not isinstance(aux2, int): + aux2 = aux2.val + if not isinstance(aux3, int): + aux3 = aux3.val + return (dst, src0, src1, aux0, aux1, aux2, aux3) + + +def encode_float_as_bits(val: float) -> int: + return struct.unpack(" AdaptiveProgram: + """Process module and return the AdaptiveProgram. + + :param mod: The QIR module to process. + :param noise: Optional NoiseConfig. When provided, noise intrinsic calls + are resolved to correlated noise ops using the intrinsics table. + :param noise_intrinsics: Optional dict mapping noise intrinsic callee names + to noise table IDs. Takes precedence over ``noise`` if both are given. + :return: The processed adaptive program. 
+ :rtype: AdaptiveProgram + """ + if mod.get_flag("arrays"): + raise ValueError("QIR arrays are not currently supported.") + + if noise_intrinsics is not None: + self._noise_intrinsics = noise_intrinsics + elif noise is not None: + # Build {name: table_id} mapping from the NoiseConfig intrinsics + intrinsics = noise.intrinsics + self._noise_intrinsics = {} + for callee_name in mod.functions: + name = callee_name.name + if name in intrinsics: + self._noise_intrinsics[name] = intrinsics.get_intrinsic_id(name) + + errors = mod.verify() + if errors is not None: + raise ValueError(f"Module verification failed: {errors}") + + # Pass 1: Assign block IDs and function IDs for all defined functions + for func in mod.functions: + if len(func.basic_blocks) > 0: + self._assign_function(func) + + # Pass 2: Walk instructions and emit encoding + for func in mod.functions: + if len(func.basic_blocks) > 0: + self._walk_function(func) + + entry_func = next(filter(pyqir.is_entry_point, mod.functions)) + num_qubits = pyqir.required_num_qubits(entry_func) + num_results = pyqir.required_num_results(entry_func) + assert isinstance(num_qubits, int) + assert isinstance(num_results, int) + + return AdaptiveProgram( + num_qubits=num_qubits, + num_results=num_results, + num_registers=self._next_reg, + entry_block=self._block_to_id[entry_func.basic_blocks[0]], + blocks=self.blocks, + instructions=self.instructions, + quantum_ops=self.quantum_ops, + functions=self.functions, + phi_entries=self.phi_entries, + switch_cases=self.switch_cases, + call_args=self.call_args, + labels=self.labels, + register_types=self.register_types, + ) + + # ------------------------------------------------------------------ + # Register allocation + # ------------------------------------------------------------------ + + def _alloc_reg(self, value: Any, type_tag: int) -> Reg: + """Allocate a new register for `value` and record its type. + + If `value` was already pre-allocated (e.g. 
as a forward reference from + a phi node), return the existing register instead of allocating a new + one. + """ + if value is not None and value in self._value_to_reg: + return self._value_to_reg[value] + reg = Reg(self._next_reg) + self._next_reg += 1 + if value is not None: + self._value_to_reg[value] = reg + self.register_types.append(type_tag) + return reg + + # ------------------------------------------------------------------ + # Instruction emission + # ------------------------------------------------------------------ + + def _emit( + self, + opcode: int, + *, + dst: int | IntOperand | FloatOperand | Reg = 0, + src0: int | IntOperand | FloatOperand | Reg = 0, + src1: int | IntOperand | FloatOperand | Reg = 0, + aux0: int | IntOperand | FloatOperand | Reg = 0, + aux1: int | IntOperand | FloatOperand | Reg = 0, + aux2: int | IntOperand | FloatOperand | Reg = 0, + aux3: int | IntOperand | FloatOperand | Reg = 0, + ) -> None: + imm_flags = prepare_immediate_flags( + dst=dst, src0=src0, src1=src1, aux0=aux0, aux1=aux1, aux2=aux2, aux3=aux3 + ) + (dst, src0, src1, aux0, aux1, aux2, aux3) = unwrap_operands( + dst, src0, src1, aux0, aux1, aux2, aux3 + ) + ins = Instruction(opcode | imm_flags, dst, src0, src1, aux0, aux1, aux2, aux3) + self.instructions.append(ins) + + def _emit_quantum_op( + self, + op_id: int, + q1: int = 0, + q2: int = 0, + q3: int = 0, + angle: float = 0.0, + ) -> int: + idx = self._next_qop + self._next_qop += 1 + qop = QuantumOp(op_id, q1, q2, q3, angle) + self.quantum_ops.append(qop) + return idx + + # ------------------------------------------------------------------ + # Operand resolution + # ------------------------------------------------------------------ + + def _resolve_operand(self, value: pyqir.Value) -> IntOperand | FloatOperand | Reg: + """Resolve a pyqir Value to a register index. + + If `value` is an already-assigned SSA register, return its index. 
+ If `value` is an integer constant, allocate a register and emit + ``OP_CONST`` to materialise it. + """ + if value in self._value_to_reg: + return self._value_to_reg[value] + + if isinstance(value, pyqir.IntConstant): + val = value.value + return IntOperand(val) + + if isinstance(value, pyqir.FloatConstant): + val = value.value + return FloatOperand(val) + + # Forward reference (e.g. phi incoming from a later block). + # Pre-allocate a register; the defining instruction will reuse it + # via _alloc_reg's dedup check. + if isinstance(value, pyqir.Instruction): + return self._alloc_reg(value, self._type_tag(value.type)) + + # Constant expressions (e.g. inttoptr (i64 N to ptr)). + if isinstance(value, pyqir.Constant): + # Try extracting as a qubit/result pointer constant. + pid = pyqir.ptr_id(value) + if pid is not None: + return IntOperand(pid) + # Null pointer + if value.is_null: + reg = self._alloc_reg(value, REG_TYPE_PTR) + self._emit(OP_CONST | FLAG_SRC0_IMM, dst=reg.val, src0=0) + return reg + + raise ValueError(f"Cannot resolve operand: {type(value).__name__}") + + def _type_tag(self, ty: Any) -> int: + """Map a pyqir Type to a register type tag.""" + if isinstance(ty, pyqir.IntType): + w = ty.width + if w == 1: + return REG_TYPE_BOOL + if w <= 32: + return REG_TYPE_I32 + return REG_TYPE_I64 + if isinstance(ty, pyqir.PointerType): + return REG_TYPE_PTR + if ty.is_double: + return REG_TYPE_F64 + # Remaining floating-point types (e.g. 
float/f32) + return REG_TYPE_F32 + + # ------------------------------------------------------------------ + # Binary / unary helpers + # ------------------------------------------------------------------ + + def _emit_binary(self, opcode: int, instr: Any) -> None: + """Emit a binary arithmetic/bitwise instruction.""" + dst = self._alloc_reg(instr, self._type_tag(instr.type)) + src0 = self._resolve_operand(instr.operands[0]) + src1 = self._resolve_operand(instr.operands[1]) + self._emit(opcode, dst=dst, src0=src0, src1=src1) + + def _emit_unary(self, opcode: int, instr: Any) -> None: + """Emit a unary conversion instruction.""" + dst = self._alloc_reg(instr, self._type_tag(instr.type)) + src0 = self._resolve_operand(instr.operands[0]) + self._emit(opcode, dst=dst, src0=src0) + + def _emit_sext(self, instr: Any) -> None: + """Emit OP_SEXT with source bit width in aux0.""" + dst = self._alloc_reg(instr, self._type_tag(instr.type)) + src0 = self._resolve_operand(instr.operands[0]) + src_type = instr.operands[0].type + src_bits = src_type.width if isinstance(src_type, pyqir.IntType) else 32 + self._emit(OP_SEXT, dst=dst, src0=src0, aux0=src_bits) + + # ------------------------------------------------------------------ + # Function assignment (Pass 1) + # ------------------------------------------------------------------ + + def _assign_function(self, func: pyqir.Function) -> None: + """Assign block IDs and function IDs for a function.""" + if not pyqir.is_entry_point(func) and func.name not in self._func_to_id: + func_id = len(self._func_to_id) + self._func_to_id[func.name] = func_id + for block in func.basic_blocks: + self._block_to_id[block] = self._next_block + self._next_block += 1 + + # ------------------------------------------------------------------ + # Function walking (Pass 2) + # ------------------------------------------------------------------ + + def _walk_function(self, func: pyqir.Function) -> None: + """Walk all blocks and instructions in a function, 
emitting bytecode.""" + self._current_func_is_entry = pyqir.is_entry_point(func) + + # For non-entry functions, register parameters as registers + if not self._current_func_is_entry: + param_base = self._next_reg + for param in func.params: + self._alloc_reg( + param, REG_TYPE_PTR + ) # params are pointers (%Qubit*, %Result*) + # Record function entry in the function table + if func.name in self._func_to_id: + func_entry_block = self._block_to_id[func.basic_blocks[0]] + f = Function(func_entry_block, len(func.params), param_base) + self.functions.append(f) + + for block in func.basic_blocks: + block_id = self._block_to_id[block] + instr_offset = len(self.instructions) + for instr in block.instructions: + self._on_instruction(instr) + # NOTE: block.terminator is already included in block.instructions + # in pyqir, so we do NOT separately process it. + instr_count = len(self.instructions) - instr_offset + blk = Block(block_id, instr_offset, instr_count) + self.blocks.append(blk) + + # ------------------------------------------------------------------ + # Instruction dispatch + # ------------------------------------------------------------------ + + def _on_instruction(self, instr: pyqir.Instruction) -> None: + """Dispatch a single instruction by opcode.""" + match instr.opcode: + case pyqir.Opcode.CALL: + self._emit_call(cast(pyqir.Call, instr)) + case pyqir.Opcode.PHI: + self._emit_phi(cast(pyqir.Phi, instr)) + case pyqir.Opcode.ICMP: + self._emit_icmp(cast(pyqir.ICmp, instr)) + case pyqir.Opcode.FCMP: + self._emit_fcmp(cast(pyqir.FCmp, instr)) + case pyqir.Opcode.SWITCH: + self._emit_switch(cast(pyqir.Switch, instr)) + case pyqir.Opcode.BR: + self._emit_branch(instr) + case pyqir.Opcode.RET: + self._emit_ret(instr) + case pyqir.Opcode.SELECT: + self._emit_select(instr) + case pyqir.Opcode.ADD: + self._emit_binary(OP_ADD, instr) + case pyqir.Opcode.SUB: + self._emit_binary(OP_SUB, instr) + case pyqir.Opcode.MUL: + self._emit_binary(OP_MUL, instr) + case 
pyqir.Opcode.UDIV: + self._emit_binary(OP_UDIV, instr) + case pyqir.Opcode.SDIV: + self._emit_binary(OP_SDIV, instr) + case pyqir.Opcode.UREM: + self._emit_binary(OP_UREM, instr) + case pyqir.Opcode.SREM: + self._emit_binary(OP_SREM, instr) + case pyqir.Opcode.AND: + self._emit_binary(OP_AND, instr) + case pyqir.Opcode.OR: + self._emit_binary(OP_OR, instr) + case pyqir.Opcode.XOR: + self._emit_binary(OP_XOR, instr) + case pyqir.Opcode.SHL: + self._emit_binary(OP_SHL, instr) + case pyqir.Opcode.LSHR: + self._emit_binary(OP_LSHR, instr) + case pyqir.Opcode.ASHR: + self._emit_binary(OP_ASHR, instr) + case pyqir.Opcode.ZEXT: + self._emit_unary(OP_ZEXT, instr) + case pyqir.Opcode.SEXT: + self._emit_sext(instr) + case pyqir.Opcode.TRUNC: + self._emit_unary(OP_TRUNC, instr) + case pyqir.Opcode.FADD: + self._emit_binary(OP_FADD | FLAG_FLOAT, instr) + case pyqir.Opcode.FSUB: + self._emit_binary(OP_FSUB | FLAG_FLOAT, instr) + case pyqir.Opcode.FMUL: + self._emit_binary(OP_FMUL | FLAG_FLOAT, instr) + case pyqir.Opcode.FDIV: + self._emit_binary(OP_FDIV | FLAG_FLOAT, instr) + case pyqir.Opcode.FP_EXT: + self._emit_unary(OP_FPEXT | FLAG_FLOAT, instr) + case pyqir.Opcode.FP_TRUNC: + self._emit_unary(OP_FPTRUNC | FLAG_FLOAT, instr) + case pyqir.Opcode.FP_TO_SI: + self._emit_unary(OP_FPTOSI, instr) + case pyqir.Opcode.SI_TO_FP: + self._emit_unary(OP_SITOFP | FLAG_FLOAT, instr) + case pyqir.Opcode.INT_TO_PTR: + self._emit_inttoptr(instr) + case _: + raise ValueError(f"Unsupported instruction: {instr.opcode}") + + # ------------------------------------------------------------------ + # Call dispatch + # ------------------------------------------------------------------ + + def _emit_call(self, call: pyqir.Call) -> None: + """Dispatch a CALL instruction based on callee name.""" + callee = call.callee.name + + match callee: + case "__quantum__qis__read_result__body" | "__quantum__rt__read_result": + dst = self._alloc_reg(call, REG_TYPE_BOOL) + result_reg = 
self._resolve_result_operand(call.args[0]) + self._emit(OP_READ_RESULT, dst=dst, src0=result_reg) + case _ if callee.startswith("__quantum__qis__"): + self._emit_quantum_call(call) + case "__quantum__rt__result_record_output": + result_reg = self._resolve_result_operand(call.args[0]) + label_str = self._extract_label(call.args[1]) + label_idx = len(self.labels) + self.labels.append(label_str) + self._emit(OP_RECORD_OUTPUT, src0=result_reg, aux0=label_idx) + case "__quantum__rt__array_record_output": + # Record structure output — pass through as-is for output formatting + count = ( + call.args[0].value + if isinstance(call.args[0], pyqir.IntConstant) + else 0 + ) + label_str = self._extract_label(call.args[1]) + label_idx = len(self.labels) + self.labels.append(label_str) + self._emit( + OP_RECORD_OUTPUT, src0=count, aux0=label_idx, aux1=1 + ) # aux1=1 -> array + case "__quantum__rt__tuple_record_output": + count = ( + call.args[0].value + if isinstance(call.args[0], pyqir.IntConstant) + else 0 + ) + label_str = self._extract_label(call.args[1]) + label_idx = len(self.labels) + self.labels.append(label_str) + self._emit( + OP_RECORD_OUTPUT, src0=count, aux0=label_idx, aux1=2 + ) # aux1=2 -> tuple + case "__quantum__rt__bool_record_output": + # Bool record output - pass through + src = self._resolve_operand(call.args[0]) + label_str = self._extract_label(call.args[1]) + label_idx = len(self.labels) + self.labels.append(label_str) + self._emit( + OP_RECORD_OUTPUT, src0=src, aux0=label_idx, aux1=3 + ) # aux1=3 -> bool + case "__quantum__rt__int_record_output": + src = self._resolve_operand(call.args[0]) + label_str = self._extract_label(call.args[1]) + label_idx = len(self.labels) + self.labels.append(label_str) + self._emit( + OP_RECORD_OUTPUT, src0=src, aux0=label_idx, aux1=4 + ) # aux1=4 -> int + case ( + "__quantum__rt__initialize" + | "__quantum__rt__begin_parallel" + | "__quantum__rt__end_parallel" + | "__quantum__qis__barrier__body" + | 
"__quantum__rt__read_loss" + ): + pass # No-op + case _ if callee in self._func_to_id: + self._emit_ir_function_call(call) + case _ if "qdk_noise" in call.callee.attributes.func: + # Check if this is a noise intrinsic (custom gate with qdk_noise attribute) + self._emit_noise_intrinsic_call(call) + case _: + raise ValueError(f"Unsupported call: {callee}") + + # ------------------------------------------------------------------ + # Quantum call dispatch + # ------------------------------------------------------------------ + + def _resolve_qubit_operands( + self, args: List[pyqir.Value] + ) -> Tuple[IntOperand | Reg, IntOperand | Reg, IntOperand | Reg]: + qs: List[IntOperand | Reg] = [IntOperand(), IntOperand(), IntOperand()] + for i, arg in enumerate(args): + qs[i] = self._resolve_qubit_operand(arg) + return (qs[0], qs[1], qs[2]) + + def _resolve_qubit_operand(self, arg: pyqir.Value) -> IntOperand | Reg: + a = self._resolve_operand(arg) + assert isinstance(a, (IntOperand, Reg)) + return a + + def _resolve_result_operand(self, arg: pyqir.Value) -> IntOperand | Reg: + a = self._resolve_operand(arg) + assert isinstance(a, (IntOperand, Reg)) + return a + + def _resolve_angle_operand(self, arg: pyqir.Value) -> FloatOperand | Reg: + a = self._resolve_operand(arg) + assert isinstance(a, (FloatOperand, Reg)) + return a + + def _emit_quantum_call(self, call: pyqir.Call) -> None: + """Emit a quantum gate, measure, or reset from a ``__quantum__qis__*`` call.""" + callee_name = call.callee.name + gate_name = callee_name.replace("__quantum__qis__", "").replace("__body", "") + op_id = GATE_MAP[gate_name] + if gate_name in MEASURE_GATES: + q = self._resolve_qubit_operand(call.args[0]) + r = self._resolve_result_operand(call.args[1]) + qop_idx = self._emit_quantum_op(op_id, q.val, r.val) + self._emit( + OP_MEASURE, + aux0=qop_idx, + aux1=q, + aux2=r, + ) + return + if gate_name in RESET_GATES: + q = self._resolve_qubit_operand(call.args[0]) + qop_idx = self._emit_quantum_op(op_id, 
q.val) + self._emit( + OP_RESET, + aux0=qop_idx, + aux1=q, + ) + return + if gate_name in ROTATION_GATES: + qubit_arg_offset = 1 + angle = self._resolve_angle_operand(call.args[0]) + else: + qubit_arg_offset = 0 + angle = FloatOperand() + qubit_arg_offset = 1 if gate_name in ROTATION_GATES else 0 + q1, q2, q3 = self._resolve_qubit_operands(call.args[qubit_arg_offset:]) + qop_idx = self._emit_quantum_op(op_id, q1.val, q2.val, q3.val, angle.val) + self._emit( + OP_QUANTUM_GATE, + aux0=qop_idx, + aux1=q1, + aux2=q2, + aux3=q3, + ) + + def _emit_noise_intrinsic_call(self, call: pyqir.Call) -> None: + """Emit a noise intrinsic call. + + When a noise config is provided and the callee is a known intrinsic, + store qubit register indices in ``call_args`` (following the same + pattern as ``_emit_ir_function_call``), then emit a single + ``OP_QUANTUM_GATE`` whose ``aux1`` = qubit count and ``aux2`` = + offset into ``call_args``. The shader reads qubit IDs from + ``call_arg_table`` at runtime, supporting arbitrarily many qubits. + + When no noise config is provided, emit an identity gate (no-op). + """ + callee_name = call.callee.name + if self._noise_intrinsics is not None and callee_name in self._noise_intrinsics: + table_id = self._noise_intrinsics[callee_name] + qubit_count = len(call.args) + # Store qubit register indices in call_args, materializing + # immediates into registers (same pattern as _emit_ir_function_call). + arg_offset = len(self.call_args) + for arg in call.args: + operand = self._resolve_qubit_operand(arg) + if isinstance(operand, Reg): + self.call_args.append(operand.val) + else: + reg = self._alloc_reg(None, REG_TYPE_PTR) + self._emit(OP_MOV | FLAG_SRC0_IMM, dst=reg, src0=operand.val) + self.call_args.append(reg.val) + # QuantumOp stores table_id in q1 and qubit_count in q2. 
+ qop_idx = self._emit_quantum_op( + CORRELATED_NOISE_OP_ID, table_id, qubit_count + ) + self._emit( + OP_QUANTUM_GATE, + aux0=qop_idx, + aux1=IntOperand(qubit_count), + aux2=IntOperand(arg_offset), + ) + elif self._noise_intrinsics is not None: + raise ValueError(f"Missing noise intrinsic: {callee_name}") + else: + # No noise config — no-op + pass + + # ------------------------------------------------------------------ + # Control flow emitters + # ------------------------------------------------------------------ + + def _emit_branch(self, instr: pyqir.Instruction) -> None: + """Emit jump or conditional branch.""" + operands = instr.operands + if len(operands) == 1: + # Unconditional: br label %target + target = self._block_to_id[operands[0]] + self._emit(OP_JUMP, dst=target) + else: + # Conditional: br i1 %cond, label %true, label %false + # pyqir operands: [condition, FALSE_block, TRUE_block] + cond_reg = self._resolve_operand(operands[0]) + false_block = self._block_to_id[operands[1]] + true_block = self._block_to_id[operands[2]] + self._emit(OP_BRANCH, src0=cond_reg, aux0=true_block, aux1=false_block) + + def _emit_phi(self, phi_instr: pyqir.Phi) -> None: + """Emit a PHI node with side table entries.""" + dst_reg = self._alloc_reg(phi_instr, self._type_tag(phi_instr.type)) + phi_offset = len(self.phi_entries) + for value, block in phi_instr.incoming: + operand = self._resolve_operand(value) + if isinstance(operand, Reg): + val_reg = operand.val + else: + # Immediate values must be materialized into a register + # because the GPU phi_table stores register indices. 
+ reg = self._alloc_reg(None, self._type_tag(phi_instr.type)) + self._emit(OP_MOV | FLAG_SRC0_IMM, dst=reg, src0=operand.val) + val_reg = reg.val + block_id = self._block_to_id[block] + phi_entry = PhiNodeEntry(block_id, val_reg) + self.phi_entries.append(phi_entry) + count = len(phi_instr.incoming) + self._emit(OP_PHI, dst=dst_reg, aux0=phi_offset, aux1=count) + + def _emit_select(self, instr: pyqir.Instruction) -> None: + """Emit a SELECT instruction.""" + dst = self._alloc_reg(instr, self._type_tag(instr.type)) + cond = self._resolve_operand(instr.operands[0]) + true_val = self._resolve_operand(instr.operands[1]) + false_val = self._resolve_operand(instr.operands[2]) + self._emit(OP_SELECT, dst=dst, src0=cond, aux0=true_val, aux1=false_val) + + def _emit_switch(self, switch_instr: pyqir.Switch) -> None: + """Emit a SWITCH instruction with case table entries. + + NOTE: We use ``operands`` instead of the ``.cond`` / ``.cases`` + helpers because pyqir's ``Switch.cond`` returns a stale ``Function`` + reference when ``mod.functions`` has already been iterated (two-pass + compilation). ``operands`` is not affected by this behavior. + """ + # operands layout: [cond, default_block, case_val0, case_block0, ...] 
+ ops = switch_instr.operands + cond_reg = self._resolve_operand(ops[0]) + default_block = self._block_to_id[ops[1]] + case_offset = len(self.switch_cases) + num_case_pairs = (len(ops) - 2) // 2 + for i in range(num_case_pairs): + case_val = ops[2 + 2 * i] + case_block = ops[2 + 2 * i + 1] + target_block = self._block_to_id[case_block] + switch_case = SwitchCase(case_val.value, target_block) + self.switch_cases.append(switch_case) + case_count = num_case_pairs + self._emit( + OP_SWITCH, + src0=cond_reg, + aux0=default_block, + aux1=case_offset, + aux2=case_count, + ) + + def _emit_ret(self, instr: Any) -> None: + """Emit RET or CALL_RETURN.""" + if not self._current_func_is_entry: + # Return from IR-defined function + if len(instr.operands) > 0: + ret_reg = self._resolve_operand(instr.operands[0]) + self._emit(OP_CALL_RETURN, src0=ret_reg) + else: + self._emit(OP_CALL_RETURN) + else: + # Return from entry point + if len(instr.operands) > 0: + ret_reg = self._resolve_operand(instr.operands[0]) + self._emit(OP_RET, dst=ret_reg) + else: + # Void return — use immediate 0 as exit code. 
+ self._emit(OP_RET, dst=IntOperand(0)) + + # ------------------------------------------------------------------ + # Comparison emitters + # ------------------------------------------------------------------ + + def _emit_icmp(self, instr: Any) -> None: + """Emit an integer comparison.""" + cond_code = ICMP_MAP.get(instr.predicate, 0) + dst = self._alloc_reg(instr, REG_TYPE_BOOL) + src0 = self._resolve_operand(instr.operands[0]) + src1 = self._resolve_operand(instr.operands[1]) + self._emit(OP_ICMP | (cond_code << 8), dst=dst, src0=src0, src1=src1) + + def _emit_fcmp(self, instr: Any) -> None: + """Emit a float comparison.""" + cond_code = FCMP_MAP.get(instr.predicate, 0) + dst = self._alloc_reg(instr, REG_TYPE_BOOL) + src0 = self._resolve_operand(instr.operands[0]) + src1 = self._resolve_operand(instr.operands[1]) + self._emit( + OP_FCMP | (cond_code << 8) | FLAG_FLOAT, + dst=dst, + src0=src0, + src1=src1, + ) + + # ------------------------------------------------------------------ + # inttoptr handling + # ------------------------------------------------------------------ + + def _emit_inttoptr(self, instr: Any) -> None: + """Handle ``inttoptr`` — just propagate the source register. + + ``inttoptr i64 %v to %Qubit*`` is a no-op cast; the integer value + is the qubit/result ID. We use OP_MOV to alias the value. 
+ """ + src_operand = instr.operands[0] + src_reg = self._resolve_operand(src_operand) + # Register the inttoptr result as pointing to the same register + dst = self._alloc_reg(instr, REG_TYPE_PTR) + self._emit(OP_MOV, dst=dst, src0=src_reg) + + # ------------------------------------------------------------------ + # IR-defined function call/return + # ------------------------------------------------------------------ + + def _emit_ir_function_call(self, call: Any) -> None: + """Emit OP_CALL for an IR-defined function.""" + func_name = call.callee.name + func_id = self._func_to_id[func_name] + arg_offset = len(self.call_args) + for arg in call.args: + operand = self._resolve_operand(arg) + if isinstance(operand, Reg): + self.call_args.append(operand.val) + else: + # Immediate values must be materialized into a register + # because the GPU call_arg_table stores register indices. + reg = self._alloc_reg(None, REG_TYPE_PTR) + self._emit(OP_MOV | FLAG_SRC0_IMM, dst=reg, src0=operand.val) + self.call_args.append(reg.val) + # Allocate return register if function has non-void return type + if call.type.is_void: + return_reg = VOID_RETURN # no return + else: + return_reg = self._alloc_reg(call, REG_TYPE_I32) + self._emit( + OP_CALL, + dst=return_reg, + aux0=func_id, + aux1=len(call.args), + aux2=arg_offset, + ) + + # ------------------------------------------------------------------ + # Helpers + # ------------------------------------------------------------------ + + def _extract_label(self, value: Any) -> str: + """Extract a label string from a call argument.""" + bs = pyqir.extract_byte_string(value) + if bs is not None: + return bs.decode("utf-8") + return "" diff --git a/source/qdk_package/qdk/_device/__init__.py b/source/qdk_package/qdk/_device/__init__.py new file mode 100644 index 0000000000..59041732f4 --- /dev/null +++ b/source/qdk_package/qdk/_device/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from ._device import Device + +__all__ = [ + "Device", +] diff --git a/source/qdk_package/qdk/_device/_atom/__init__.py b/source/qdk_package/qdk/_device/_atom/__init__.py new file mode 100644 index 0000000000..bea15a145c --- /dev/null +++ b/source/qdk_package/qdk/_device/_atom/__init__.py @@ -0,0 +1,299 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from .._device import Device, Zone, ZoneType +from ..._simulation import NoiseConfig, run_qir_clifford, run_qir_cpu, run_qir_gpu +from ..._native import try_create_gpu_adapter +from ..._qsharp import QirInputData +from ... import telemetry_events + +from typing import List, Literal, Optional +import time + + +class NeutralAtomDevice(Device): + """ + Representation of a neutral atom device quantum computer. + """ + + def __init__( + self, + column_count: int = 40, + register_zone_row_count: int = 25, + interaction_zone_row_count: int = 2, + measurement_zone_row_count: int = 2, + ): + default_layout = ( + column_count == 40 + and register_zone_row_count == 25 + and interaction_zone_row_count == 2 + and measurement_zone_row_count == 2 + ) + telemetry_events.on_neutral_atom_init(default_layout) + + super().__init__( + column_count, + [ + Zone("Register 1", register_zone_row_count, ZoneType.REG), + Zone("Interaction Zone", interaction_zone_row_count, ZoneType.INTER), + Zone("Measurement Zone", measurement_zone_row_count, ZoneType.MEAS), + ], + ) + + def _init_home_locs(self): + # Set up the home locations for qubits in the NeutralAtomDevice layout. 
+ assert len(self.zones) == 3 + assert ( + self.zones[0].type == ZoneType.REG + and self.zones[1].type == ZoneType.INTER + and self.zones[2].type == ZoneType.MEAS + ) + rz1_rows = range(self.zones[0].row_count - 1, -1, -1) + self.home_locs = [] + for row in range(self.zones[0].row_count): + for col in range(self.column_count): + self.home_locs.append((rz1_rows[row], col)) + + def compile( + self, + program: str | QirInputData, + verbose: bool = False, + ) -> QirInputData: + """ + Compile a QIR program for the NeutralAtomDevice device. This includes decomposing gates to the native gate set, + optimizing sequences of single qubit gates, pruning unused functions, and reordering instructions to + enable better scheduling during execution. + + :param program: The QIR program to compile, either as a string or as QirInputData. + :param verbose: If true, print detailed information about each compilation step. + :returns QirInputData: The compiled QIR program. + """ + + from ._optimize import ( + OptimizeSingleQubitGates, + PruneUnusedFunctions, + ) + from ._decomp import ( + DecomposeMultiQubitToCZ, + DecomposeSingleRotationToRz, + DecomposeSingleQubitToRzSX, + ReplaceResetWithMResetZ, + ) + from ._reorder import Reorder + from pyqir import Module, Context + + start_time = time.monotonic() + all_start_time = start_time + telemetry_events.on_neutral_atom_compile() + + name = "" + if isinstance(program, QirInputData): + name = program._name + + if verbose: + print(f"Compiling program {name} for NeutralAtomDevice device...") + + module = Module.from_ir(Context(), str(program)) + if verbose: + end_time = time.monotonic() + print(f" Loaded module in {end_time - start_time:.2f} seconds") + start_time = end_time + + OptimizeSingleQubitGates().run(module) + if verbose: + end_time = time.monotonic() + print( + f" Optimized single qubit gates in {end_time - start_time:.2f} seconds" + ) + start_time = end_time + + DecomposeMultiQubitToCZ().run(module) + if verbose: + end_time = 
time.monotonic() + print( + f" Decomposed multi-qubit gates to CZ in {end_time - start_time:.2f} seconds" + ) + start_time = end_time + + OptimizeSingleQubitGates().run(module) + if verbose: + end_time = time.monotonic() + print( + f" Optimized single qubit gates in {end_time - start_time:.2f} seconds" + ) + start_time = end_time + + DecomposeSingleRotationToRz().run(module) + if verbose: + end_time = time.monotonic() + print( + f" Decomposed single rotations to Rz in {end_time - start_time:.2f} seconds" + ) + start_time = end_time + + OptimizeSingleQubitGates().run(module) + if verbose: + end_time = time.monotonic() + print( + f" Optimized single qubit gates in {end_time - start_time:.2f} seconds" + ) + start_time = end_time + + DecomposeSingleQubitToRzSX().run(module) + if verbose: + end_time = time.monotonic() + print( + f" Decomposed single qubit gates to Rz and SX in {end_time - start_time:.2f} seconds" + ) + start_time = end_time + + OptimizeSingleQubitGates().run(module) + if verbose: + end_time = time.monotonic() + print( + f" Optimized single qubit gates in {end_time - start_time:.2f} seconds" + ) + start_time = end_time + + ReplaceResetWithMResetZ().run(module) + if verbose: + end_time = time.monotonic() + print( + f" Replaced resets with mresetz in {end_time - start_time:.2f} seconds" + ) + start_time = end_time + + PruneUnusedFunctions().run(module) + if verbose: + end_time = time.monotonic() + print(f" Pruned unused functions in {end_time - start_time:.2f} seconds") + start_time = end_time + + Reorder(self).run(module) + if verbose: + end_time = time.monotonic() + print(f" Reordered instructions in {end_time - start_time:.2f} seconds") + start_time = end_time + + end_time = time.monotonic() + telemetry_events.on_neutral_atom_compile_end((end_time - all_start_time) * 1000) + if verbose: + print( + f"Finished compiling program {name} in {end_time - all_start_time:.2f} seconds" + ) + + return QirInputData(name, str(module)) + + def show_trace(self, qir: 
str | QirInputData): + """ + Visualize the execution trace of a QIR program on the NeutralAtomDevice device using the Atoms widget. + This includes approximate layout and scheduling of the program to show the parallelism of gates and + movement of qubits during execution. + + :param qir: The QIR program to visualize, either as a string or as QirInputData. + """ + + try: + from qsharp_widgets import Atoms + except ImportError: + raise ImportError( + "The qsharp-widgets package is required for showing atom trace visualization. " + "Please install it via 'pip install \"qdk[jupyter]\"' or 'pip install qsharp-widgets'." + ) + from ._trace import Trace + from ._validate import ValidateNoConditionalBranches + from ._scheduler import Schedule + from pyqir import Module, Context + from IPython.display import display + + start_time = time.monotonic() + telemetry_events.on_neutral_atom_trace() + + # Compile and visualize the trace in one step. + compiled = self.compile(qir) + module = Module.from_ir(Context(), str(compiled)) + ValidateNoConditionalBranches().run(module) + Schedule(self).run(module) + tracer = Trace(self) + tracer.run(module) + display(Atoms(machine_layout=self.get_layout(), trace_data=tracer.trace)) + + end_time = time.monotonic() + telemetry_events.on_neutral_atom_trace_end((end_time - start_time) * 1000) + + def simulate( + self, + qir: str | QirInputData, + shots=1, + noise: NoiseConfig | None = None, + type: Optional[Literal["clifford", "cpu", "gpu"]] = None, + seed: Optional[int] = None, + ) -> List: + """ + Simulate a QIR program on the NeutralAtomDevice device. This includes approximate layout and scheduling of the program + to model the parallelism of gates and movement of qubits during execution. The simulation can optionally + include noise based on a provided noise configuration. + + :param qir: The QIR program to simulate, either as a string or as QirInputData. + :param shots: The number of shots to simulate. Defaults to 1. 
+ :param noise: An optional NoiseConfig to include noise in the simulation. + :param type: The type of simulator to use: + Use `"clifford"` if your QIR only contains Clifford gates and measurements. + Use `"gpu"` if you have a GPU available in your system. + Use `"cpu"` as a fallback option if you don't have a GPU in your system. + If `None` (default), the GPU simulator will be tried first, falling back to + CPU if a suitable GPU device could not be located. + :param seed: An optional random seed for reproducibility. + :return: The results of each shot of the simulation as a list. + """ + + from ._validate import ValidateNoConditionalBranches + from ._scheduler import Schedule + from ._decomp import DecomposeRzAnglesToCliffordGates + from pyqir import Module, Context + + start_time = time.monotonic() + + using_noise = noise is not None + if noise is None: + noise = NoiseConfig() + + compiled = self.compile(qir) + module = Module.from_ir(Context(), str(compiled)) + ValidateNoConditionalBranches().run(module) + Schedule(self).run(module) + + if type is None: + try: + try_create_gpu_adapter() + type = "gpu" + except OSError: + telemetry_events.on_neutral_atom_cpu_fallback() + type = "cpu" + + telemetry_events.on_neutral_atom_simulate(shots, using_noise, type) + + match type: + case "clifford": + DecomposeRzAnglesToCliffordGates().run(module) + result = run_qir_clifford( + str(module), + shots, + noise, + seed, + ) + case "cpu": + result = run_qir_cpu(str(module), shots, noise, seed) + case "gpu": + result = run_qir_gpu(str(module), shots, noise, seed) + case _: + raise ValueError(f"Simulation type {type} is not supported") + + end_time = time.monotonic() + telemetry_events.on_neutral_atom_simulate_end( + (end_time - start_time) * 1000, shots, using_noise, type + ) + return result + + +__all__ = ["NeutralAtomDevice"] diff --git a/source/qdk_package/qdk/_device/_atom/_decomp.py b/source/qdk_package/qdk/_device/_atom/_decomp.py new file mode 100644 index 
0000000000..f51d878119 --- /dev/null +++ b/source/qdk_package/qdk/_device/_atom/_decomp.py @@ -0,0 +1,510 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from pyqir import ( + FloatConstant, + const, + Function, + FunctionType, + PointerType, + Type, + result, + Context, + Linkage, + QirModuleVisitor, + required_num_results, +) +from math import pi +from ._utils import TOLERANCE + + +class DecomposeMultiQubitToCZ(QirModuleVisitor): + """ + Decomposes all multi-qubit gates to CZ gates and single qubit gates. + """ + + h_func: Function + s_func: Function + sadj_func: Function + t_func: Function + tadj_func: Function + rz_func: Function + cz_func: Function + + def _on_module(self, module): + void = Type.void(module.context) + qubit_ty = PointerType(Type.void(module.context)) + self.double_ty = Type.double(module.context) + # Find or create all the needed functions. + for func in module.functions: + match func.name: + case "__quantum__qis__h__body": + self.h_func = func + case "__quantum__qis__s__body": + self.s_func = func + case "__quantum__qis__s__adj": + self.sadj_func = func + case "__quantum__qis__t__body": + self.t_func = func + case "__quantum__qis__t__adj": + self.tadj_func = func + case "__quantum__qis__rz__body": + self.rz_func = func + case "__quantum__qis__cz__body": + self.cz_func = func + if not hasattr(self, "h_func"): + self.h_func = Function( + FunctionType(void, [qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__h__body", + module, + ) + if not hasattr(self, "s_func"): + self.s_func = Function( + FunctionType(void, [qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__s__body", + module, + ) + if not hasattr(self, "sadj_func"): + self.sadj_func = Function( + FunctionType(void, [qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__s__adj", + module, + ) + if not hasattr(self, "t_func"): + self.t_func = Function( + FunctionType(void, [qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__t__body", + module, + ) + if not 
hasattr(self, "tadj_func"): + self.tadj_func = Function( + FunctionType(void, [qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__t__adj", + module, + ) + if not hasattr(self, "rz_func"): + self.rz_func = Function( + FunctionType(void, [self.double_ty, qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__rz__body", + module, + ) + if not hasattr(self, "cz_func"): + self.cz_func = Function( + FunctionType(void, [qubit_ty, qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__cz__body", + module, + ) + super()._on_module(module) + + def _on_qis_ccx(self, call, ctrl1, ctrl2, target): + self.builder.insert_before(call) + self.builder.call(self.h_func, [target]) + self.builder.call(self.tadj_func, [ctrl1]) + self.builder.call(self.tadj_func, [ctrl2]) + self.builder.call(self.h_func, [ctrl1]) + self.builder.call(self.cz_func, [target, ctrl1]) + self.builder.call(self.h_func, [ctrl1]) + self.builder.call(self.t_func, [ctrl1]) + self.builder.call(self.h_func, [target]) + self.builder.call(self.cz_func, [ctrl2, target]) + self.builder.call(self.h_func, [target]) + self.builder.call(self.h_func, [ctrl1]) + self.builder.call(self.cz_func, [ctrl2, ctrl1]) + self.builder.call(self.h_func, [ctrl1]) + self.builder.call(self.t_func, [target]) + self.builder.call(self.tadj_func, [ctrl1]) + self.builder.call(self.h_func, [target]) + self.builder.call(self.cz_func, [ctrl2, target]) + self.builder.call(self.h_func, [target]) + self.builder.call(self.h_func, [ctrl1]) + self.builder.call(self.cz_func, [target, ctrl1]) + self.builder.call(self.h_func, [ctrl1]) + self.builder.call(self.tadj_func, [target]) + self.builder.call(self.t_func, [ctrl1]) + self.builder.call(self.h_func, [ctrl1]) + self.builder.call(self.cz_func, [ctrl2, ctrl1]) + self.builder.call(self.h_func, [ctrl1]) + self.builder.call(self.h_func, [target]) + call.erase() + + def _on_qis_cx(self, call, ctrl, target): + self.builder.insert_before(call) + self.builder.call(self.h_func, [target]) + self.builder.call(self.cz_func, 
[ctrl, target]) + self.builder.call(self.h_func, [target]) + call.erase() + + def _on_qis_cy(self, call, ctrl, target): + self.builder.insert_before(call) + self.builder.call(self.sadj_func, [target]) + self.builder.call(self.h_func, [target]) + self.builder.call(self.cz_func, [ctrl, target]) + self.builder.call(self.h_func, [target]) + self.builder.call(self.s_func, [target]) + call.erase() + + def _on_qis_rxx(self, call, angle, target1, target2): + self.builder.insert_before(call) + self.builder.call(self.h_func, [target2]) + self.builder.call(self.cz_func, [target2, target1]) + self.builder.call(self.h_func, [target1]) + self.builder.call(self.rz_func, [angle, target1]) + self.builder.call(self.h_func, [target1]) + self.builder.call(self.cz_func, [target2, target1]) + self.builder.call(self.h_func, [target2]) + call.erase() + + def _on_qis_ryy(self, call, angle, target1, target2): + self.builder.insert_before(call) + self.builder.call(self.sadj_func, [target1]) + self.builder.call(self.sadj_func, [target2]) + self.builder.call(self.h_func, [target2]) + self.builder.call(self.cz_func, [target2, target1]) + self.builder.call(self.h_func, [target1]) + self.builder.call(self.rz_func, [angle, target1]) + self.builder.call(self.h_func, [target1]) + self.builder.call(self.cz_func, [target2, target1]) + self.builder.call(self.h_func, [target2]) + self.builder.call(self.s_func, [target2]) + self.builder.call(self.s_func, [target1]) + call.erase() + + def _on_qis_rzz(self, call, angle, target1, target2): + self.builder.insert_before(call) + self.builder.call(self.h_func, [target1]) + self.builder.call(self.cz_func, [target2, target1]) + self.builder.call(self.h_func, [target1]) + self.builder.call(self.rz_func, [angle, target1]) + self.builder.call(self.h_func, [target1]) + self.builder.call(self.cz_func, [target2, target1]) + self.builder.call(self.h_func, [target1]) + call.erase() + + def _on_qis_swap(self, call, target1, target2): + self.builder.insert_before(call) + 
self.builder.call(self.h_func, [target2]) + self.builder.call(self.cz_func, [target1, target2]) + self.builder.call(self.h_func, [target2]) + self.builder.call(self.h_func, [target1]) + self.builder.call(self.cz_func, [target2, target1]) + self.builder.call(self.h_func, [target1]) + self.builder.call(self.h_func, [target2]) + self.builder.call(self.cz_func, [target1, target2]) + self.builder.call(self.h_func, [target2]) + call.erase() + + +class DecomposeSingleRotationToRz(QirModuleVisitor): + """ + Decomposes all single qubit rotations to Rz gates. + """ + + h_func: Function + s_func: Function + sadj_func: Function + rz_func: Function + + def _on_module(self, module): + void = Type.void(module.context) + qubit_ty = PointerType(Type.void(module.context)) + self.double_ty = Type.double(module.context) + # Find or create all the needed functions. + for func in module.functions: + match func.name: + case "__quantum__qis__h__body": + self.h_func = func + case "__quantum__qis__s__body": + self.s_func = func + case "__quantum__qis__s__adj": + self.sadj_func = func + case "__quantum__qis__rz__body": + self.rz_func = func + if not hasattr(self, "h_func"): + self.h_func = Function( + FunctionType(void, [qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__h__body", + module, + ) + if not hasattr(self, "s_func"): + self.s_func = Function( + FunctionType(void, [qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__s__body", + module, + ) + if not hasattr(self, "sadj_func"): + self.sadj_func = Function( + FunctionType(void, [qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__s__adj", + module, + ) + if not hasattr(self, "rz_func"): + self.rz_func = Function( + FunctionType(void, [self.double_ty, qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__rz__body", + module, + ) + super()._on_module(module) + + def _on_qis_rx(self, call, angle, target): + self.builder.insert_before(call) + self.builder.call(self.h_func, [target]) + self.builder.call( + self.rz_func, + [angle, target], + ) + 
self.builder.call(self.h_func, [target]) + call.erase() + + def _on_qis_ry(self, call, angle, target): + self.builder.insert_before(call) + self.builder.call(self.sadj_func, [target]) + self.builder.call(self.h_func, [target]) + self.builder.call( + self.rz_func, + [angle, target], + ) + self.builder.call(self.h_func, [target]) + self.builder.call(self.s_func, [target]) + call.erase() + + +class DecomposeSingleQubitToRzSX(QirModuleVisitor): + """ + Decomposes all single qubit gates to Rz and Sx gates. + """ + + sx_func: Function + rz_func: Function + + def _on_module(self, module): + void = Type.void(module.context) + qubit_ty = PointerType(Type.void(module.context)) + self.double_ty = Type.double(module.context) + # Find or create all the needed functions. + for func in module.functions: + match func.name: + case "__quantum__qis__sx__body": + self.sx_func = func + case "__quantum__qis__rz__body": + self.rz_func = func + if not hasattr(self, "sx_func"): + self.sx_func = Function( + FunctionType(void, [qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__sx__body", + module, + ) + if not hasattr(self, "rz_func"): + self.rz_func = Function( + FunctionType(void, [self.double_ty, qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__rz__body", + module, + ) + super()._on_module(module) + + def _on_qis_h(self, call, target): + self.builder.insert_before(call) + self.builder.call( + self.rz_func, + [const(self.double_ty, pi / 2), target], + ) + self.builder.call(self.sx_func, [target]) + self.builder.call( + self.rz_func, + [const(self.double_ty, pi / 2), target], + ) + call.erase() + + def _on_qis_s(self, call, target): + self.builder.insert_before(call) + self.builder.call( + self.rz_func, + [const(self.double_ty, pi / 2), target], + ) + call.erase() + + def _on_qis_s_adj(self, call, target): + self.builder.insert_before(call) + self.builder.call( + self.rz_func, + [const(self.double_ty, -pi / 2), target], + ) + call.erase() + + def _on_qis_t(self, call, target): + 
self.builder.insert_before(call) + self.builder.call( + self.rz_func, + [const(self.double_ty, pi / 4), target], + ) + call.erase() + + def _on_qis_t_adj(self, call, target): + self.builder.insert_before(call) + self.builder.call( + self.rz_func, + [const(self.double_ty, -pi / 4), target], + ) + call.erase() + + def _on_qis_x(self, call, target): + self.builder.insert_before(call) + self.builder.call(self.sx_func, [target]) + self.builder.call(self.sx_func, [target]) + call.erase() + + def _on_qis_y(self, call, target): + self.builder.insert_before(call) + self.builder.call(self.sx_func, [target]) + self.builder.call(self.sx_func, [target]) + self.builder.call( + self.rz_func, + [const(self.double_ty, pi), target], + ) + call.erase() + + def _on_qis_z(self, call, target): + self.builder.insert_before(call) + self.builder.call( + self.rz_func, + [const(self.double_ty, pi), target], + ) + call.erase() + + +class DecomposeRzAnglesToCliffordGates(QirModuleVisitor): + """ + Ensure that the module only contains Clifford gates instead of rotation angles. + """ + + THREE_PI_OVER_2 = 3 * pi / 2 + PI_OVER_2 = pi / 2 + TWO_PI = 2 * pi + + z_func: Function + s_func: Function + sadj_func: Function + + def _on_module(self, module): + void = Type.void(module.context) + qubit_ty = PointerType(Type.void(module.context)) + self.double_ty = Type.double(module.context) + # Find or create all the needed functions. 
+ for func in module.functions: + match func.name: + case "__quantum__qis__s__body": + self.s_func = func + case "__quantum__qis__s__adj": + self.sadj_func = func + case "__quantum__qis__z__body": + self.z_func = func + + if not hasattr(self, "s_func"): + self.s_func = Function( + FunctionType(void, [qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__s__body", + module, + ) + if not hasattr(self, "sadj_func"): + self.sadj_func = Function( + FunctionType(void, [qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__s__adj", + module, + ) + if not hasattr(self, "z_func"): + self.z_func = Function( + FunctionType(void, [qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__z__body", + module, + ) + + super()._on_module(module) + + def _on_qis_rz(self, call, angle, target): + if not isinstance(angle, FloatConstant): + raise ValueError("Angle used in RZ must be a constant") + angle = angle.value + + self.builder.insert_before(call) + + if ( + abs(angle - self.THREE_PI_OVER_2) < TOLERANCE + or abs(angle + self.PI_OVER_2) < TOLERANCE + ): + self.builder.call(self.sadj_func, [target]) + elif abs(angle - pi) < TOLERANCE or abs(angle + pi) < TOLERANCE: + self.builder.call(self.z_func, [target]) + elif ( + abs(angle - self.PI_OVER_2) < TOLERANCE + or abs(angle + self.THREE_PI_OVER_2) < TOLERANCE + ): + self.builder.call(self.s_func, [target]) + elif ( + angle < TOLERANCE + or abs(angle - self.TWO_PI) < TOLERANCE + or abs(angle + self.TWO_PI) < TOLERANCE + ): + # I, drop it + pass + else: + raise ValueError( + f"Angle {angle} used in RZ is not a Clifford compatible rotation angle" + ) + + call.erase() + + +class ReplaceResetWithMResetZ(QirModuleVisitor): + """ + Replaces all reset operations with a call to mresetz using a new, ignored result identifier. 
+ """ + + context: Context + mresetz_func: Function + next_result_id: int + + def _on_module(self, module): + self.context = module.context + void = Type.void(self.context) + qubit_ty = PointerType(Type.void(self.context)) + result_ty = PointerType(Type.void(self.context)) + # Find or create the intrinsic mresetz function + for func in module.functions: + match func.name: + case "__quantum__qis__mresetz__body": + self.mresetz_func = func + if not hasattr(self, "mresetz_func"): + self.mresetz_func = Function( + FunctionType(void, [qubit_ty, result_ty]), + Linkage.EXTERNAL, + "__quantum__qis__mresetz__body", + module, + ) + super()._on_module(module) + + def _on_function(self, function): + self.next_result_id = required_num_results(function) or 0 + super()._on_function(function) + + def _on_qis_reset(self, call, target): + self.builder.insert_before(call) + # Create a new result identifier to ignore the measurement result + result_id = result(self.context, self.next_result_id) + self.next_result_id += 1 + self.builder.call(self.mresetz_func, [target, result_id]) + call.erase() diff --git a/source/qdk_package/qdk/_device/_atom/_optimize.py b/source/qdk_package/qdk/_device/_atom/_optimize.py new file mode 100644 index 0000000000..090a2fa16b --- /dev/null +++ b/source/qdk_package/qdk/_device/_atom/_optimize.py @@ -0,0 +1,315 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from pyqir import ( + Type, + Function, + FunctionType, + FloatConstant, + Linkage, + PointerType, + const, + ptr_id, + is_entry_point, + QirModuleVisitor, +) +from math import pi + +from ._utils import TOLERANCE + + +class OptimizeSingleQubitGates(QirModuleVisitor): + """ + Optimizes single qubit gates by looking for sequences of a gate and its adjoint on a given qubit. + Will also try to replace certain patterns with simpler gates. 
+ """ + + sx_func: Function + mresetz_func: Function + + def _on_module(self, module): + void = Type.void(module.context) + qubit_ty = PointerType(Type.void(module.context)) + result_ty = qubit_ty + self.double_ty = Type.double(module.context) + self.used_qubits = set() + # Find or create the intrinsic gate functions + for func in module.functions: + match func.name: + case "__quantum__qis__mresetz__body": + self.mresetz_func = func + case "__quantum__qis__sx__body": + self.sx_func = func + if not hasattr(self, "sx_fun"): + self.sx_func = Function( + FunctionType(void, [qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__sx__body", + module, + ) + if not hasattr(self, "mresetz_func"): + self.mresetz_func = Function( + FunctionType(void, [qubit_ty, result_ty]), + Linkage.EXTERNAL, + "__quantum__qis__mresetz__body", + module, + ) + super()._on_module(module) + + def _drop_ops(self, qubits): + # Since instructions are only removed when they are canceled out by their adjoint or folded with another + # instruction, we can just pop the entries for these qubits so they start fresh with the next gates. + for qubit in qubits: + q = ptr_id(qubit) + self.qubit_ops.pop(q, None) + self.last_meas.pop(q, None) + self.used_qubits.add(q) + + def _schedule_gate(self, instr, key, name, adj): + if key in self.qubit_ops: + # There are previous operations on this qubit, so check if the last one was the adjoint of this one. + if self.qubit_ops[key][-1][1] == adj: + (other_instr, _) = self.qubit_ops[key].pop() + # Erase the adjoint instruction and the current instruction since they cancel out. + other_instr.erase() + instr.erase() + elif ( + len(self.qubit_ops[key]) > 1 + and name == "h" + and self.qubit_ops[key][-1][1] == "s" + and self.qubit_ops[key][-2][1] == "h" + ): + # We have a sequence of h s h, which can be replaced with a single sx. 
+                self.builder.insert_before(instr)
+                self.builder.call(self.sx_func, [instr.args[0]])
+                instr.erase()
+                (other_instr, _) = self.qubit_ops[key].pop()
+                other_instr.erase()
+                (other_instr, _) = self.qubit_ops[key].pop()
+                other_instr.erase()
+            else:
+                # The last operation was not the adjoint of this one, so add this instruction to the list.
+                self.qubit_ops[key].append((instr, name))
+                self.used_qubits.add(key)
+                self.last_meas.pop(key, None)
+
+            if len(self.qubit_ops[key]) == 0:
+                # There are no more operations on this qubit, so pop its entry to avoid having empty lists in the dict.
+                self.qubit_ops.pop(key)
+
+        else:
+            # No previous operations on this qubit, so create a new list from this instruction.
+            self.qubit_ops[key] = [(instr, name)]
+            self.used_qubits.add(key)
+            self.last_meas.pop(key, None)
+
+    def _schedule_rotation(self, instr, key, name):
+        if isinstance(instr.args[0], FloatConstant):
+            # The angle is constant, so we can try to fold this rotation with other instances of the same rotation
+            # that are constant.
+            if key in self.qubit_ops:
+                if self.qubit_ops[key][-1][1] == name and isinstance(
+                    self.qubit_ops[key][-1][0].args[0], FloatConstant
+                ):
+                    # The last operation on this qubit was also a rotation of the same type by a constant angle.
+                    (other_instr, _) = self.qubit_ops[key].pop()
+                    new_angle = instr.args[0].value + other_instr.args[0].value
+                    sign = -1 if new_angle < 0 else 1
+                    abs_new_angle = abs(new_angle)
+                    # Normalize the angle to be within 0 to 2*pi
+                    while abs_new_angle > 2 * pi:
+                        abs_new_angle -= 2 * pi
+                    new_angle = sign * abs_new_angle
+                    if (
+                        abs(new_angle) > TOLERANCE
+                        and abs(abs(new_angle) - (2 * pi)) > TOLERANCE
+                    ):
+                        # Create a new rotation instruction with the sum of the angles,
+                        # and insert it, but only if the angle is above our threshold.
+ self.builder.insert_before(instr) + new_instr = self.builder.call( + instr.callee, + [const(self.double_ty, new_angle), instr.args[1]], + ) + self.qubit_ops[key].append((new_instr, name)) + self.used_qubits.add(key) + self.last_meas.pop(key, None) + # Erase the old instructions the new rotation replaces. + other_instr.erase() + instr.erase() + else: + # Can't fold this rotation with the previous one, so just add it to the list. + self.qubit_ops[key].append((instr, name)) + self.used_qubits.add(key) + self.last_meas.pop(key, None) + + if len(self.qubit_ops[key]) == 0: + # There are no more operations on this qubit, so pop it's entry to avoid having empty lists in the dict. + self.qubit_ops.pop(key) + + else: + # No previous operations on this qubit, so create a new list from this instruction. + self.qubit_ops[key] = [(instr, name)] + self.used_qubits.add(key) + self.last_meas.pop(key, None) + else: + # This angle is not constant, so append it to the list of operations on this qubit. + if key in self.qubit_ops: + self.qubit_ops[key].append((instr, name)) + else: + self.qubit_ops[key] = [(instr, name)] + self.used_qubits.add(key) + self.last_meas.pop(key, None) + + def _on_function(self, function): + self.last_meas = {} + self.qubit_ops = {} + super()._on_function(function) + # At the end of a function, if there are any remaining entries in self.last_meas, it means + # that there were measurements on qubits that were never reset. Convert those into mresetz. + for key, (instr, target, result) in self.last_meas.items(): + self.builder.insert_before(instr) + self.builder.call( + self.mresetz_func, + [target, result], + ) + instr.erase() + for key in self.qubit_ops: + if self.qubit_ops[key][-1][1] == "reset": + # The last operation on this qubit was a reset, so we can drop it. + (instr, _) = self.qubit_ops[key].pop() + instr.erase() + + def _on_block(self, block): + # Each block is independent, so start from an empty list of operations per qubit. 
+ self.qubit_ops = {} + self.last_meas = {} + super()._on_block(block) + + def _on_call_instr(self, call): + if call.callee.name == "__quantum__qis__sx__body": + self._drop_ops([call.args[0]]) + elif call.callee.name == "__quantum__qis__move__body": + self._drop_ops([call.args[0]]) + elif call.callee.name == "__quantum__qis__barrier__body": + # Don't optimize across barrier calls. Treat this as a drop of all tracked gates, + # which effectively flushes all scheduled operations. + self.qubit_ops = {} + self.last_meas = {} + else: + super()._on_call_instr(call) + + def _on_qis_h(self, call, target): + self._schedule_gate(call, ptr_id(target), "h", "h") + + def _on_qis_s(self, call, target): + self._schedule_gate(call, ptr_id(target), "s", "s_adj") + + def _on_qis_s_adj(self, call, target): + self._schedule_gate(call, ptr_id(target), "s_adj", "s") + + def _on_qis_t(self, call, target): + self._schedule_gate(call, ptr_id(target), "t", "t_adj") + + def _on_qis_t_adj(self, call, target): + self._schedule_gate(call, ptr_id(target), "t_adj", "t") + + def _on_qis_x(self, call, target): + self._schedule_gate(call, ptr_id(target), "x", "x") + + def _on_qis_y(self, call, target): + self._schedule_gate(call, ptr_id(target), "y", "y") + + def _on_qis_z(self, call, target): + self._schedule_gate(call, ptr_id(target), "z", "z") + + def _on_qis_rx(self, call, angle, target): + self._schedule_rotation(call, ptr_id(target), "rx") + + def _on_qis_rxx(self, call, angle, target1, target2): + self._drop_ops([target1, target2]) + + def _on_qis_ry(self, call, angle, target): + self._schedule_rotation(call, ptr_id(target), "ry") + + def _on_qis_ryy(self, call, angle, target1, target2): + self._drop_ops([target1, target2]) + + def _on_qis_rz(self, call, angle, target): + self._schedule_rotation(call, ptr_id(target), "rz") + + def _on_qis_rzz(self, call, angle, target1, target2): + self._drop_ops([target1, target2]) + + def _on_qis_ccx(self, call, ctrl1, ctrl2, target): + 
self._drop_ops([ctrl1, ctrl2, target]) + + def _on_qis_cx(self, call, target1, target2): + self._drop_ops([target1, target2]) + + def _on_qis_cy(self, call, target1, target2): + self._drop_ops([target1, target2]) + + def _on_qis_cz(self, call, target1, target2): + self._drop_ops([target1, target2]) + + def _on_qis_swap(self, call, target1, target2): + self._drop_ops([target1, target2]) + + def _on_qis_m(self, call, target, result): + self._drop_ops([target]) + self.last_meas[ptr_id(target)] = (call, target, result) + + def _on_qis_mz(self, call, target, result): + self._on_qis_m(call, target, result) + + def _on_qis_mresetz(self, call, target, result): + self._on_qis_m(call, target, result) + + def _on_qis_reset(self, call, target): + id = ptr_id(target) + if id in self.last_meas: + # Since the last operation on this qubit was a measurement, + # we can combine that measurement with the reset. + (instr, target, result) = self.last_meas.pop(id) + instr.erase() + self.builder.insert_before(call) + new_call = self.builder.call( + self.mresetz_func, + [target, result], + ) + call.erase() + self.last_meas[ptr_id(target)] = (new_call, target, result) + elif not id in self.used_qubits: + # This qubit was never used, so we can just erase the reset instruction. + call.erase() + elif id in self.qubit_ops and self.qubit_ops[id][-1][1] == "reset": + # The last operation on this qubit was also a reset, so we drop the current, + # extra one. + call.erase() + else: + self._drop_ops([target]) + self._schedule_gate(call, id, "reset", "") + + +class PruneUnusedFunctions(QirModuleVisitor): + def _on_module(self, module): + # Assume every non-entry point function is unused. + self.funcs_to_drop = [f for f in module.functions if not is_entry_point(f)] + super()._on_module(module) + # Delete all unused functions. 
+ for func in self.funcs_to_drop: + func.delete() + + def _on_call_instr(self, call): + # Remove calls to initialization and barrier functions, since they aren't handled + # by most of the stack. + if call.callee.name == "__quantum__rt__initialize": + call.erase() + elif call.callee.name == "__quantum__qis__barrier__body": + call.erase() + elif call.callee in self.funcs_to_drop: + # This function is used in a call, so remove it from the list of + # functions to drop. + assert isinstance(call.callee, Function) + self.funcs_to_drop.remove(call.callee) diff --git a/source/qdk_package/qdk/_device/_atom/_reorder.py b/source/qdk_package/qdk/_device/_atom/_reorder.py new file mode 100644 index 0000000000..3efed6a4f0 --- /dev/null +++ b/source/qdk_package/qdk/_device/_atom/_reorder.py @@ -0,0 +1,114 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from ._utils import as_qis_gate, get_used_values, uses_any_value +from .._device import Device +from pyqir import ( + Call, + Instruction, + Function, + QirModuleVisitor, +) + + +def is_output_recording(instr: Instruction): + if isinstance(instr, Call): + return instr.callee.name.endswith("_record_output") + return False + + +def is_irreversible(instr: Instruction): + if isinstance(instr, Call) and isinstance(instr.callee, Function): + return "irreversible" in instr.callee.attributes.func + return False + + +class Reorder(QirModuleVisitor): + """ + Reorder instructions within a block to find contiguous sequences of the same gate on + different qubits. This enables both layout and scheduling at a later stage. 
+ """ + + def __init__(self, device: Device): + super().__init__() + self.device = device + + def instr_key(self, instr: Instruction): + gate = as_qis_gate(instr) + if gate != {}: + qubits = sorted(map(self.device.get_ordering, gate["qubit_args"])) + return qubits[0] + return 0 + + def _on_block(self, block): + # The instructions are collected into an ordered list of steps, where each step + # contains instructions of the same type that do not depend on each other. + steps = [] + + # A list of all values or resultsused in the current step. This is used to determine if an instruction + # can be added to the current step or if it needs to go into a new step by checking dependencies. + values_used_in_step = [] + results_used_in_step = [] + + # Output recording instructions and terminator must be treated separately, as those + # must be at the end of the block. + output_recording = [] + terminator = block.terminator + if terminator: + terminator.remove() + + for instr in block.instructions: + # Remove the instruction from the block, which keeps it alive in the module + # and available for later insertion. + instr.remove() + if is_output_recording(instr): + # Gather output recording instructions to be placed at the end of the block just before + # the terminator. + output_recording.append(instr) + else: + # Find the last step that contains instructions that the current instruction + # depends on. We want to insert the current instruction on the earliest possible + # step without violating dependencies. 
+ last_dependent_step_idx = len(steps) - 1 + (used_values, used_results) = get_used_values(instr) + while last_dependent_step_idx >= 0: + if uses_any_value( + used_values, values_used_in_step[last_dependent_step_idx] + ) or uses_any_value( + used_results, results_used_in_step[last_dependent_step_idx] + ): + break + last_dependent_step_idx -= 1 + + if isinstance(instr, Call): + while ( + last_dependent_step_idx < len(steps) - 1 + and isinstance(steps[last_dependent_step_idx + 1][0], Call) + and instr.callee != steps[last_dependent_step_idx + 1][0].callee + ): + last_dependent_step_idx += 1 + + if last_dependent_step_idx == len(steps) - 1: + # The current instruction depends on the last step, so add it to a new step at the end. + steps.append([instr]) + values_used_in_step.append(set(used_values)) + results_used_in_step.append(set(used_results)) + else: + # The last dependent step is before the end, so add the current instruction to the + # step after it. + steps[last_dependent_step_idx + 1].append(instr) + values_used_in_step[last_dependent_step_idx + 1].update(used_values) + results_used_in_step[last_dependent_step_idx + 1].update( + used_results + ) + + # Insert the instructions back into the block in the correct order. + self.builder.insert_at_end(block) + for step in steps: + for instr in sorted(step, key=self.instr_key): + self.builder.instr(instr) + # Add output recording instructions and terminator at the end of the block. + for instr in output_recording: + self.builder.instr(instr) + if terminator: + self.builder.instr(terminator) diff --git a/source/qdk_package/qdk/_device/_atom/_scheduler.py b/source/qdk_package/qdk/_device/_atom/_scheduler.py new file mode 100644 index 0000000000..0ca1285e51 --- /dev/null +++ b/source/qdk_package/qdk/_device/_atom/_scheduler.py @@ -0,0 +1,938 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from ._utils import as_qis_gate, get_used_values, uses_any_value +from pyqir import ( + Call, + Instruction, + Function, + QirModuleVisitor, + FunctionType, + PointerType, + Type, + Linkage, + ptr_id, + IntType, + Value, +) +from .._device import Device, Zone, ZoneType +from collections import defaultdict +from dataclasses import dataclass +from itertools import chain +from typing import Iterable, TypeAlias, Optional +from fractions import Fraction +from functools import lru_cache + +QubitId: TypeAlias = Value +Location: TypeAlias = tuple[int, int] +MoveGroupScaleFactor: TypeAlias = tuple[bool | Fraction, bool | Fraction] +MOVE_GROUPS_PER_PARALLEL_SECTION = 1 + + +@dataclass +class Move: + __slots__ = ("qubit_id_ptr", "src_loc", "dst_loc") + + qubit_id_ptr: Value + src_loc: Location + dst_loc: Location + + def __hash__(self): + return hash(self.qubit_id_ptr) + + def __str__(self): + return f"Move Qubit({self.qubit_id}): {self.src_loc} -> {self.dst_loc}" + + def __repr__(self): + return self.__str__() + + @property + def qubit_id(self) -> int: + q_id = ptr_id(self.qubit_id_ptr) + assert q_id is not None, "Qubit id should be known" + return q_id + + def parity(self): + return move_parity(self.src_loc, self.dst_loc) + + def direction(self): + return move_direction(self.src_loc, self.dst_loc) + + +@dataclass +class PartialMove: + """A move missing its destination location.""" + + __slots__ = ("qubit_id_ptr", "src_loc") + + qubit_id_ptr: Value + src_loc: Location + + @property + def qubit_id(self) -> int: + q_id = ptr_id(self.qubit_id_ptr) + assert q_id is not None, "Qubit id should be known" + return q_id + + def into_move(self, dst_loc: Location) -> Move: + return Move(self.qubit_id_ptr, self.src_loc, dst_loc) + + +PartialMovePair: TypeAlias = tuple[PartialMove, PartialMove] + + +def move_parity(source: Location, destination: Location) -> tuple[int, int]: + """Returns a tuple representing the parities of the source and destination columns of a move.""" + return 
(source[1] % 2, destination[1] % 2) + + +def move_direction(source: Location, destination: Location) -> tuple[int, int]: + """Returns a tuple representing if the move is up or down, and left or right.""" + return (int(source[0] < destination[0]), int(source[1] < destination[1])) + + +def index_from_parity_and_direction(ud: int, lr: int) -> int: + return 2 * ud + lr + + +def is_invalid_move_pair(move1: Move, move2: Move) -> bool: + """ + Returns true if the two moves are incompatible, i.e., if they have the same + source row then they must have the same destination row, and if they have the + same source column then they must have the same destination column. + """ + + source_row_diff = move1.src_loc[0] - move2.src_loc[0] + destination_row_diff = move1.dst_loc[0] - move2.dst_loc[0] + source_col_diff = move1.src_loc[1] - move2.src_loc[1] + destination_col_diff = move1.dst_loc[1] - move2.dst_loc[1] + + return ( + (source_row_diff == 0 and destination_row_diff != 0) + or (source_row_diff != 0 and destination_row_diff == 0) + or (source_col_diff == 0 and destination_col_diff != 0) + or (source_col_diff != 0 and destination_col_diff == 0) + ) + + +@lru_cache(maxsize=1 << 14) +def scale_factor_helper(source_diff, destination_diff): + if destination_diff == 0: + return True + if (s := Fraction(source_diff, destination_diff)) >= 0: + return s + + +def scale_factor(move1: Move, move2: Move) -> Optional[MoveGroupScaleFactor]: + """ + Returns a tuple of two elements, representing the row displacement ratio and column + displacement ratio between the moves. 
+ """ + + if is_invalid_move_pair(move1, move2): + return None + + source_row_diff = move1.src_loc[0] - move2.src_loc[0] + destination_row_diff = move1.dst_loc[0] - move2.dst_loc[0] + source_col_diff = move1.src_loc[1] - move2.src_loc[1] + destination_col_diff = move1.dst_loc[1] - move2.dst_loc[1] + row_scale_factor = scale_factor_helper(source_row_diff, destination_row_diff) + col_scale_factor = scale_factor_helper(source_col_diff, destination_col_diff) + + if row_scale_factor is not None and col_scale_factor is not None: + return row_scale_factor, col_scale_factor + + +class MoveGroup: + """ + Represents a group of moves that can be done at the same time. + + ``moves`` is the set of moves that can be performed in parallel. + ``scale_factor`` is a tuple of fractions representing the scale factors in the + row and col axes between moves, or ``None`` if there is a single element in the set. + ``ref_move`` is a move used as a representative of the group to test compatibility + of other moves. + """ + + __slots__ = ("moves", "scale_factor", "ref_move") + + def __init__(self, moves: Iterable[Move]): + self.moves = set(moves) + self.scale_factor = scale_factor(*moves) if len(self.moves) > 1 else None + self.ref_move = next(iter(moves)) + + def __len__(self) -> int: + return len(self.moves) + + def add(self, move: Move): + """ + Adds a move to this move group. + + :param move: The move to add. + """ + + # A move group with a single move doesn't have an associated scale factor. + # Therefore, we cannot test if a move is compatible with it, which means + # we cannot add moves to it. + assert ( + self.scale_factor + ), "cannot add to move group candidate with a single move" + self.moves.add(move) + + def remove(self, move: Move): + self.moves.remove(move) + + def discard(self, move: Move): + self.moves.discard(move) + + +class MoveGroupPool: + """A data structure that takes individual moves as input and organizes them + into groups of moves that can be executed in parallel. 
+ + ``moves`` contains all moves in the pool. ``move_group_candidates`` is a dict + organizing the move-group candidates by scale factor. ``parity`` is the parity + of source and destination columns of all moves in the pool. ``direction`` is the + up/down and left/right direction of all moves in the pool. + """ + + def __init__(self): + """Initializes a move-group pool for moves of the given ``parity`` and ``direction``. + + :param parity: The parity of source and destination columns of all the moves in this pool. + :param direction: The up/down and left/right direction of all the moves in this pool. + """ + self.moves: Optional[list[Move]] = [] + self.move_group_candidates: dict[MoveGroupScaleFactor, list[MoveGroup]] = ( + defaultdict(list) + ) + self.single_moves: set[Move] | list[Move] = set() + + def move_group_candidates_iter(self) -> Iterable[MoveGroup]: + return chain(*self.move_group_candidates.values()) + + def is_empty(self) -> bool: + """Returns `True` if there are no moves left, `False` otherwise.""" + return ( + not any(s.moves for s in self.move_group_candidates_iter()) + and not self.single_moves + ) + + def largest_move_group_candidate(self) -> Optional[MoveGroup]: + try: + return max(self.move_group_candidates_iter(), key=len) + except ValueError: + return None + + def add(self, move: Move): + """Adds a move to the move-group pool. + + :param move: The move to add. It must be of the same parity and direction as + the rest of the moves in this pool. + """ + assert self.moves is not None + + move_added = False + + # Add the move to all the groups it is compatible with + for group_scale_factor, groups in self.move_group_candidates.items(): + for group in groups: + if scale_factor(move, group.ref_move) == group_scale_factor: + group.add(move) + move_added = True + + # Build a table organizing the moves by scale factor with respect to `move`. 
+ moves_by_scale: dict[MoveGroupScaleFactor, list[Move]] = defaultdict(list) + for move2 in self.moves: + s = scale_factor(move, move2) + if s is None: + continue + moves_by_scale[s].append(move2) + + # Try to create new candidates having the new move as the ref_move. + for s, moves in moves_by_scale.items(): + candidates_with_same_scale_factor = self.move_group_candidates[s] + for move2 in moves: + for group in candidates_with_same_scale_factor: + if move2 in group.moves: + # This pair already belongs to an existing move group candidate, + # so we don't need to create a new one. + break + else: + # Create a new move group candidate. + new_candidate = MoveGroup((move, move2)) + + # Add previous moves to the new candidate. + new_candidate.moves.update(moves_by_scale[s]) + + candidates_with_same_scale_factor.append(new_candidate) + move_added = True + + # This case triggers if `move` is not compatible with any move in `self.moves`. + if not move_added: + assert isinstance(self.single_moves, set) + self.single_moves.add(move) + + self.moves.append(move) + + def try_take(self, number_of_moves: int) -> list[Move]: + """Take up to ``number_of_moves`` from the largest move group candidate. + + :param number_of_moves: The number of moves to take from this pool. + """ + # Once we start taking moves from the MoveGroupPool, we don't need to add + # new moves. So we set `self.moves` to `None` as a safety measure. + if self.moves is not None: + self.moves = None + + if largest_move_group_candidate := self.largest_move_group_candidate(): + # Ensure moves are sorted by qubit ID to have a deterministic order. + moves = sorted( + largest_move_group_candidate.moves, key=lambda m: m.qubit_id + )[:number_of_moves] + moves_set = set(moves) + # Remove the taken moves from all candidates. 
+ for group in self.move_group_candidates_iter(): + group.moves -= moves_set + assert isinstance(self.single_moves, set) + self.single_moves -= moves_set + return moves + else: + if isinstance(self.single_moves, set): + self.single_moves = sorted( + self.single_moves, key=lambda m: m.qubit_id, reverse=True + ) + if m := self.single_moves.pop(): + return [m] + else: + return [] + + def take_largest_candidate(self) -> list[Move]: + """Take all the moves from the largest move group candidate.""" + # Once we start taking moves from the MoveGroupPool, we don't need to add + # new moves. So we set `self.moves` to `None` as a safety measure. + if self.moves is not None: + self.moves = None + + if largest_move_group_candidate := self.largest_move_group_candidate(): + # Ensure moves are sorted by qubit ID to have a deterministic order. + moves = sorted(largest_move_group_candidate.moves, key=lambda m: m.qubit_id) + moves_set = largest_move_group_candidate.moves + # Remove the taken moves from all candidates. + for group in self.move_group_candidates_iter(): + if group is not largest_move_group_candidate: + group.moves -= moves_set + assert isinstance(self.single_moves, set) + self.single_moves -= moves_set + moves_set.clear() + return moves + else: + if isinstance(self.single_moves, set): + self.single_moves = sorted( + self.single_moves, key=lambda m: m.qubit_id, reverse=True + ) + if m := self.single_moves.pop(): + return [m] + else: + return [] + + +class MoveScheduler: + """ + Takes a device, a target zone, and a list of qubits to move to that + target zone and builds an iterator that returns groups of moves + that can be executed in parallel. + + ``device`` contains information about the device. ``zone`` is the target zone. + ``available_dst_locations`` holds the available destinations in the zone. + ``partial_moves`` are moves not yet assigned a destination. ``disjoint_pools`` + is a list containing one pool of move-groups for each parity and direction. 
+ """ + + def __init__( + self, + device: Device, + zone: Zone, + qubits_to_move: list[QubitId | tuple[QubitId, QubitId]], + ): + """Initializes the move scheduler from a device, a target zone, + and a list of qubits to move to that target zone. + + :param device: An object containing information about the device. + :param zone: The zone the moves will be scheduled to. + :param qubits_to_move: A list of qubits to move. + """ + self.device = device + self.zone = zone + self.available_dst_locations = self.build_zone_locations(zone) + self.move_group_pool = MoveGroupPool() + + # Step through the partial moves and push them to the largest + # candidate they are compatible with. + partial_moves = self.qubits_to_partial_moves(qubits_to_move) + for partial_move in partial_moves: + if isinstance(partial_move, PartialMove): + self.add_to_largest_compatible_move_group(partial_move) + else: + self.add_pair_to_largest_compatible_move_group(partial_move) + + def build_zone_locations(self, zone: Zone) -> dict[Location, None]: + zone_row_offset = zone.offset // self.device.column_count + # We use a dict with None values instead of a set to preserve order. 
+ return { + (row, col): None + for row in range( + zone_row_offset, + zone_row_offset + zone.row_count, + ) + for col in range(self.device.column_count) + } + + def qubits_to_partial_moves( + self, qubits_to_move: list[QubitId | tuple[QubitId, QubitId]] + ) -> list[PartialMove | PartialMovePair]: + partial_moves = [] + for elt in qubits_to_move: + if isinstance(elt, tuple): + q_id1 = ptr_id(elt[0]) + q_id2 = ptr_id(elt[1]) + assert q_id1 is not None + assert q_id2 is not None + mov1 = PartialMove(elt[0], self.device.get_home_loc(q_id1)) + mov2 = PartialMove(elt[1], self.device.get_home_loc(q_id2)) + partial_moves.append((mov1, mov2)) + else: + q_id = ptr_id(elt) + assert q_id is not None + mov = PartialMove(elt, self.device.get_home_loc(q_id)) + partial_moves.append(mov) + + def sort_key(partial_move: PartialMove | PartialMovePair): + if isinstance(partial_move, PartialMove): + return self.device.get_ordering(partial_move.qubit_id) + else: + return self.device.get_ordering(partial_move[0].qubit_id) + + return sorted(partial_moves, key=sort_key) + + def is_empty(self): + """ + Returns `True` if all moves were scheduled. + That is, there are no partial moves and all disjoint pools are empty. + """ + return self.move_group_pool.is_empty() + + def largest_move_group_pool(self) -> MoveGroupPool: + return self.move_group_pool + + def add_to_largest_compatible_move_group( + self, partial_move: PartialMove + ) -> MoveGroupPool: + zone_row_offset = self.zone.offset // self.device.column_count + + # Heuristic: Prefer moves that are straight up or down. 
+ for row in range(zone_row_offset, zone_row_offset + self.zone.row_count): + dst_loc = (row, partial_move.src_loc[1]) + if dst_loc in self.available_dst_locations: + move = partial_move.into_move(dst_loc) + pool = self.move_group_pool + pool.add(move) + del self.available_dst_locations[move.dst_loc] + return pool + + if move := self.get_compatible_move(self.move_group_pool, partial_move): + self.move_group_pool.add(move) + del self.available_dst_locations[move.dst_loc] + return self.move_group_pool + + raise Exception("not enough IZ space to schedule all moves") + + def add_pair_to_largest_compatible_move_group( + self, partial_move_pair: PartialMovePair + ) -> MoveGroupPool: + zone_row_offset = self.zone.offset // self.device.column_count + partial_move = partial_move_pair[0] + + # Heuristic: Prefer moves that are straight up or down. + if partial_move.src_loc[1] % 2 == 0: + for row in range(zone_row_offset, zone_row_offset + self.zone.row_count): + dst_loc1 = (row, partial_move.src_loc[1]) + dst_loc2 = (row, partial_move.src_loc[1] + 1) + if ( + dst_loc1 in self.available_dst_locations + and dst_loc2 in self.available_dst_locations + ): + move1 = partial_move.into_move(dst_loc1) + move2 = partial_move_pair[1].into_move(dst_loc2) + pool1 = self.move_group_pool + pool2 = self.move_group_pool + pool1.add(move1) + pool2.add(move2) + del self.available_dst_locations[dst_loc1] + del self.available_dst_locations[dst_loc2] + return pool1 + + if move1 := self.get_compatible_move( + self.move_group_pool, partial_move, is_pair=True + ): + # Push the move corresponding to the first qubit of the CZ pair. + self.move_group_pool.add(move1) + + # Build the move corresponding to the second qubit of the CZ pair. 
+ dest2 = (move1.dst_loc[0], move1.dst_loc[1] + 1) + move2 = partial_move_pair[1].into_move(dest2) + self.move_group_pool.add(move2) + del self.available_dst_locations[move1.dst_loc] + del self.available_dst_locations[move2.dst_loc] + return self.move_group_pool + raise Exception("not enough IZ space to schedule all moves") + + def get_destination( + self, + partial_move: PartialMove, + scale_factor: MoveGroupScaleFactor, + group: MoveGroup, + ) -> Optional[Location]: + """ + Returns an available destination location that would make `partial_move` + fit in the given group, or `None` if no such location exists. + """ + row_scale_factor, col_scale_factor = scale_factor + + if row_scale_factor is True: + dst_row = group.ref_move.dst_loc[0] + else: + # We compute the destination row by solving this equation for `dst_row`: + # src_row_diff / (group.ref_move.dst_loc[0] - dst_row) == row_scale_factor + src_row_diff = group.ref_move.src_loc[0] - partial_move.src_loc[0] + dst_row = group.ref_move.dst_loc[0] - src_row_diff / row_scale_factor + assert isinstance(dst_row, Fraction) + if dst_row.denominator == 1: + dst_row = dst_row.numerator + else: + return None + + if col_scale_factor is True: + dst_col = group.ref_move.dst_loc[1] + else: + # We compute the destination col by solving this equation for `dst_col`: + # src_col_diff / (group.ref_move.dst_loc[1] - dst_col) == col_scale_factor + src_col_diff = group.ref_move.src_loc[1] - partial_move.src_loc[1] + dst_col = group.ref_move.dst_loc[1] - src_col_diff / col_scale_factor + assert isinstance(dst_col, Fraction) + if dst_col.denominator == 1: + dst_col = dst_col.numerator + else: + return None + + loc = (dst_row, dst_col) + if loc in self.available_dst_locations: + return loc + + def get_compatible_move( + self, + pool: MoveGroupPool, + partial_move: PartialMove, + is_pair=False, + ) -> Optional[Move]: + # First, try finding a large enough group to place the partial move in. 
+ if self.zone.type != ZoneType.MEAS: + GROUP_SIZE_THRESHOLD = self.device.column_count // 4 + best_destination: Optional[Location] = None + best_destination_group_len = 0 + for scale, groups in pool.move_group_candidates.items(): + for group in sorted(groups, key=len, reverse=True): + if ( + len(group) < GROUP_SIZE_THRESHOLD + or len(group) < best_destination_group_len + ): + break + if destination := self.get_destination(partial_move, scale, group): + if (not is_pair) or destination[1] % 2 == 0: + best_destination = destination + best_destination_group_len = len(group) + break + if best_destination: + return partial_move.into_move(best_destination) + + # If we didn't find a group to place the partial_move in, + # just pick the next available IZ location. + for destination in self.available_dst_locations: + if (not is_pair) or destination[1] % 2 == 0: + return partial_move.into_move(destination) + + def __iter__(self): + return self + + def __next__(self) -> list[Move]: + # If there are no moves left to schedule, stop the iteration. + if self.is_empty(): + raise StopIteration + + # Try_get from the largest candidate. + return self.largest_move_group_pool().take_largest_candidate() + + +class Schedule(QirModuleVisitor): + """ + Schedule instructions within a block, adding appropriate moves to the interaction zone to perform operations + """ + + begin_func: Function + end_func: Function + move_funcs: list[Function] + + def __init__(self, device: Device): + super().__init__() + self.device = device + self.num_qubits = len(self.device.home_locs) + self.pending_moves: list[list[Move]] = [] + + def _on_module(self, module): + i64_ty = IntType(module.context, 64) + # Find or create the necessary runtime functions. 
+ for func in module.functions: + if func.name == "__quantum__rt__begin_parallel": + self.begin_func = func + elif func.name == "__quantum__rt__end_parallel": + self.end_func = func + if not hasattr(self, "begin_func"): + self.begin_func = Function( + FunctionType( + Type.void(module.context), + [], + ), + Linkage.EXTERNAL, + "__quantum__rt__begin_parallel", + module, + ) + if not hasattr(self, "end_func"): + self.end_func = Function( + FunctionType( + Type.void(module.context), + [], + ), + Linkage.EXTERNAL, + "__quantum__rt__end_parallel", + module, + ) + self.move_func = Function( + FunctionType( + Type.void(module.context), + [PointerType(Type.void(module.context)), i64_ty, i64_ty], + ), + Linkage.EXTERNAL, + "__quantum__qis__move__body", + module, + ) + + super()._on_module(module) + + def _on_block(self, block): + # Use only the first interaction and measurement zone; more could be supported in future. + interaction_zone = self.device.get_interaction_zones()[0] + measurement_zone = self.device.get_measurement_zones()[0] + max_iz_pairs = (self.device.column_count // 2) * interaction_zone.row_count + max_measurements = self.device.column_count * measurement_zone.row_count + + # Track pending/queued single qubit operations by qubit id. + self.single_qubit_ops = [[] for _ in range(self.num_qubits)] + + # Track pending CZ operations. + self.curr_cz_ops = [] + + # Track pending measurements. + self.measurements = [] + + # Track pending qubits to move to an interaction or measurement zone. + self.pending_qubits_to_move: list[QubitId | tuple[QubitId, QubitId]] = [] + + # Track values used in CZ ops and measurements to avoid putting operations on the + # same qubit in the same batch. 
+ self.vals_used_in_cz_ops = set() + self.vals_used_in_measurements = set() + + instructions = [instr for instr in block.instructions] + for instr in instructions: + gate = as_qis_gate(instr) + if ( + gate != {} + and len(gate["qubit_args"]) == 1 + and len(gate["result_args"]) == 0 + ): + # This is a single qubit gate; queue it up for later execution when this qubit is needed for CZ or measurement. + + # If this qubit is involved in pending moves, that implies a CZ or measurement is pending, so flush now. + if any( + ( + gate["qubit_args"][0] == ptr_id(q) + if isinstance(q, QubitId) + else ( + gate["qubit_args"][0] == ptr_id(q[0]) + or gate["qubit_args"][0] == ptr_id(q[1]) + ) + ) + for q in self.pending_qubits_to_move + ): + self.flush_pending(instr) + + # Remove the instruction from the block and queue by the qubit id. + instr.remove() + self.single_qubit_ops[gate["qubit_args"][0]].append((instr, gate)) + + elif gate != {} and len(gate["qubit_args"]) == 2: + # This is a CZ gate; queue it up to be executed in the next available interaction zone row. + + # Pick next available interaction zone pair for these qubits. If none, flush the current set and start a fresh set. + # Create move instructions to move qubits to interaction zone and save them in pending moves for later insertion. + assert isinstance(instr, Call) + (vals_used, _) = get_used_values(instr) + if ( + self.measurements + or uses_any_value(vals_used, self.vals_used_in_cz_ops) + or len(self.curr_cz_ops) >= max_iz_pairs + ): + self.flush_pending(instr) + instr.remove() + self.curr_cz_ops.append(instr) + self.vals_used_in_cz_ops.update(vals_used) + + # Prefer using matching relative column ordering to home locations to reduce move crossings. 
+ if ( + self.device.get_home_loc(gate["qubit_args"][0])[1] + > self.device.get_home_loc(gate["qubit_args"][1])[1] + ): + self.pending_qubits_to_move.append((instr.args[1], instr.args[0])) + else: + self.pending_qubits_to_move.append((instr.args[0], instr.args[1])) + + elif gate != {} and len(gate["result_args"]) == 1: + # This is a measurement; queue it up to be executed in the measurement zone. + + # Pick next available measurement zone location for this qubit. If none, flush the current set and start a fresh set. + # Create move instructions to move qubit to measurement zone and save them in pending moves for later insertion. + assert isinstance(instr, Call) + (vals_used, _) = get_used_values(instr) + if ( + not self.measurements + or len(self.measurements) >= max_measurements + or uses_any_value(vals_used, self.vals_used_in_measurements) + ): + self.flush_pending(instr) + if len(self.single_qubit_ops[gate["qubit_args"][0]]) > 0: + # There are still pending single qubits ops for the qubit we want to measure, + # so trigger another flush. + # We need to cache and restore the measurements and pending moves that have already + # been queued so that this flush affects the single qubit ops but not the measurements. + temp_meas = self.measurements + self.measurements = [] + temp_moves = self.pending_qubits_to_move + self.pending_qubits_to_move = [] + self.flush_pending(instr) + self.measurements = temp_meas + self.pending_qubits_to_move = temp_moves + + # Remove the measurement from the block and queue it. + instr.remove() + self.measurements.append((instr, gate)) + self.vals_used_in_measurements.update(vals_used) + self.pending_qubits_to_move.append(instr.args[0]) + else: + # This is not a gate or measurement; flush any pending operations and leave the instruction in place. + # This uses a while loop to ensure all pending operations are flushed before the instruction. 
+ while self.any_pending_ops(): + self.flush_pending(instr) + + def any_pending_single_qubit_ops(self): + return any(ops for ops in self.single_qubit_ops) + + def any_pending_czs(self): + return bool(self.curr_cz_ops) + + def any_pending_measurements(self): + return bool(self.measurements) + + def any_pending_ops(self): + return ( + self.any_pending_czs() + or self.any_pending_single_qubit_ops() + or self.any_pending_measurements() + ) + + def flush_pending(self, insert_before: Instruction): + interaction_zone = self.device.get_interaction_zones()[0] + self.builder.insert_before(insert_before) + # If cz ops pending, insert accumulated moves, single qubits ops matching cz rows, then the cz ops, then move back. + if self.curr_cz_ops: + self.schedule_pending_moves(interaction_zone) + self.insert_moves() + qubits_by_row = self.target_qubits_by_row(interaction_zone) + for qubits_in_row in qubits_by_row: + self.flush_single_qubit_ops(qubits_in_row) + self.builder.call(self.begin_func, []) + for cz_op in self.curr_cz_ops: + self.builder.instr(cz_op) + self.builder.call(self.end_func, []) + self.curr_cz_ops = [] + self.insert_moves_back() + self.vals_used_in_cz_ops = set() + return + # If measurements pending, insert accumulated moves, then measurements, then move back. + elif len(self.measurements) > 0: + self.schedule_pending_moves(self.device.get_measurement_zones()[0]) + self.insert_moves() + self.builder.call(self.begin_func, []) + for meas_op, meas_gate in self.measurements: + self.builder.instr(meas_op) + self.builder.call(self.end_func, []) + self.measurements = [] + self.vals_used_in_measurements = set() + self.insert_moves_back() + return + # Else, create movements for remaining single qubit ops to the first interaction zone, + # insert those moves, then the ops, then move back. 
+ else: + while self.any_pending_single_qubit_ops(): + target_qubits_by_row = [[] for _ in range(interaction_zone.row_count)] + curr_row = 0 + for q in range(self.num_qubits): + if len(self.single_qubit_ops[q]) > 0: + target_qubits_by_row[curr_row].append(q) + if ( + len(target_qubits_by_row[curr_row]) + >= self.device.column_count + ): + curr_row += 1 + if curr_row >= interaction_zone.row_count: + break + for target_qubits in target_qubits_by_row: + for q in target_qubits: + qubit = self.single_qubit_ops[q][0][0].args[0] + if self.single_qubit_ops[q][0][1]["gate"] == "rz": + qubit = self.single_qubit_ops[q][0][0].args[1] + self.pending_qubits_to_move.append(qubit) + self.schedule_pending_moves(interaction_zone) + self.insert_moves() + qubits_by_row = self.target_qubits_by_row(interaction_zone) + for qubits_in_row in qubits_by_row: + self.flush_single_qubit_ops(qubits_in_row) + self.insert_moves_back() + return + + def target_qubits_by_row(self, zone: Zone) -> list[list[int]]: + zone_row_offset = zone.offset // self.device.column_count + qubits_by_row: list[list[int]] = [[] for _ in range(zone.row_count)] + for group in self.pending_moves: + for move in group: + row_idx = move.dst_loc[0] - zone_row_offset + qubits_by_row[row_idx].append(move.qubit_id) + # Organize qubits in each row by qubit_id, so that parallel sections + # of single-qubit ops in the generated QIR are easier to read. 
+ for row in qubits_by_row: + row.sort() + return qubits_by_row + + def schedule_pending_moves(self, zone: Zone): + move_scheduler = MoveScheduler(self.device, zone, self.pending_qubits_to_move) + for move_group in move_scheduler: + self.pending_moves.append(move_group) + # self.verify_that_all_moves_were_scheduled() + self.pending_qubits_to_move = [] + + def verify_that_all_moves_were_scheduled(self): + moves_to_schedule = sum( + len(x) if isinstance(x, tuple) else 1 for x in self.pending_qubits_to_move + ) + scheduled_moves = sum(len(group) for group in self.pending_moves) + assert ( + moves_to_schedule == scheduled_moves + ), f"{moves_to_schedule} != {scheduled_moves}" + + def insert_moves(self): + """ + For each pending move, insert a call to the move function that moves the + given qubit to the given (row, col) location. + """ + move_group_id = 0 + for move_group in self.pending_moves: + # We can execute `MOVE_GROUPS_PER_PARALLEL_SECTION`, if + # this is the first one, start a parallel section. + if move_group_id == 0: + self.builder.call(self.begin_func, []) + + # Insert all the moves in a group using the same move function. + for move in move_group: + self.builder.call(self.move_func, (move.qubit_id_ptr, *move.dst_loc)) + + # There `MOVE_GROUPS_PER_PARALLEL_SECTION` move groups, + # so we increment the id modulo `MOVE_GROUPS_PER_PARALLEL_SECTION`. + move_group_id = (move_group_id + 1) % MOVE_GROUPS_PER_PARALLEL_SECTION + + # We can execute `MOVE_GROUPS_PER_PARALLEL_SECTION`, if + # this is the last one, end the parallel section. + if move_group_id == 0: + self.builder.call(self.end_func, []) + + # End the parallel section if it hasn't been ended. + if move_group_id != 0: + self.builder.call(self.end_func, []) + + def insert_moves_back(self): + move_group_id = 0 + for move_group in self.pending_moves: + # We can execute `MOVE_GROUPS_PER_PARALLEL_SECTION`, if + # this is the first one, start a parallel section. 
+            if move_group_id == 0:
+                self.builder.call(self.begin_func, [])
+
+            # Insert all the moves in a group using the same move function.
+            for move in move_group:
+                self.builder.call(self.move_func, (move.qubit_id_ptr, *move.src_loc))
+
+            # There are `MOVE_GROUPS_PER_PARALLEL_SECTION` move groups,
+            # so we increment the id modulo `MOVE_GROUPS_PER_PARALLEL_SECTION`.
+            move_group_id = (move_group_id + 1) % MOVE_GROUPS_PER_PARALLEL_SECTION
+
+            # We can execute `MOVE_GROUPS_PER_PARALLEL_SECTION` move groups per
+            # section; if this is the last one, end the parallel section.
+            if move_group_id == 0:
+                self.builder.call(self.end_func, [])
+
+        # End the parallel section if it hasn't been ended.
+        if move_group_id != 0:
+            self.builder.call(self.end_func, [])
+
+        # Clear pending moves.
+        self.pending_moves = []
+
+    def flush_single_qubit_ops(self, target_qubits):
+        # Flush all pending single qubit ops for the given target qubits, combining
+        # consecutive ops of the same type into a single parallel region by row in
+        # the interaction zone.
+ ops_to_flush = [] + for q in target_qubits: + ops_to_flush.append(list(reversed(self.single_qubit_ops[q]))) + self.single_qubit_ops[q] = [] + while any(len(q_ops) > 0 for q_ops in ops_to_flush): + rz_ops = [] + for q_ops in ops_to_flush: + if len(q_ops) == 0: + continue + if q_ops[-1][1]["gate"] == "rz": + rz_ops.append(q_ops.pop()[0]) + if len(rz_ops) > 0: + self.builder.call(self.begin_func, []) + for rz_op in rz_ops: + self.builder.instr(rz_op) + self.builder.call(self.end_func, []) + sx_ops = [] + for q_ops in ops_to_flush: + if len(q_ops) == 0: + continue + if q_ops[-1][1]["gate"] == "sx": + sx_ops.append(q_ops.pop()[0]) + if len(sx_ops) > 0: + self.builder.call(self.begin_func, []) + for sx_op in sx_ops: + self.builder.instr(sx_op) + self.builder.call(self.end_func, []) diff --git a/source/qdk_package/qdk/_device/_atom/_trace.py b/source/qdk_package/qdk/_device/_atom/_trace.py new file mode 100644 index 0000000000..3308b52548 --- /dev/null +++ b/source/qdk_package/qdk/_device/_atom/_trace.py @@ -0,0 +1,76 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from pyqir import QirModuleVisitor, ptr_id, required_num_qubits +from .._device import Device + + +class Trace(QirModuleVisitor): + + def __init__( + self, + device: Device, + ): + self.in_parallel = False + self.trace = { + "qubits": device.home_locs, + "steps": [], + } + self.q_cols = {} + super().__init__() + + def _next_step(self): + self.trace["steps"].append({"id": len(self.trace["steps"]), "ops": []}) + + def _on_function(self, function): + num_qubits = required_num_qubits(function) + if num_qubits: + self.trace["qubits"] = self.trace["qubits"][:num_qubits] + super()._on_function(function) + + def _on_call_instr(self, call): + if call.callee.name == "__quantum__rt__begin_parallel": + self._next_step() + self.in_parallel = True + elif call.callee.name == "__quantum__rt__end_parallel": + self.in_parallel = False + elif call.callee.name == "__quantum__qis__move__body": + self._on_qis_move(call, call.args[0], call.args[1], call.args[2]) + elif call.callee.name == "__quantum__qis__sx__body": + self._on_qis_sx(call, call.args[0]) + else: + super()._on_call_instr(call) + + def _on_qis_move(self, call, qubit, row, col): + if not self.in_parallel: + self._next_step() + q = ptr_id(qubit) + self.q_cols[q] = col.value + self.trace["steps"][-1]["ops"].append(f"move({row.value}, {col.value}) {q}") + + def _on_qis_sx(self, call, qubit): + if not self.in_parallel: + self._next_step() + q = ptr_id(qubit) + self.trace["steps"][-1]["ops"].append(f"sx {q}") + + def _on_qis_rz(self, call, angle, qubit): + if not self.in_parallel: + self._next_step() + q = ptr_id(qubit) + self.trace["steps"][-1]["ops"].append(f"rz({angle.value}) {q}") + + def _on_qis_cz(self, call, qubit1, qubit2): + if not self.in_parallel: + self._next_step() + q1 = ptr_id(qubit1) + q2 = ptr_id(qubit2) + if self.q_cols.get(q1, -1) > self.q_cols.get(q2, -1): + q1, q2 = q2, q1 + self.trace["steps"][-1]["ops"].append(f"cz {q1}, {q2}") + + def _on_qis_mresetz(self, call, target, result): + if not 
self.in_parallel: + self._next_step() + q = ptr_id(target) + self.trace["steps"][-1]["ops"].append(f"mz {q}") diff --git a/source/qdk_package/qdk/_device/_atom/_utils.py b/source/qdk_package/qdk/_device/_atom/_utils.py new file mode 100644 index 0000000000..17d1eb3248 --- /dev/null +++ b/source/qdk_package/qdk/_device/_atom/_utils.py @@ -0,0 +1,92 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from pyqir import ( + Instruction, + Call, + Constant, + PointerType, + Value, + ptr_id, +) +from typing import Dict + +TOLERANCE: float = 1.1920929e-7 # Machine epsilon for 32-bit IEEE FP numbers. + +# QIS gates that consume a measurement result; the value is the 0-based index +# of the result argument. All other pointer-typed arguments of a QIS call are +# qubit arguments. +_RESULT_ARG_INDEX: Dict[str, int] = { + "__quantum__qis__m__body": 1, + "__quantum__qis__mz__body": 1, + "__quantum__qis__mresetz__body": 1, + "__quantum__qis__read_result__body": 0, +} + + +# If this is a call to a __qis__ gate, return a dict describing the gate and its arguments. +def as_qis_gate(instr: Instruction) -> Dict: + if isinstance(instr, Call) and instr.callee.name.startswith("__quantum__qis__"): + parts = instr.callee.name.split("__") + result_idx = _RESULT_ARG_INDEX.get(instr.callee.name) + qubit_args = [] + result_args = [] + other_args = [] + for i, arg in enumerate(instr.args): + if isinstance(arg.type, PointerType): + pid = ptr_id(arg) + if pid is None: + other_args.append(arg) + elif result_idx is not None and i == result_idx: + result_args.append(pid) + else: + qubit_args.append(pid) + else: + other_args.append(arg) + return { + "gate": parts[3] + ("_adj" if parts[4] == "adj" else ""), + "qubit_args": qubit_args, + "result_args": result_args, + "other_args": other_args, + } + return {} + + +# Returns all values and, separately, all measurement results used by the instruction. 
+def get_used_values(instr: Instruction) -> tuple[list[Value], list[Value]]: + vals = [] + meas_results = [] + if isinstance(instr, Call): + vals = instr.args + if ( + instr.callee.name == "__quantum__qis__mresetz__body" + or instr.callee.name == "__quantum__qis__m__body" + or instr.callee.name == "__quantum__qis__mz__body" + ): + # Measurement uses a result as the second argument + meas_results += vals[1:] + vals = vals[:1] + elif ( + instr.callee.name == "__quantum__qis__read_result__body" + or instr.callee.name == "__quantum__rt__read_result" + or instr.callee.name == "__quantum__rt__read_atom_result" + ): + # Read result uses a result as the first argument + meas_results += vals + vals = [] + else: + vals = instr.operands + vals.append(instr) + return (vals, meas_results) + + +# Returns true if any of the used values are in the existing values. +# Useful for determining if an instruction depends on any instructions in a set. +def uses_any_value(used_values, existing_values) -> bool: + return any( + [ + val in existing_values + for val in used_values + if not isinstance(val, Constant) or isinstance(val.type, PointerType) + ] + ) diff --git a/source/qdk_package/qdk/_device/_atom/_validate.py b/source/qdk_package/qdk/_device/_atom/_validate.py new file mode 100644 index 0000000000..0ebab719f8 --- /dev/null +++ b/source/qdk_package/qdk/_device/_atom/_validate.py @@ -0,0 +1,45 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from pyqir import QirModuleVisitor, is_entry_point, Opcode + + +class ValidateAllowedIntrinsics(QirModuleVisitor): + """ + Ensure that the module only contains allowed intrinsics. 
+ """ + + def _on_function(self, function): + name = function.name + if ( + not is_entry_point(function) + and not name.endswith("_record_output") + and name + not in [ + "__quantum__rt__begin_parallel", + "__quantum__rt__end_parallel", + "__quantum__qis__read_result__body", + "__quantum__rt__read_result", + "__quantum__qis__move__body", + "__quantum__qis__cz__body", + "__quantum__qis__sx__body", + "__quantum__qis__rz__body", + "__quantum__qis__mresetz__body", + ] + ): + raise ValueError(f"{name} is not a supported intrinsic") + + +class ValidateNoConditionalBranches(QirModuleVisitor): + """ + Ensure that the function(s) only use unconditional branches. + """ + + def _on_block(self, block): + if ( + block.terminator + and block.terminator.opcode == Opcode.BR + and len(block.terminator.operands) > 1 + ): + raise ValueError("programs with branching control flow are not supported") + super()._on_block(block) diff --git a/source/qdk_package/qdk/_device/_device.py b/source/qdk_package/qdk/_device/_device.py new file mode 100644 index 0000000000..991dc46b24 --- /dev/null +++ b/source/qdk_package/qdk/_device/_device.py @@ -0,0 +1,139 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from enum import Enum +from .._qsharp import QirInputData + + +class ZoneType(Enum): + """ + Enum representing different types of zones in the device layout. + """ + + REG = "register" + INTER = "interaction" + MEAS = "measurement" + + +class Zone: + """ + Represents a zone in the device layout. + """ + + offset: int = 0 + + def __init__(self, name: str, row_count: int, type: ZoneType): + self.name = name + self.row_count = row_count + self.type = type + + def set_offset(self, offset: int): + self.offset = offset + + +class Device: + """ + Represents a quantum device with specific layout expressed as zones. 
+ """ + + def __init__(self, column_count: int, zones: list[Zone]): + self.column_count = column_count + self.zones = zones + offset = 0 + # Ensure the zones have correct offsets set based on their ordering when passed in. + for zone in self.zones: + zone.set_offset(offset) + offset += zone.row_count * self.column_count + + self.home_locs = [] + self._init_home_locs() + + def _init_home_locs(self): + """ + Initialize the home locations of qubits in the device layout. + """ + raise NotImplementedError("Subclasses must implement _init_home_locs") + + def get_home_loc(self, qubit_id: int) -> tuple[int, int]: + """ + Get the home location (row, column) of the qubit with the given id. + + :param qubit_id: The id of the qubit. + :return: The (row, column) location of the qubit. + :rtype: tuple[int, int] + """ + if qubit_id < 0 or qubit_id >= len(self.home_locs): + raise ValueError(f"Qubit id {qubit_id} is out of range") + return self.home_locs[qubit_id] + + def get_ordering(self, qubit_id: int) -> int: + """ + Get the ordering index of the qubit with the given id. + + :param qubit_id: The id of the qubit. + :return: The ordering index of the qubit. + :rtype: int + """ + if qubit_id < 0 or qubit_id >= len(self.home_locs): + raise ValueError(f"Qubit id {qubit_id} is out of range") + row, col = self.home_locs[qubit_id] + return row * self.column_count + col + + def get_register_zones(self) -> list[Zone]: + """ + Get the register zones in the device. + + :return: The register zones. + :rtype: list[Zone] + """ + return [zone for zone in self.zones if zone.type == ZoneType.REG] + + def get_interaction_zones(self) -> list[Zone]: + """ + Get the interaction zones in the device. + + :return: The interaction zones. + :rtype: list[Zone] + """ + return [zone for zone in self.zones if zone.type == ZoneType.INTER] + + def get_measurement_zones(self) -> list[Zone]: + """ + Get the measurement zones in the device. + + :return: The measurement zones. 
+ :rtype: list[Zone] + """ + return [zone for zone in self.zones if zone.type == ZoneType.MEAS] + + def compile(self, program: str) -> QirInputData: + """ + Compile the given program for the device. + + :param program: The program to compile. + """ + raise NotImplementedError("Subclasses must implement compile") + + def as_dict(self) -> dict: + """ + Get the device layout as a dictionary. + + :return: The device layout as a dictionary. + :rtype: dict + """ + return { + "cols": self.column_count, + "zones": [ + {"title": zone.name, "rows": zone.row_count, "kind": zone.type.value} + for zone in self.zones + ], + } + + def get_layout(self) -> dict: + """ + Get the device layout as a dictionary. + + :return: The device layout as a dictionary. + :rtype: dict + """ + return self.as_dict() diff --git a/source/qdk_package/qdk/_fs.py b/source/qdk_package/qdk/_fs.py new file mode 100644 index 0000000000..c317007fd1 --- /dev/null +++ b/source/qdk_package/qdk/_fs.py @@ -0,0 +1,90 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +""" +_fs.py + +This module provides file system utility functions for working with the file +system as Python sees it. These are used as callbacks passed into native code +to allow the native code to interact with the file system in an +environment-specific way. +""" + +import os +from typing import Dict, List, Tuple + + +def read_file(path: str) -> Tuple[str, str]: + """ + Read the contents of a file. + + :param path: The path to the file. + :return: A tuple containing the path and the file contents. + :rtype: Tuple[str, str] + """ + with open(path, mode="r", encoding="utf-8-sig") as f: + return (path, f.read()) + + +def list_directory(dir_path: str) -> List[Dict[str, str]]: + """ + Lists the contents of a directory and returns a list of dictionaries, + where each dictionary represents an entry in the directory. + + :param dir_path: The path of the directory to list. 
+ :return: A list of dictionaries representing the entries in the directory. + Each dictionary contains the following keys: + - ``"path"``: The full path of the entry. + - ``"entry_name"``: The name of the entry. + - ``"type"``: The type of the entry: ``"file"``, ``"folder"``, or ``"unknown"``. + :rtype: List[Dict[str, str]] + """ + + def map_dir(e: str) -> Dict[str, str]: + path = os.path.join(dir_path, e) + return { + "path": path, + "entry_name": e, + "type": ( + "file" + if os.path.isfile(path) + else "folder" if os.path.isdir(path) else "unknown" + ), + } + + return list(map(map_dir, os.listdir(dir_path))) + + +def resolve(base: str, path: str) -> str: + """ + Resolves a relative path with respect to a base path. + + :param base: The base path. + :param path: The relative path. + :return: The resolved path. + :rtype: str + """ + return os.path.normpath(join(base, path)) + + +def exists(path) -> bool: + """ + Check if a file or directory exists at the given path. + + :param path: The path to the file or directory. + :return: ``True`` if the file or directory exists, ``False`` otherwise. + :rtype: bool + """ + return os.path.exists(path) + + +def join(path: str, *paths) -> str: + """ + Joins one or more path components intelligently. + + :param path: The base path. + :param *paths: Additional path components to be joined. + :return: The concatenated path. + :rtype: str + """ + return os.path.join(path, *paths) diff --git a/source/qdk_package/qdk/_http.py b/source/qdk_package/qdk/_http.py new file mode 100644 index 0000000000..240ddcc67f --- /dev/null +++ b/source/qdk_package/qdk/_http.py @@ -0,0 +1,30 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +""" +_http.py + +This module provides HTTP utility functions for interacting with +GitHub repositories. +""" + + +def fetch_github(owner: str, repo: str, ref: str, path: str) -> str: + """ + Fetches the content of a file from a GitHub repository. 
+
+    :param owner: The owner of the GitHub repository.
+    :param repo: The name of the GitHub repository.
+    :param ref: The reference (branch, tag, or commit) of the repository.
+    :param path: The path to the file within the repository.
+    :return: The content of the file as a string.
+    :rtype: str
+    :raises urllib.error.HTTPError: If there is an error fetching the file from GitHub.
+    :raises urllib.error.URLError: If there is an error with the URL.
+    """
+
+    import urllib.request
+
+    path_no_leading_slash = path[1:] if path.startswith("/") else path
+    url = f"https://raw.githubusercontent.com/{owner}/{repo}/{ref}/{path_no_leading_slash}"
+    return urllib.request.urlopen(url).read().decode("utf-8-sig")
diff --git a/source/qdk_package/qdk/_ipython.py b/source/qdk_package/qdk/_ipython.py
new file mode 100644
index 0000000000..c010befe72
--- /dev/null
+++ b/source/qdk_package/qdk/_ipython.py
@@ -0,0 +1,88 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
+"""
+_ipython.py
+
+This module provides IPython magic functions for integrating Q# code
+execution within Jupyter notebooks.
+"""
+
+from time import monotonic
+from IPython.display import display, Javascript, clear_output
+from IPython.core.magic import register_cell_magic
+from ._native import QSharpError
+from ._qsharp import get_interpreter, qsharp_value_to_python_value
+from . import telemetry_events
+import pathlib
+
+
+def register_magic():
+    @register_cell_magic
+    def qsharp(line, cell):
+        """Cell magic to interpret Q# code in Jupyter notebooks."""
+        # This effectively pings the kernel to ensure it recognizes the cell is running and helps with
+        # accurate cell execution timing.
+        clear_output()
+
+        def callback(output):
+            display(output)
+            # This is a workaround to ensure that the output is flushed. This avoids an issue
+            # where the output is not displayed until the next output is generated or the cell
+            # is finished executing.
+ display(display_id=True) + + telemetry_events.on_run_cell() + start_time = monotonic() + + try: + results = qsharp_value_to_python_value( + get_interpreter().interpret(cell, callback) + ) + + durationMs = (monotonic() - start_time) * 1000 + telemetry_events.on_run_cell_end(durationMs) + + return results + except QSharpError as e: + # pylint: disable=raise-missing-from + raise QSharpCellError(str(e)) + + +def enable_classic_notebook_codemirror_mode(): + """ + Registers %%qsharp cells with MIME type text/x-qsharp + and defines a CodeMirror mode to enable syntax highlighting. + This only works in "classic" Jupyter notebooks, not Notebook v7. + """ + js_to_inject = open( + pathlib.Path(__file__) + .parent.resolve() + .joinpath(".data", "qsharp_codemirror.js"), + mode="r", + encoding="utf-8", + ).read() + + # Extend the JavaScript display helper to print nothing when used + # in a non-browser context (i.e. IPython console) + class JavaScriptWithPlainTextFallback(Javascript): + def __repr__(self): + return "" + + # This will run the JavaScript in the context of the frontend. + display(JavaScriptWithPlainTextFallback(js_to_inject)) + + +class QSharpCellError(BaseException): + """ + Error raised when a %%qsharp cell fails. + """ + + def __init__(self, traceback: str): + self.traceback = traceback.splitlines() + + def _render_traceback_(self): + # We want to specifically override the traceback so that + # the Q# error directly from the interpreter is shown + # instead of the Python error. + return self.traceback diff --git a/source/qdk_package/qdk/_native.pyi b/source/qdk_package/qdk/_native.pyi new file mode 100644 index 0000000000..a84d950584 --- /dev/null +++ b/source/qdk_package/qdk/_native.pyi @@ -0,0 +1,1140 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from enum import Enum +from typing import Any, Callable, Optional, Dict, List, Tuple, TypedDict, overload + +# pylint: disable=unused-argument +# E302 is fighting with the formatter for number of blank lines +# flake8: noqa: E302 + +class OutputSemantics(Enum): + """ + Represents the output semantics for OpenQASM 3 compilation. + Each has implications on the output of the compilation + and the semantic checks that are performed. + """ + + Qiskit: OutputSemantics + """ + The output is in Qiskit format meaning that the output + is all of the classical registers, in reverse order + in which they were added to the circuit with each + bit within each register in reverse order. + """ + + OpenQasm: OutputSemantics + """ + [OpenQASM 3 has two output modes](https://openqasm.com/language/directives.html#input-output) + - If the programmer provides one or more `output` declarations, then + variables described as outputs will be returned as output. + The spec make no mention of endianness or order of the output. + - Otherwise, assume all of the declared variables are returned as output. + """ + + ResourceEstimation: OutputSemantics + """ + No output semantics are applied. The entry point returns `Unit`. + """ + +class ProgramType(Enum): + """ + Represents the type of compilation output to create + """ + + File: ProgramType + """ + Creates an operation in a namespace as if the program is a standalone + file. Inputs are lifted to the operation params. Output are lifted to + the operation return type. The operation is marked as `@EntryPoint` + as long as there are no input parameters. + """ + + Operation: ProgramType + """ + Programs are compiled to a standalone function. Inputs are lifted to + the operation params. Output are lifted to the operation return type. + """ + + Fragments: ProgramType + """ + Creates a list of statements from the program. This is useful for + interactive environments where the program is a list of statements + imported into the current scope. 
+ This is also useful for testing individual statements compilation. + """ + +class TargetProfile(Enum): + """ + A Q# target profile. + + A target profile describes the capabilities of the hardware or simulator + which will be used to run the Q# program. + """ + + @classmethod + def from_str(cls, value: str) -> TargetProfile: ... + """ + Creates a target profile from a string. + :param value: The string to parse. + :raises ValueError: If the string does not match any target profile. + """ + + Base: TargetProfile + """ + Target supports the minimal set of capabilities required to run a quantum + program. + + This option maps to the Base Profile as defined by the QIR specification. + """ + + Adaptive_RI: TargetProfile + """ + Target supports the Adaptive profile with the integer computation extension. + + This profile includes all of the required Adaptive Profile + capabilities, as well as the optional integer computation + extension defined by the QIR specification. + """ + + Adaptive_RIF: TargetProfile + """ + Target supports the Adaptive profile with integer & floating-point + computation extensions. + + This profile includes all required Adaptive Profile and `Adaptive_RI` + capabilities, as well as the optional floating-point computation + extension defined by the QIR specification. + """ + + Adaptive_RIFLA: TargetProfile + """ + Target supports the Adaptive profile with integer & floating-point + computation extensions as well as loop extension and statically-sized + arrays extension. + """ + + Unrestricted: TargetProfile + """ + Describes the unrestricted set of capabilities required to run any Q# program. + """ + +class GlobalCallable: + """ + A callable reference that can be invoked with arguments. + """ + + ... + +class Closure: + """ + A closure reference that can be passed back into Q#. + """ + + ... 
+
+class Interpreter:
+    """A Q# interpreter."""
+
+    def __init__(
+        self,
+        target_profile: TargetProfile,
+        language_features: Optional[List[str]],
+        project_root: Optional[str],
+        read_file: Callable[[str], Tuple[str, str]],
+        list_directory: Callable[[str], List[Dict[str, str]]],
+        resolve_path: Callable[[str, str], str],
+        fetch_github: Callable[[str, str, str, str], str],
+        make_callable: Optional[Callable[[GlobalCallable, List[str], str], None]],
+        make_class: Optional[Callable[[TypeIR, List[str], str], None]],
+        trace_circuit: Optional[bool],
+    ) -> None:
+        """
+        Initializes the Q# interpreter.
+
+        :param target_profile: The target profile to use for the interpreter.
+        :param project_root: A directory that contains a `qsharp.json` manifest.
+        :param read_file: A function that reads a file from the file system.
+        :param list_directory: A function that lists the contents of a directory.
+        :param resolve_path: A function that joins path segments and normalizes the resulting path.
+        :param make_callable: A function that registers a Q# callable in the environment module.
+        :param trace_circuit: Enables tracing of circuit during execution.
+            Passing `True` is required for the `dump_circuit` function to return a circuit.
+            The `circuit` function is *NOT* affected by this parameter and will always generate a circuit.
+        """
+        ...
+
+    def interpret(self, input: str, output_fn: Callable[[Output], None]) -> Any:
+        """
+        Interprets Q# source code.
+
+        :param input: The Q# source code to interpret.
+        :param output_fn: A callback function that will be called with each output.
+
+        :returns value: The value returned by the last statement in the input.
+
+        :raises QSharpError: If there is an error interpreting the input.
+        """
+        ...
+ + def run( + self, + entry_expr: Optional[str], + output_fn: Optional[Callable[[Output], None]], + noise_config: Optional[NoiseConfig], + noise: Optional[Tuple[float, float, float]], + qubit_loss: Optional[float], + callable: Optional[GlobalCallable | Closure], + args: Optional[Any], + seed: Optional[int], + ) -> Any: + """ + Runs the given Q# expression with an independent instance of the simulator. + + :param entry_expr: The entry expression. + :param output_fn: A callback function that will be called with each output. + :param noise_config: The noise configuration to use in simulation. + :param noise: A tuple with probabilities of Pauli-X, Pauli-Y, and Pauli-Z errors + to use in simulation as a parametric Pauli noise. + :param qubit_loss: The probability of qubit loss in simulation. + :param callable: The callable to run, if no entry expression is provided. + :param args: The arguments to pass to the callable, if any. + :param seed: The seed to use for the random number generator in simulation, if any. + + :returns values: A result or runtime errors. + + :raises QSharpError: If there is an error interpreting the input. + """ + ... + + def invoke( + self, + callable: GlobalCallable | Closure, + args: Any, + output_fn: Callable[[Output], None], + ) -> Any: + """ + Invokes the callable with the given arguments, converted into the appropriate Q# values. + :param callable: The callable to invoke. + :param args: The arguments to pass to the callable. + :param output_fn: A callback function that will be called with each output. + :returns values: A result or runtime errors. + :raises QSharpError: If there is an error interpreting the input. + """ + ... + + def qir( + self, + entry_expr: Optional[str] = None, + callable: Optional[GlobalCallable | Closure] = None, + args: Optional[Any] = None, + ) -> str: + """ + Generates QIR from Q# source code. Either an entry expression or a callable with arguments must be provided. + + :param entry_expr: The entry expression. 
+        :param callable: The callable to generate QIR for, if no entry expression is provided.
+        :param args: The arguments to pass to the callable, if any.
+
+        :returns qir: The QIR string.
+        """
+        ...
+
+    def circuit(
+        self,
+        config: CircuitConfig,
+        entry_expr: Optional[str] = None,
+        *,
+        operation: Optional[str] = None,
+        callable: Optional[GlobalCallable | Closure] = None,
+        args: Optional[Any] = None,
+    ) -> Circuit:
+        """
+        Synthesizes a circuit for a Q# program. Either an entry
+        expression or an operation must be provided.
+
+        :param config: Circuit generation options.
+
+        :param entry_expr: An entry expression.
+
+        :keyword operation: The operation to synthesize. This can be a name of
+            an operation or a lambda expression. The operation must take only
+            qubits or arrays of qubits as parameters.
+
+        :keyword callable: The callable to synthesize the circuit for, if no entry expression is provided.
+
+        :keyword args: The arguments to pass to the callable, if any.
+
+        :raises QSharpError: If there is an error synthesizing the circuit.
+        """
+        ...
+
+    def estimate(
+        self,
+        params: str,
+        entry_expr: Optional[str] = None,
+        callable: Optional[GlobalCallable | Closure] = None,
+        args: Optional[Any] = None,
+    ) -> str:
+        """
+        Estimates resources for Q# source code.
+
+        :param params: The parameters to configure estimation.
+        :param entry_expr: The entry expression to estimate.
+        :param callable: The callable to estimate resources for, if no entry expression is provided.
+        :param args: The arguments to pass to the callable, if any.
+
+        :returns resources: The estimated resources.
+        """
+        ...
+
+    def logical_counts(
+        self,
+        entry_expr: Optional[str] = None,
+        callable: Optional[GlobalCallable | Closure] = None,
+        args: Optional[Any] = None,
+    ) -> Dict[str, int]:
+        """
+        Estimates logical operation counts for Q# source code.
+
+        :param entry_expr: The entry expression to estimate.
+ :param callable: The callable to estimate resources for, if no entry expression is provided. + :param args: The arguments to pass to the callable, if any. + + :returns resources: The logical resources. + """ + ... + + def set_quantum_seed(self, seed: Optional[int]) -> None: + """ + Sets the seed for the quantum random number generator. + + :param seed: The seed to use for the quantum random number generator. If None, + the seed will be generated from entropy. + """ + ... + + def set_classical_seed(self, seed: Optional[int]) -> None: + """ + Sets the seed for the classical random number generator. + + :param seed: The seed to use for the classical random number generator. If None, + the seed will be generated from entropy. + """ + ... + + def dump_machine(self) -> StateDumpData: + """ + Returns the sparse state vector of the simulator as a StateDump object. + + :return: The state of the simulator. + """ + ... + + def dump_circuit(self) -> Circuit: + """ + Dumps a circuit showing the current state of the simulator. + + This circuit will contain the gates that have been applied + in the simulator up to the current point. + + Requires the interpreter to be initialized with `trace_circuit=True`. + + :raises QSharpError: If the interpreter was not initialized with ``trace_circuit=True``. + """ + ... + + def import_qasm( + self, + source: str, + output_fn: Callable[[Output], None], + read_file: Callable[[str], Tuple[str, str]], + list_directory: Callable[[str], List[Dict[str, str]]], + resolve_path: Callable[[str, str], str], + fetch_github: Callable[[str, str, str, str], str], + **kwargs, + ) -> Any: + """ + Imports OpenQASM source code into the active Q# interpreter. + + :param source: An OpenQASM program or fragment. + :param output_fn: The function to handle the output of the execution. + :param read_file: A callable that reads a file and returns its content and path. + :param list_directory: A callable that lists the contents of a directory. 
+ :param resolve_path: A callable that resolves a file path given a base path and a relative path. + :param fetch_github: A callable that fetches a file from GitHub. + :param **kwargs: Common options: + + - ``name`` (str): The name of the program. + - ``search_path`` (str): The optional search path for resolving file references. + - ``output_semantics`` (OutputSemantics): The output semantics for the compilation. + - ``program_type`` (ProgramType): The type of program compilation to perform. + :return: The value returned by the last statement in the source code. + :raises QasmError: If there is an error generating, parsing, or analyzing the OpenQASM source. + :raises QSharpError: If there is an error compiling or evaluating the program. + """ + ... + +class Result(Enum): + """ + A Q# measurement result. + """ + + Zero: int + One: int + Loss: int + +class Pauli(Enum): + """ + A Q# Pauli operator. + """ + + I: int + X: int + Y: int + Z: int + +class Output: + """ + An output returned from the Q# interpreter. + Outputs can be a state dumps or messages. These are normally printed to the console. + """ + + def __repr__(self) -> str: ... + def __str__(self) -> str: ... + def _repr_markdown_(self) -> Optional[str]: ... + def state_dump(self) -> Optional[StateDumpData]: ... + def is_state_dump(self) -> bool: ... + def is_matrix(self) -> bool: ... + def is_message(self) -> bool: ... + +class StateDumpData: + """ + A state dump returned from the Q# interpreter. + """ + + """ + The number of allocated qubits at the time of the dump. + """ + qubit_count: int + + """ + Get the amplitudes of the state vector as a dictionary from state integer to + complex amplitudes. + """ + def get_dict(self) -> dict: ... + def __repr__(self) -> str: ... + def __str__(self) -> str: ... + def _repr_markdown_(self) -> str: ... + def _repr_latex_(self) -> Optional[str]: ... + +class CircuitConfig: + """ + Configuration options for circuit generation. 
+ """ + + def __init__( + self, + *, + max_operations: Optional[int] = None, + generation_method: Optional["CircuitGenerationMethod"] = None, + source_locations: bool = False, + group_by_scope: bool = False, + prune_classical_qubits: bool = False, + ) -> None: ... + + max_operations: Optional[int] + """ + The maximum number of operations to include in the generated circuit. + """ + + generation_method: Optional[CircuitGenerationMethod] + """ + The method to use for circuit generation. + """ + + source_locations: Optional[bool] + """ + Whether to include source locations in the generated circuit. + """ + +class CircuitGenerationMethod(Enum): + """ + The method to use for circuit generation. + """ + + ClassicalEval: CircuitGenerationMethod + """ + Use classical evaluation to generate the circuit. + """ + + Simulate: CircuitGenerationMethod + """ + Use simulation to generate the circuit. + """ + + Static: CircuitGenerationMethod + """ + Compile the program and transform to a circuit using partial evaluation. + Only works for AdaptiveRIF-compliant programs. + Requires a non-Unrestricted target profile (e.g. TargetProfile.Adaptive_RIF). + """ + +class Circuit: + """ + A quantum circuit diagram generated from a Q# or OpenQASM program. + + Returned by :func:`qsharp.circuit` and :func:`qsharp.dump_circuit`. + """ + + def json(self) -> str: ... + def __repr__(self) -> str: ... + def __str__(self) -> str: ... + +class QSharpError(BaseException): + """ + An error returned from the Q# interpreter. + """ + + ... + +class QasmError(BaseException): + """ + An error returned from the OpenQASM parser. + """ + + ... + +def physical_estimates(logical_resources: str, params: str) -> str: + """ + Estimates physical resources from pre-calculated logical resources. + + :param logical_resources: The logical resources to estimate from. + :param params: The parameters to configure physical estimation. + + :return: The estimated resources. + :rtype: str + """ + ... 
+ +def circuit_qasm_program( + source: str, + read_file: Callable[[str], Tuple[str, str]], + list_directory: Callable[[str], List[Dict[str, str]]], + resolve_path: Callable[[str, str], str], + fetch_github: Callable[[str, str, str, str], str], + **kwargs, +) -> Circuit: + """ + Synthesizes a circuit for an OpenQASM program. + + .. note:: + This call while exported is not intended to be used directly by the user. + It is intended to be used by the Python wrapper which will handle the + callbacks and other Python specific details. + + :param source: An OpenQASM program. + :param read_file: A callable that reads a file and returns its content and path. + :param list_directory: A callable that lists the contents of a directory. + :param resolve_path: A callable that resolves a file path given a base path and a relative path. + :param fetch_github: A callable that fetches a file from GitHub. + :param **kwargs: Common options: + + - ``name`` (str): The name of the program. + - ``search_path`` (str): The optional search path for resolving file references. + :return: The synthesized circuit. + :rtype: Circuit + :raises QasmError: If there is an error generating, parsing, or analyzing the OpenQASM source. + :raises QSharpError: If there is an error evaluating or synthesizing the circuit. + """ + ... + +def compile_qasm_program_to_qir( + source: str, + read_file: Callable[[str], Tuple[str, str]], + list_directory: Callable[[str], List[Dict[str, str]]], + resolve_path: Callable[[str, str], str], + fetch_github: Callable[[str, str, str, str], str], + **kwargs, +) -> str: + """ + Compiles the OpenQASM source code into a program that can be submitted to a + target as QIR (Quantum Intermediate Representation). + + .. note:: + This call while exported is not intended to be used directly by the user. + It is intended to be used by the Python wrapper which will handle the + callbacks and other Python specific details. + + :param source: The OpenQASM source code to compile to QIR. 
+ :param read_file: A callable that reads a file and returns its content and path. + :param list_directory: A callable that lists the contents of a directory. + :param resolve_path: A callable that resolves a file path given a base path and a relative path. + :param fetch_github: A callable that fetches a file from GitHub. + :param **kwargs: Common options: + + - ``name`` (str): The name of the circuit. + - ``target_profile`` (TargetProfile): The target profile to use for code generation. + - ``search_path`` (str): The optional search path for resolving file references. + - ``output_semantics`` (OutputSemantics): The output semantics for the compilation. + :return: The converted QIR code as a string. + :rtype: str + :raises QasmError: If there is an error generating, parsing, or analyzing the OpenQASM source. + :raises QSharpError: If there is an error compiling the program. + """ + ... + +def compile_qasm_to_qsharp( + source: str, + read_file: Callable[[str], Tuple[str, str]], + list_directory: Callable[[str], List[Dict[str, str]]], + resolve_path: Callable[[str, str], str], + fetch_github: Callable[[str, str, str, str], str], + **kwargs, +) -> str: + """ + Converts a OpenQASM program to Q#. + + .. note:: + This call while exported is not intended to be used directly by the user. + It is intended to be used by the Python wrapper which will handle the + callbacks and other Python specific details. + + :param source: The OpenQASM source code to convert. + :param read_file: A callable that reads a file and returns its content and path. + :param list_directory: A callable that lists the contents of a directory. + :param resolve_path: A callable that resolves a file path given a base path and a relative path. + :param fetch_github: A callable that fetches a file from GitHub. + :param **kwargs: Common options: + + - ``name`` (str): The name of the circuit. + - ``search_path`` (str): The optional search path for resolving file references. 
+ :return: The converted Q# code as a string. + :rtype: str + """ + ... + +def resource_estimate_qasm_program( + source: str, + job_params: str, + read_file: Callable[[str], Tuple[str, str]], + list_directory: Callable[[str], List[Dict[str, str]]], + resolve_path: Callable[[str, str], str], + fetch_github: Callable[[str, str, str, str], str], + **kwargs, +) -> str: + """ + Estimates the resource requirements for executing OpenQASM source code. + + .. note:: + This call while exported is not intended to be used directly by the user. + It is intended to be used by the Python wrapper which will handle the + callbacks and other Python specific details. + + :param source: The OpenQASM source code to estimate resource requirements for. + :param job_params: The parameters for the job as a JSON string. + :param read_file: A callable that reads a file and returns its content and path. + :param list_directory: A callable that lists the contents of a directory. + :param resolve_path: A callable that resolves a file path given a base path and a relative path. + :param fetch_github: A callable that fetches a file from GitHub. + :param **kwargs: Common options: + + - ``name`` (str): The name of the circuit. Defaults to ``'program'``. + - ``search_path`` (str): The optional search path for resolving imports. + :return: The estimated resource requirements as a JSON string. + :rtype: str + """ + ... + +def run_qasm_program( + source: str, + output_fn: Callable[[Output], None], + noise_config: Optional[NoiseConfig], + noise: Optional[Tuple[float, float, float]], + qubit_loss: Optional[float], + read_file: Callable[[str], Tuple[str, str]], + list_directory: Callable[[str], List[Dict[str, str]]], + resolve_path: Callable[[str, str], str], + fetch_github: Callable[[str, str, str, str], str], + **kwargs, +) -> Any: + """ + Runs the given OpenQASM program for the given number of shots. + Each shot uses an independent instance of the simulator. + + .. 
note:: + This call while exported is not intended to be used directly by the user. + It is intended to be used by the Python wrapper which will handle the + callbacks and other Python specific details. + + :param source: The OpenQASM source code to execute. + :param output_fn: The function to handle the output of the execution. + :param noise_config: Optional noise configuration for noisy simulation. + :param noise: Optional Pauli noise as a tuple of ``(x, y, z)`` probabilities. + :param qubit_loss: The probability of qubit loss in simulation. + :param read_file: A callable that reads a file and returns its contents. + :param list_directory: A callable that lists the contents of a directory. + :param resolve_path: A callable that resolves a path given a base path and a relative path. + :param fetch_github: A callable that fetches a file from GitHub. + :param **kwargs: Common options: + + - ``target_profile`` (TargetProfile): The target profile to use for execution. + - ``name`` (str): The name of the circuit. Defaults to ``'program'``. + - ``search_path`` (str): The optional search path for resolving imports. + - ``output_semantics`` (OutputSemantics): The output semantics for the compilation. + - ``shots`` (int): The number of shots to run. Defaults to ``1``. + - ``seed`` (int): The seed to use for the random number generator. + :return: The result of the execution. + :rtype: Any + :raises QasmError: If there is an error generating, parsing, or analyzing the OpenQASM source. + :raises QSharpError: If there is an error interpreting the input. + """ + ... + +def estimate_custom( + algorithm, + qubit, + qec, + factories: List = [], + *, + error_budget: float = 0.01, + max_factories: Optional[int] = None, + logical_depth_factor: Optional[float] = None, + max_physical_qubits: Optional[int] = None, + max_duration: Optional[int] = None, + error_budget_pruning: bool = False, +) -> Dict: + """ + Estimates quantum resources for a given algorithm, qubit, and code. 
+ + :param algorithm: Python object representing the algorithm. + :param qubit: The qubit properties as a dictionary. + :param qec: Python object representing the quantum error correction code. + :param factories: List of python objects representing factories. Defaults to ``[]``. + :type factories: List + :keyword error_budget: The total error budget, which is uniformly distributed. Defaults to ``0.01``. + :kwtype error_budget: float + :keyword max_factories: Constrains the number of factories. Defaults to ``None``. + :kwtype max_factories: int + :keyword logical_depth_factor: Extends algorithmic logical depth by a factor >= 1. Defaults to ``None``. + :kwtype logical_depth_factor: float + :keyword max_physical_qubits: Forces estimator to not exceed provided number of physical qubits, may fail. + Defaults to ``None``. + :kwtype max_physical_qubits: int + :keyword max_duration: Allows estimator to run for given runtime in nanoseconds, may fail. + Defaults to ``None``. + :kwtype max_duration: int + :keyword error_budget_pruning: Will try to prune the error budget to increase magic state error budget. + Defaults to ``False``. + :kwtype error_budget_pruning: bool + :return: A dictionary with resource estimation results. + :rtype: Dict + """ + ... + +class UdtValue: + """ + A Q# UDT value. Objects of this class represent UDT values generated + in Q# and sent to Python. It is then converted into a Python object + in the `qsharp_value_to_python_value` function in `_qsharp.py`. + """ + + name: str + fields: List[Tuple[str, Any]] + +class TypeIR: + """ + A Q# type. Objects of this class represent a Q# type. This is used + to send the definitions of the Q# UDTs defined by the user to Python + and creating equivalent Python dataclasses in `qsharp.code.*`. + """ + + def kind(self) -> TypeKind: ... + def unwrap_primitive(self) -> PrimitiveKind: ... + def unwrap_tuple(self) -> List[TypeIR]: ... + def unwrap_array(self) -> List[TypeIR]: ... + def unwrap_udt(self) -> UdtIR: ... 
+ +class TypeKind(Enum): + """ + A Q# type kind. + """ + + Primitive: int + Tuple: int + Array: int + Udt: int + +class PrimitiveKind(Enum): + """ + A Q# primitive. + """ + + Bool: int + Int: int + Double: int + Complex: int + String: int + Pauli: int + Result: int + +class UdtIR: + """ + A Q# Udt. + """ + + name: str + fields: List[Tuple[str, TypeIR]] + +class QirInstructionId(Enum): + I: QirInstructionId + H: QirInstructionId + X: QirInstructionId + Y: QirInstructionId + Z: QirInstructionId + S: QirInstructionId + SAdj: QirInstructionId + SX: QirInstructionId + SXAdj: QirInstructionId + T: QirInstructionId + TAdj: QirInstructionId + CNOT: QirInstructionId + CX: QirInstructionId + CY: QirInstructionId + CZ: QirInstructionId + CCX: QirInstructionId + SWAP: QirInstructionId + RX: QirInstructionId + RY: QirInstructionId + RZ: QirInstructionId + RXX: QirInstructionId + RYY: QirInstructionId + RZZ: QirInstructionId + RESET: QirInstructionId + M: QirInstructionId + MResetZ: QirInstructionId + MZ: QirInstructionId + Move: QirInstructionId + ReadResult: QirInstructionId + ResultRecordOutput: QirInstructionId + BoolRecordOutput: QirInstructionId + IntRecordOutput: QirInstructionId + DoubleRecordOutput: QirInstructionId + TupleRecordOutput: QirInstructionId + ArrayRecordOutput: QirInstructionId + CorrelatedNoise: QirInstructionId + +class QirInstruction: ... + +class IdleNoiseParams: + s_probability: float + +class NoiseTable: + loss: float + + def __init__(self, num_qubits: int): + """ + Initializes a new noise table for an operation that targets `num_qubits` qubits. + """ + + def __getattr__(self, name: str) -> float: + """ + Defining __getattr__ allows getting noise like this + + noise_table.ziz + + for arbitrary pauli fields. + """ + + def __setattr__(self, name: str, value: float): + """ + Defining __setattr__ allows setting noise like this + + noise_table = NoiseTable(3) + noise_table.ziz = 0.005 + + for arbitrary pauli fields. 
Setting an element that was + previously set overrides that entry with the new value. + """ + + @overload + def set_pauli_noise(self, lst: list[tuple[str, float]]): + """ + The correlated pauli noise to use in simulation. Setting an element + that was previously set overrides that entry with the new value. + + Example:: + + noise_table = NoiseTable(2) + noise_table.set_pauli_noise([("XI", 1e-10), ("XZ", 1e-8)]) + """ + + @overload + def set_pauli_noise(self, pauli_strings: list[str], values: list[float]): + """ + The correlated pauli noise to use in simulation. Setting an element + that was previously set overrides that entry with the new value. + + Example:: + + noise_table = NoiseTable(2) + noise_table.set_pauli_noise(["XI", "XZ"], [1e-10, 3.7e-8]) + """ + + @overload + def set_pauli_noise(self, pauli_string: str, value: float): + """ + The correlated pauli noise to use in simulation. Setting an element + that was previously set overrides that entry with the new value. + + Example:: + + noise_table = NoiseTable(2) + noise_table.set_pauli_noise("XZ", 1e-10) + """ + + def set_depolarizing(self, value: float): + """ + The depolarizing noise to use in simulation. + """ + + def set_bitflip(self, value: float): + """ + The bit flip noise to use in simulation. + """ + + def set_phaseflip(self, value: float): + """ + The phase flip noise to use in simulation. + """ + +class NoiseIntrinsicsTable: + def __contains__(self, name: str) -> bool: + """ + This enables support for `in` membership checks. 
+
+        """
+
+    def __getitem__(self, name: str) -> NoiseTable:
+        """
+        Defining __getitem__ allows getting intrinsic noise tables like this:
+        noise_config = NoiseConfig()
+        my_intrinsic_noise_table = noise_config.intrinsics["my_intrinsic"]
+        """
+
+    def __setitem__(self, name: str, value: NoiseTable):
+        """
+        Defining __setitem__ allows setting intrinsic noise tables like this:
+        noise_config = NoiseConfig()
+        my_intrinsic_noise_table = NoiseTable(3)
+        my_intrinsic_noise_table.ziz = 0.01
+        noise_config.intrinsics["my_intrinsic"] = my_intrinsic_noise_table
+        """
+
+    def get_intrinsic_id(self, name: str) -> int:
+        """
+        Each intrinsic inserted in the table is assigned an integer id.
+        This method returns that id given an intrinsic's name.
+        """
+
+class NoiseConfig:
+    x: NoiseTable
+    y: NoiseTable
+    z: NoiseTable
+    h: NoiseTable
+    s: NoiseTable
+    s_adj: NoiseTable
+    t: NoiseTable
+    t_adj: NoiseTable
+    sx: NoiseTable
+    sx_adj: NoiseTable
+    rx: NoiseTable
+    ry: NoiseTable
+    rz: NoiseTable
+    cx: NoiseTable
+    cy: NoiseTable
+    cz: NoiseTable
+    rxx: NoiseTable
+    ryy: NoiseTable
+    rzz: NoiseTable
+    swap: NoiseTable
+    mov: NoiseTable
+    mresetz: NoiseTable
+    # idle: IdleNoiseParams
+    intrinsics: NoiseIntrinsicsTable
+
+    def intrinsic(self, name: str, num_qubits: int) -> NoiseTable:
+        """
+        The noise table for a custom intrinsic.
+        """
+
+    def load_csv_dir(self, dir_path: str):
+        """
+        Loads noise tables from the specified directory path. For each .csv file found in the directory,
+        the noise table is loaded and associated with a unique identifier. The name of the file (without the .csv extension)
+        is used as the label for the noise table, which should match the QIR instruction that will apply noise using this table.
+ + Each line of the table should be of the format: "IXYZ,1.345e-4" where IXYZ is a string of Pauli operators + representing the error on each qubit (Z applying to the first qubit argument, Y to the second, etc.), and the second value + is the corresponding error probability for that specific Pauli string. + + Blank lines, lines starting with #, or lines that start with the string "pauli" (i.e., a column header) are ignored. + """ + ... + +def run_clifford( + input: List[QirInstruction], + num_qubits: int, + num_results: int, + shots: int, + noise: Optional[NoiseConfig], + seed: Optional[int], +) -> List[str]: + """ + Run the given list of QIR instructions in a Clifford simulator, + using the given `NoiseConfig`, if any. + + Returns a list of result strings. Each result string is composed + of '0's, '1's, and 'L's, representing if each measurement result + was a Zero, One, or Loss respectively. + """ + ... + +def run_cpu_full_state( + input: List[QirInstruction], + num_qubits: int, + num_results: int, + shots: int, + noise: Optional[NoiseConfig], + seed: Optional[int], +) -> List[str]: + """ + Run the given list of QIR instructions in a CPU full-state simulator, + using the given `NoiseConfig`, if any. + + Returns a list of result strings. Each result string is composed + of '0's, '1's, and 'L's, representing if each measurement result + was a Zero, One, or Loss respectively. + """ + ... + +def try_create_gpu_adapter() -> str: + """ + Checks if a compatible GPU adapter is available on the system. + + This function attempts to request a GPU adapter to determine if GPU-accelerated + quantum simulation is supported. It's useful for capability detection before + attempting to run GPU-based simulations. 
+
+    # Errors
+
+    Raises `OSError` if:
+    - No compatible GPU is found
+    - GPU drivers are missing or not functioning properly
+    """
+    pass
+
+def run_parallel_shots(
+    input: List[QirInstruction],
+    shots: int,
+    qubit_count: int,
+    result_count: int,
+    noise: Optional[NoiseConfig],
+    seed: Optional[int],
+) -> List[str]:
+    """ """
+    ...
+
+def run_adaptive_parallel_shots(
+    input: dict,
+    shots: int,
+    noise: Optional[NoiseConfig],
+    seed: Optional[int],
+) -> List[str]:
+    """
+    Run the given list of QIR instructions in a CPU full-state simulator,
+    using the given `NoiseConfig`, if any.
+
+    The input is an `AdaptiveProgram` converted to a dict using the
+    .as_dict() method.
+
+    Returns a list of result strings. Each result string is composed
+    of '0's, '1's, and 'L's, representing if each measurement result
+    was a Zero, One, or Loss respectively.
+    """
+    ...
+
+# This is a little clunky, but until we move to Python 3.11 as a minimum, the NotRequired annotation
+# for Dict fields that may be missing is not available. See https://peps.python.org/pep-0655/#motivation
+class _GpuShotResultsBase(TypedDict):
+    shot_results: List[str]
+    """Bit strings for each shot ('0', '1', or 'L' for lost qubits)."""
+
+    shot_result_codes: List[int]
+    """Result codes for each shot. 0 = Success, else Failure (Specific codes are an internal detail)."""
+
+class GpuShotResults(_GpuShotResultsBase, total=False):
+    """
+    Results from running shots on the GPU simulator.
+    """
+
+    diagnostics: str
+    """Diagnostic information if available. (Useful primarily for debugging by the development team)"""
+
+class GpuContext:
+    def load_noise_tables(self, dir_path: str) -> List[Tuple[int, str, int]]:
+        """
+        Loads noise tables from the specified directory path. For each .csv file found in the directory,
+        the noise table is loaded and associated with a unique identifier.
The name of the file (without the .csv extension)
+        is used as the label for the noise table, which should match the QIR instruction that will apply noise using this table.
+
+        Each line of the table should be of the format: "IXYZ,1.345e-4" where IXYZ is a string of Pauli operators
+        representing the error on each qubit (Z applying to the first qubit argument, Y to the second, etc.), and the second value
+        is the corresponding error probability for that specific Pauli string.
+
+        Blank lines, lines starting with #, or lines that start with the string "pauli" (i.e., a column header) are ignored.
+        """
+        ...
+
+    def get_noise_table_ids(self) -> List[Tuple[int, str, int]]:
+        """
+        Retrieves the identifiers of the currently loaded noise tables.
+        """
+        ...
+
+    def set_program(
+        self,
+        input: List[QirInstruction],
+        qubit_count: int,
+        result_count: int,
+    ) -> None:
+        """
+        Sets the QIR program to be executed on the GPU.
+        """
+        ...
+
+    def set_adaptive_program(self, program: dict) -> None:
+        """
+        Sets an Adaptive Profile QIR program for GPU execution.
+
+        The program dict contains bytecode instructions, block/function tables,
+        quantum op pool, and side tables produced by AdaptiveProfilePass.
+        """
+        ...
+
+    def set_noise(self, noise: NoiseConfig) -> None:
+        """
+        Sets the noise configuration for the GPU simulation.
+        """
+        ...
+
+    def run_shots(self, shot_count: int, seed: int) -> GpuShotResults:
+        """
+        Runs the specified number of shots of the loaded program on the GPU.
+        """
+        ...
+
+    def run_adaptive_shots(self, shot_count: int, seed: int) -> GpuShotResults:
+        """
+        Runs the specified number of shots of the loaded adaptive program on the GPU.
+        """
+        ...
diff --git a/source/qdk_package/qdk/_qsharp.py b/source/qdk_package/qdk/_qsharp.py
new file mode 100644
index 0000000000..b9e055a06b
--- /dev/null
+++ b/source/qdk_package/qdk/_qsharp.py
@@ -0,0 +1,1181 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
+from . 
import telemetry_events, code +from ._native import ( # type: ignore + Interpreter, + TargetProfile, + StateDumpData, + QSharpError, + Output, + Circuit, + GlobalCallable, + Closure, + Pauli, + Result, + UdtValue, + TypeIR, + TypeKind, + PrimitiveKind, + CircuitConfig, + CircuitGenerationMethod, + NoiseConfig, +) +from typing import ( + Any, + Callable, + Dict, + Optional, + Tuple, + TypedDict, + Union, + List, + Set, + Iterable, + cast, +) +from .estimator._estimator import ( + EstimatorResult, + EstimatorParams, + LogicalCounts, +) +import json +import os +import sys +import types +from pathlib import Path +from time import monotonic +from dataclasses import make_dataclass + + +def lower_python_obj(obj: object, visited: Optional[Set[object]] = None) -> Any: + if visited is None: + visited = set() + + if id(obj) in visited: + raise QSharpError("Cannot send circular objects from Python to Q#.") + + visited = visited.copy().add(id(obj)) + + # Base case: Primitive types + if isinstance(obj, (bool, int, float, complex, str, Pauli, Result)): + return obj + + # Recursive case: Tuple + if isinstance(obj, tuple): + return tuple(lower_python_obj(elt, visited) for elt in obj) + + # Recursive case: Dict + if isinstance(obj, dict): + return {name: lower_python_obj(val, visited) for name, val in obj.items()} + + # Base case: Callable or Closure + if hasattr(obj, "__global_callable"): + return obj.__getattribute__("__global_callable") + if isinstance(obj, (GlobalCallable, Closure)): + return obj + + # Recursive case: Class with slots + if hasattr(obj, "__slots__"): + fields = {} + for name in getattr(obj, "__slots__"): + if name == "__dict__": + for name, val in obj.__dict__.items(): + fields[name] = lower_python_obj(val, visited) + else: + val = getattr(obj, name) + fields[name] = lower_python_obj(val, visited) + return fields + + # Recursive case: Class + if hasattr(obj, "__dict__"): + fields = { + name: lower_python_obj(val, visited) for name, val in obj.__dict__.items() + } 
+ return fields + + # Recursive case: Array + # By using `Iterable` instead of `list`, we can handle other kind of iterables + # like numpy arrays and generators. + if isinstance(obj, Iterable): + return [lower_python_obj(elt, visited) for elt in obj] + + raise TypeError(f"unsupported type: {type(obj)}") + + +def python_args_to_interpreter_args(args): + """ + Helper function to turn the `*args` argument of this module + to the format expected by the Q# interpreter. + """ + if len(args) == 0: + return None + elif len(args) == 1: + return lower_python_obj(args[0]) + else: + return lower_python_obj(args) + + +_interpreter: Union["Interpreter", None] = None +_config: Union["Config", None] = None + +# Check if we are running in a Jupyter notebook to use the IPython display function +_in_jupyter = False +try: + from IPython.display import display + + if get_ipython().__class__.__name__ == "ZMQInteractiveShell": # type: ignore + _in_jupyter = True # Jupyter notebook or qtconsole +except: + pass + + +# Reporting execution time during IPython cells requires that IPython +# gets pinged to ensure it understands the cell is active. This is done by +# simply importing the display function, which it turns out is enough to begin timing +# while avoiding any UI changes that would be visible to the user. +def ipython_helper(): + try: + if __IPYTHON__: # type: ignore + from IPython.display import display + except NameError: + pass + + +class Config: + """ + Configuration hints for the language service. 
+ """ + + _config: Dict[str, Any] + + def __init__( + self, + target_profile: TargetProfile, + language_features: Optional[List[str]], + manifest: Optional[str], + project_root: Optional[str], + ): + if target_profile == TargetProfile.Adaptive_RI: + self._config = {"targetProfile": "adaptive_ri"} + elif target_profile == TargetProfile.Adaptive_RIF: + self._config = {"targetProfile": "adaptive_rif"} + elif target_profile == TargetProfile.Adaptive_RIFLA: + self._config = {"targetProfile": "adaptive_rifla"} + elif target_profile == TargetProfile.Base: + self._config = {"targetProfile": "base"} + elif target_profile == TargetProfile.Unrestricted: + self._config = {"targetProfile": "unrestricted"} + + if language_features is not None: + self._config["languageFeatures"] = language_features + if manifest is not None: + self._config["manifest"] = manifest + if project_root: + # For now, we only support local project roots, so use a file schema in the URI. + # In the future, we may support other schemes, such as github, if/when + # we have VS Code Web + Jupyter support. + self._config["projectRoot"] = Path(os.getcwd(), project_root).as_uri() + + def __repr__(self) -> str: + return "Q# initialized with configuration: " + str(self._config) + + # See https://ipython.readthedocs.io/en/stable/config/integrating.html#rich-display + # See https://ipython.org/ipython-doc/3/notebook/nbformat.html#display-data + # This returns a custom MIME-type representation of the Q# configuration. + # This data will be available in the cell output, but will not be displayed + # to the user, as frontends would not know how to render the custom MIME type. + # Editor services that interact with the notebook frontend + # (i.e. the language service) can read and interpret the data. 
+ def _repr_mimebundle_( + self, include: Union[Any, None] = None, exclude: Union[Any, None] = None + ) -> Dict[str, Dict[str, Any]]: + return {"application/x.qsharp-config": self._config} + + def get_target_profile(self) -> str: + """ + Returns the target profile as a string, or "unspecified" if not set. + """ + return self._config.get("targetProfile", "unspecified") + + +class PauliNoise(Tuple[float, float, float]): + """ + The Pauli noise to use in simulation represented + as probabilities of Pauli-X, Pauli-Y, and Pauli-Z errors + """ + + def __new__(cls, x: float, y: float, z: float): + """ + Creates a new :class:`PauliNoise` instance with the given error probabilities. + + :param x: Probability of a Pauli-X (bit flip) error. Must be non-negative. + :type x: float + :param y: Probability of a Pauli-Y error. Must be non-negative. + :type y: float + :param z: Probability of a Pauli-Z (phase flip) error. Must be non-negative. + :type z: float + :return: A new :class:`PauliNoise` tuple ``(x, y, z)``. + :rtype: PauliNoise + :raises ValueError: If any probability is negative or if ``x + y + z > 1``. + """ + if x < 0 or y < 0 or z < 0: + raise ValueError("Pauli noise probabilities must be non-negative.") + if x + y + z > 1: + raise ValueError("The sum of Pauli noise probabilities must be at most 1.") + return super().__new__(cls, (x, y, z)) + + +class DepolarizingNoise(PauliNoise): + """ + The depolarizing noise to use in simulation. + """ + + def __new__(cls, p: float): + """ + Creates a new :class:`DepolarizingNoise` instance. + + The depolarizing channel applies Pauli-X, Pauli-Y, or Pauli-Z errors each with + probability ``p / 3``. + + :param p: Total depolarizing error probability. Must satisfy ``0 ≤ p ≤ 1``. + :type p: float + :return: A new :class:`DepolarizingNoise` with equal X, Y, and Z error probabilities. + :rtype: DepolarizingNoise + :raises ValueError: If ``p`` is negative or ``p > 1``. 
+ """ + return super().__new__(cls, p / 3, p / 3, p / 3) + + +class BitFlipNoise(PauliNoise): + """ + The bit flip noise to use in simulation. + """ + + def __new__(cls, p: float): + """ + Creates a new :class:`BitFlipNoise` instance. + + The bit flip channel applies a Pauli-X error with probability ``p``. + + :param p: Probability of a bit flip (Pauli-X) error. Must satisfy ``0 ≤ p ≤ 1``. + :type p: float + :return: A new :class:`BitFlipNoise` with X error probability ``p``. + :rtype: BitFlipNoise + :raises ValueError: If ``p`` is negative or ``p > 1``. + """ + return super().__new__(cls, p, 0, 0) + + +class PhaseFlipNoise(PauliNoise): + """ + The phase flip noise to use in simulation. + """ + + def __new__(cls, p: float): + """ + Creates a new :class:`PhaseFlipNoise` instance. + + The phase flip channel applies a Pauli-Z error with probability ``p``. + + :param p: Probability of a phase flip (Pauli-Z) error. Must satisfy ``0 ≤ p ≤ 1``. + :type p: float + :return: A new :class:`PhaseFlipNoise` with Z error probability ``p``. + :rtype: PhaseFlipNoise + :raises ValueError: If ``p`` is negative or ``p > 1``. + """ + return super().__new__(cls, 0, 0, p) + + +def init( + *, + target_profile: TargetProfile = TargetProfile.Unrestricted, + target_name: Optional[str] = None, + project_root: Optional[str] = None, + language_features: Optional[List[str]] = None, + trace_circuit: Optional[bool] = None, +) -> Config: + """ + Initializes the Q# interpreter. + + :keyword target_profile: Setting the target profile allows the Q# + interpreter to generate programs that are compatible + with a specific target. See :class:`TargetProfile`. + + :keyword target_name: An optional name of the target machine to use for inferring the compatible + target_profile setting. + + :keyword project_root: An optional path to a root directory with a Q# project to include. + It must contain a qsharp.json project manifest. 
+
+    :keyword language_features: An optional list of language feature flags to enable.
+        These correspond to experimental or preview Q# language features.
+        Valid values are:
+
+        - ``"v2-preview-syntax"``: Enables Q# v2 preview syntax. This removes support for
+          the scoped qubit allocation block form (``use q = Qubit() { ... }``), requiring
+          the statement form instead (``use q = Qubit();``). It also removes the requirement
+          to use the ``set`` keyword for mutable variable assignments.
+
+    :keyword trace_circuit: Enables tracing of circuit during execution.
+        Passing `True` is required for the `dump_circuit` function to return a circuit.
+        The `circuit` function is *NOT* affected by this parameter and will always generate a circuit.
+    :return: The Q# interpreter configuration.
+    :rtype: Config
+    """
+    from ._fs import read_file, list_directory, exists, join, resolve
+    from ._http import fetch_github
+
+    global _interpreter
+    global _config
+
+    if isinstance(target_name, str):
+        target = target_name.split(".")[0].lower()
+        if target == "ionq" or target == "rigetti":
+            target_profile = TargetProfile.Base
+        elif target == "quantinuum":
+            target_profile = TargetProfile.Adaptive_RI
+        else:
+            raise QSharpError(
+                f'target_name "{target_name}" not recognized. Please set target_profile directly.'
+            )
+
+    manifest_contents = None
+    if project_root is not None:
+        # Normalize the project path (i.e. fix file separators and remove unnecessary '.' and '..')
+        project_root = resolve(".", project_root)
+        qsharp_json = join(project_root, "qsharp.json")
+        if not exists(qsharp_json):
+            raise QSharpError(
+                f"{qsharp_json} not found. qsharp.json should exist at the project root and be a valid JSON file."
+            )
+
+        try:
+            (_, manifest_contents) = read_file(qsharp_json)
+        except Exception as e:
+            raise QSharpError(
+                f"Error reading {qsharp_json}. qsharp.json should exist at the project root and be a valid JSON file."
+ ) from e + + # Loop through the environment module and remove any dynamically added attributes that represent + # Q# callables or structs. This is necessary to avoid conflicts with the new interpreter instance. + keys_to_remove = [] + for key, val in code.__dict__.items(): + if ( + hasattr(val, "__global_callable") + or hasattr(val, "__qsharp_class") + or isinstance(val, types.ModuleType) + ): + keys_to_remove.append(key) + for key in keys_to_remove: + code.__delattr__(key) + + # Also remove any namespace modules dynamically added to the system. + keys_to_remove = [] + for key in sys.modules: + if key.startswith("qdk.code."): + keys_to_remove.append(key) + for key in keys_to_remove: + sys.modules.__delitem__(key) + + _interpreter = Interpreter( + target_profile, + language_features, + project_root, + read_file, + list_directory, + resolve, + fetch_github, + _make_callable, + _make_class, + trace_circuit, + ) + + _config = Config(target_profile, language_features, manifest_contents, project_root) + # Return the configuration information to provide a hint to the + # language service through the cell output. + return _config + + +def get_interpreter() -> Interpreter: + """ + Returns the Q# interpreter. + + :return: The Q# interpreter. + :rtype: Interpreter + """ + global _interpreter + if _interpreter is None: + init() + assert _interpreter is not None, "Failed to initialize the Q# interpreter." + return _interpreter + + +def get_config() -> Config: + """ + Returns the Q# interpreter configuration. + + :return: The Q# interpreter configuration. + :rtype: Config + """ + global _config + if _config is None: + init() + assert _config is not None, "Failed to initialize the Q# interpreter." + return _config + + +class StateDump: + """ + A state dump returned from the Q# interpreter. + """ + + """ + The number of allocated qubits at the time of the dump. 
+ """ + qubit_count: int + + __inner: dict + __data: StateDumpData + + def __init__(self, data: StateDumpData): + self.__data = data + self.__inner = data.get_dict() + self.qubit_count = data.qubit_count + + def __getitem__(self, index: int) -> complex: + return self.__inner.__getitem__(index) + + def __iter__(self): + return self.__inner.__iter__() + + def __len__(self) -> int: + return len(self.__inner) + + def __repr__(self) -> str: + return self.__data.__repr__() + + def __str__(self) -> str: + return self.__data.__str__() + + def _repr_markdown_(self) -> str: + return self.__data._repr_markdown_() + + def check_eq( + self, state: Union[Dict[int, complex], List[complex]], tolerance: float = 1e-10 + ) -> bool: + """ + Checks if the state dump is equal to the given state. This is not mathematical equality, + as the check ignores global phase. + + :param state: The state to check against, provided either as a dictionary of state indices to complex amplitudes, + or as a list of real amplitudes. + :param tolerance: The tolerance for the check. Defaults to 1e-10. + :return: ``True`` if the state dump is equal to the given state within the given tolerance, ignoring global phase. + :rtype: bool + """ + phase = None + # Convert a dense list of real amplitudes to a dictionary of state indices to complex amplitudes + if isinstance(state, list): + state = {i: val for i, val in enumerate(state)} + # Filter out zero states from the state dump and the given state based on tolerance + state = {k: v for k, v in state.items() if abs(v) > tolerance} + inner_state = {k: v for k, v in self.__inner.items() if abs(v) > tolerance} + if len(state) != len(inner_state): + return False + for key in state: + if key not in inner_state: + return False + if phase is None: + # Calculate the phase based on the first state pair encountered. + # Every pair of states after this must have the same phase for the states to be equivalent. 
+ phase = inner_state[key] / state[key] + elif abs(phase - inner_state[key] / state[key]) > tolerance: + # This pair of states does not have the same phase, + # within tolerance, so the equivalence check fails. + return False + return True + + def as_dense_state(self) -> List[complex]: + """ + Returns the state dump as a dense list of complex amplitudes. This will include zero amplitudes. + + :return: A dense list of complex amplitudes, one per computational basis state. + :rtype: List[complex] + """ + return [self.__inner.get(i, complex(0)) for i in range(2**self.qubit_count)] + + +class ShotResult(TypedDict): + """ + A single result of a shot. + """ + + events: List[Output | StateDump | str] + result: Any + messages: List[str] + matrices: List[Output] + dumps: List[StateDump] + + +def eval( + source: str, + *, + save_events: bool = False, +) -> Any: + """ + Evaluates Q# source code. + + Output is printed to console. + + :param source: The Q# source code to evaluate. + :keyword save_events: If true, all output will be saved and returned. If false, they will be printed. + :return: The value returned by the last statement in the source code, or the saved output if ``save_events`` is true. + :rtype: Any + :raises QSharpError: If there is an error evaluating the source code. 
+ """ + ipython_helper() + + results: ShotResult = { + "events": [], + "result": None, + "messages": [], + "matrices": [], + "dumps": [], + } + + def on_save_events(output: Output) -> None: + # Append the output to the last shot's output list + if output.is_matrix(): + results["events"].append(output) + results["matrices"].append(output) + elif output.is_state_dump(): + dump_data = cast(StateDumpData, output.state_dump()) + state_dump = StateDump(dump_data) + results["events"].append(state_dump) + results["dumps"].append(state_dump) + elif output.is_message(): + stringified = str(output) + results["events"].append(stringified) + results["messages"].append(stringified) + + def callback(output: Output) -> None: + if _in_jupyter: + try: + display(output) + return + except: + # If IPython is not available, fall back to printing the output + pass + print(output, flush=True) + + telemetry_events.on_eval() + start_time = monotonic() + + output = get_interpreter().interpret( + source, on_save_events if save_events else callback + ) + results["result"] = qsharp_value_to_python_value(output) + + durationMs = (monotonic() - start_time) * 1000 + telemetry_events.on_eval_end(durationMs) + + if save_events: + return results + else: + return results["result"] + + +# Helper function that knows how to create a function that invokes a callable. This will be +# used by the underlying native code to create functions for callables on the fly that know +# how to get the currently initialized global interpreter instance. +def _make_callable(callable: GlobalCallable, namespace: List[str], callable_name: str): + module = code + # Create a name that will be used to collect the hierarchy of namespace identifiers if they exist and use that + # to register created modules with the system. + accumulated_namespace = "qdk.code" + accumulated_namespace += "." + for name in namespace: + accumulated_namespace += name + # Use the existing entry, which should already be a module. 
+ if hasattr(module, name): + module = module.__getattribute__(name) + if sys.modules.get(accumulated_namespace) is None: + # This is an existing entry that is not yet registered in sys.modules, so add it. + # This can happen if a callable with the same name as this namespace is already + # defined. + sys.modules[accumulated_namespace] = module + else: + # This namespace entry doesn't exist as a module yet, so create it, add it to the environment, and + # add it to sys.modules so it supports import properly. + new_module = types.ModuleType(accumulated_namespace) + module.__setattr__(name, new_module) + sys.modules[accumulated_namespace] = new_module + module = new_module + accumulated_namespace += "." + + def _callable(*args): + ipython_helper() + + def callback(output: Output) -> None: + if _in_jupyter: + try: + display(output) + return + except: + # If IPython is not available, fall back to printing the output + pass + print(output, flush=True) + + args = python_args_to_interpreter_args(args) + + output = get_interpreter().invoke(callable, args, callback) + return qsharp_value_to_python_value(output) + + # Each callable is annotated so that we know it is auto-generated and can be removed on a re-init of the interpreter. + _callable.__global_callable = callable + + # Add the callable to the module. + if module.__dict__.get(callable_name) is None: + module.__setattr__(callable_name, _callable) + else: + # Preserve any existing attributes on the attribute with the matching name, + # since this could be a collision with an existing namespace/module. 
+ for key, val in module.__dict__.get(callable_name).__dict__.items(): + if key != "__global_callable": + _callable.__dict__[key] = val + module.__setattr__(callable_name, _callable) + + +def qsharp_value_to_python_value(obj): + # Base case: Primitive types + if isinstance(obj, (bool, int, float, complex, str, Pauli, Result)): + return obj + + # Recursive case: Tuple + if isinstance(obj, tuple): + # Special case Value::UNIT maps to None. + if not obj: + return None + return tuple(qsharp_value_to_python_value(elt) for elt in obj) + + # Recursive case: Array + if isinstance(obj, list): + return [qsharp_value_to_python_value(elt) for elt in obj] + + # Recursive case: Callable or Closure + if isinstance(obj, (GlobalCallable, Closure)): + return obj + + # Recursive case: Udt + if isinstance(obj, UdtValue): + class_name = obj.name + fields = [] + args = [] + for name, value_ir in obj.fields: + val = qsharp_value_to_python_value(value_ir) + ty = type(val) + args.append(val) + fields.append((name, ty)) + return make_dataclass(class_name, fields)(*args) + + +def make_class_rec(qsharp_type: TypeIR) -> type: + class_name = qsharp_type.unwrap_udt().name + fields = {} + for field in qsharp_type.unwrap_udt().fields: + ty = None + kind = field[1].kind() + + if kind == TypeKind.Primitive: + prim_kind = field[1].unwrap_primitive() + if prim_kind == PrimitiveKind.Bool: + ty = bool + elif prim_kind == PrimitiveKind.Int: + ty = int + elif prim_kind == PrimitiveKind.Double: + ty = float + elif prim_kind == PrimitiveKind.Complex: + ty = complex + elif prim_kind == PrimitiveKind.String: + ty = str + elif prim_kind == PrimitiveKind.Pauli: + ty = Pauli + elif prim_kind == PrimitiveKind.Result: + ty = Result + else: + raise QSharpError(f"unknown primitive {prim_kind}") + elif kind == TypeKind.Tuple: + # Special case Value::UNIT maps to None. 
+ if not field[1].unwrap_tuple(): + ty = type(None) + else: + ty = tuple + elif kind == TypeKind.Array: + ty = list + elif kind == TypeKind.Udt: + ty = make_class_rec(field[1]) + else: + raise QSharpError(f"unknown type {kind}") + fields[field[0]] = ty + + return make_dataclass( + class_name, + fields, + ) + + +def _make_class(qsharp_type: TypeIR, namespace: List[str], class_name: str): + """ + Helper function to create a python class given a description of it. This will be + used by the underlying native code to create classes on the fly corresponding to + the currently initialized interpreter instance. + """ + + module = code + # Create a name that will be used to collect the hierarchy of namespace identifiers if they exist and use that + # to register created modules with the system. + accumulated_namespace = "qdk.code" + accumulated_namespace += "." + for name in namespace: + accumulated_namespace += name + # Use the existing entry, which should already be a module. + if hasattr(module, name): + module = module.__getattribute__(name) + else: + # This namespace entry doesn't exist as a module yet, so create it, add it to the environment, and + # add it to sys.modules so it supports import properly. + new_module = types.ModuleType(accumulated_namespace) + module.__setattr__(name, new_module) + sys.modules[accumulated_namespace] = new_module + module = new_module + accumulated_namespace += "." + + QSharpClass = make_class_rec(qsharp_type) + + # Each class is annotated so that we know it is auto-generated and can be removed on a re-init of the interpreter. + QSharpClass.__qsharp_class = True + + # Add the class to the module. 
+ module.__setattr__(class_name, QSharpClass) + + +def run( + entry_expr: Union[str, Callable, GlobalCallable, Closure], + shots: int, + *args, + on_result: Optional[Callable[[ShotResult], None]] = None, + save_events: bool = False, + noise: Optional[ + Union[ + Tuple[float, float, float], + PauliNoise, + BitFlipNoise, + PhaseFlipNoise, + DepolarizingNoise, + NoiseConfig, + ] + ] = None, + qubit_loss: Optional[float] = None, + seed: Optional[int] = None, +) -> List[Any]: + """ + Runs the given Q# expression for the given number of shots. + Each shot uses an independent instance of the simulator. + + :param entry_expr: The entry expression. Alternatively, a callable can be provided, + which must be a Q# callable. + :param shots: The number of shots to run. + :param *args: The arguments to pass to the callable, if one is provided. + :param on_result: A callback function that will be called with each result. + :param save_events: If true, the output of each shot will be saved. If false, they will be printed. + :param noise: The noise to use in simulation. + :param qubit_loss: The probability of qubit loss in simulation. + :param seed: The seed to use for the random number generator in simulation, if any. + + :return: A list of results or runtime errors. If ``save_events`` is true, a list of ``ShotResult`` is returned. + :rtype: List[Any] + :raises QSharpError: If there is an error interpreting the input. + :raises ValueError: If the number of shots is less than 1. 
+ """ + ipython_helper() + + if shots < 1: + raise ValueError("The number of shots must be greater than 0.") + + telemetry_events.on_run( + shots, + noise=(noise is not None and noise != (0.0, 0.0, 0.0)), + qubit_loss=(qubit_loss is not None and qubit_loss > 0.0), + ) + start_time = monotonic() + + results: List[ShotResult] = [] + + def print_output(output: Output) -> None: + if _in_jupyter: + try: + display(output) + return + except: + # If IPython is not available, fall back to printing the output + pass + print(output, flush=True) + + def on_save_events(output: Output) -> None: + # Append the output to the last shot's output list + results[-1]["events"].append(output) + if output.is_matrix(): + results[-1]["matrices"].append(output) + elif output.is_state_dump(): + dump_data = cast(StateDumpData, output.state_dump()) + results[-1]["dumps"].append(StateDump(dump_data)) + elif output.is_message(): + results[-1]["messages"].append(str(output)) + + callable = None + run_entry_expr = None + if isinstance(entry_expr, Callable) and hasattr(entry_expr, "__global_callable"): + args = python_args_to_interpreter_args(args) + callable = entry_expr.__global_callable + elif isinstance(entry_expr, (GlobalCallable, Closure)): + args = python_args_to_interpreter_args(args) + callable = entry_expr + else: + assert isinstance(entry_expr, str) + run_entry_expr = entry_expr + + noise_config = None + if isinstance(noise, NoiseConfig): + noise_config = noise + noise = None + + shot_seed = seed + for shot in range(shots): + # We also don't want every shot to return the same results, so we update the seed for + # the next shot with the shot number. This keeps the behavior deterministic if a seed + # was provided. 
+ if seed is not None: + shot_seed = shot + seed + + results.append( + {"result": None, "events": [], "messages": [], "matrices": [], "dumps": []} + ) + run_results = get_interpreter().run( + run_entry_expr, + on_save_events if save_events else print_output, + noise_config, + noise, + qubit_loss, + callable, + args, + shot_seed, + ) + run_results = qsharp_value_to_python_value(run_results) + results[-1]["result"] = run_results + if on_result: + on_result(results[-1]) + # For every shot after the first, treat the entry expression as None to trigger + # a rerun of the last executed expression without paying the cost for any additional + # compilation. + run_entry_expr = None + + durationMs = (monotonic() - start_time) * 1000 + telemetry_events.on_run_end(durationMs, shots) + + if save_events: + return results + else: + return [shot["result"] for shot in results] + + +# Class that wraps generated QIR, which can be used by +# azure-quantum as input data. +# +# This class must implement the QirRepresentable protocol +# that is defined by the azure-quantum package. +# See: https://github.com/microsoft/qdk-python/blob/fcd63c04aa871e49206703bbaa792329ffed13c4/azure-quantum/azure/quantum/target/target.py#L21 +class QirInputData: + # The name of this variable is defined + # by the protocol and must remain unchanged. + _name: str + + def __init__(self, name: str, ll_str: str): + self._name = name + self._ll_str = ll_str + + # The name of this method is defined + # by the protocol and must remain unchanged. + def _repr_qir_(self, **kwargs) -> bytes: + return self._ll_str.encode("utf-8") + + def __str__(self) -> str: + return self._ll_str + + +def compile( + entry_expr: Union[str, Callable, GlobalCallable, Closure], *args +) -> QirInputData: + """ + Compiles the Q# source code into a program that can be submitted to a target. + Either an entry expression or a callable with arguments must be provided. 
+ + :param entry_expr: The Q# expression that will be used as the entrypoint + for the program. Alternatively, a callable can be provided, which must + be a Q# callable. + :param *args: The arguments to pass to the callable, if one is provided. + + :return: The compiled program. Use ``str()`` to get the QIR string. + :rtype: QirInputData + + Example: + + .. code-block:: python + program = qsharp.compile("...") + with open('myfile.ll', 'w') as file: + file.write(str(program)) + """ + ipython_helper() + start = monotonic() + interpreter = get_interpreter() + target_profile = get_config().get_target_profile() + telemetry_events.on_compile(target_profile) + if isinstance(entry_expr, Callable) and hasattr(entry_expr, "__global_callable"): + args = python_args_to_interpreter_args(args) + ll_str = interpreter.qir(callable=entry_expr.__global_callable, args=args) + elif isinstance(entry_expr, (GlobalCallable, Closure)): + args = python_args_to_interpreter_args(args) + ll_str = interpreter.qir(callable=entry_expr, args=args) + else: + assert isinstance(entry_expr, str) + ll_str = interpreter.qir(entry_expr=entry_expr) + res = QirInputData("main", ll_str) + durationMs = (monotonic() - start) * 1000 + telemetry_events.on_compile_end(durationMs, target_profile) + return res + + +def circuit( + entry_expr: Optional[Union[str, Callable, GlobalCallable, Closure]] = None, + *args, + operation: Optional[str] = None, + generation_method: Optional[CircuitGenerationMethod] = None, + max_operations: Optional[int] = None, + source_locations: bool = False, + group_by_scope: bool = True, + prune_classical_qubits: bool = False, +) -> Circuit: + """ + Synthesizes a circuit for a Q# program. Either an entry + expression or an operation must be provided. + + :param entry_expr: An entry expression. Alternatively, a callable can be provided, + which must be a Q# callable. + :type entry_expr: str or Callable + + :param *args: The arguments to pass to the callable, if one is provided. 
+ + :keyword operation: The operation to synthesize. This can be a name of + an operation or a lambda expression. The operation must take only + qubits or arrays of qubits as parameters. + :kwtype operation: str + + :keyword generation_method: The method to use for circuit generation. + :attr:`~qsharp.CircuitGenerationMethod.ClassicalEval` evaluates classical + control flow at circuit generation time. + :attr:`~qsharp.CircuitGenerationMethod.Simulate` runs a full simulation to + trace the circuit. + :attr:`~qsharp.CircuitGenerationMethod.Static` uses partial evaluation and + requires a non-``Unrestricted`` target profile. Defaults to ``None`` which + auto-selects the generation method. + :kwtype generation_method: :class:`~qsharp.CircuitGenerationMethod` + + :keyword max_operations: The maximum number of operations to include in the circuit. + Defaults to ``None`` which means no limit. + :kwtype max_operations: int + + :keyword source_locations: If ``True``, annotates each gate with its source location. + :kwtype source_locations: bool + + :keyword group_by_scope: If ``True``, groups operations by their containing scope, such as function declarations or loop blocks. + :kwtype group_by_scope: bool + + :keyword prune_classical_qubits: If ``True``, removes qubits that are never used in a quantum + gate (e.g. qubits only used as classical controls). + :kwtype prune_classical_qubits: bool + + :return: The synthesized circuit. + :rtype: :class:`~qsharp._native.Circuit` + :raises QSharpError: If there is an error synthesizing the circuit. 
+ """ + ipython_helper() + start = monotonic() + telemetry_events.on_circuit() + config = CircuitConfig( + max_operations=max_operations, + generation_method=generation_method, + source_locations=source_locations, + group_by_scope=group_by_scope, + prune_classical_qubits=prune_classical_qubits, + ) + + if isinstance(entry_expr, Callable) and hasattr(entry_expr, "__global_callable"): + args = python_args_to_interpreter_args(args) + res = get_interpreter().circuit( + config=config, callable=entry_expr.__global_callable, args=args + ) + elif isinstance(entry_expr, (GlobalCallable, Closure)): + args = python_args_to_interpreter_args(args) + res = get_interpreter().circuit(config=config, callable=entry_expr, args=args) + else: + assert entry_expr is None or isinstance(entry_expr, str) + res = get_interpreter().circuit(config, entry_expr, operation=operation) + + durationMs = (monotonic() - start) * 1000 + telemetry_events.on_circuit_end(durationMs) + + return res + + +def estimate( + entry_expr: Union[str, Callable, GlobalCallable, Closure], + params: Optional[Union[Dict[str, Any], List, EstimatorParams]] = None, + *args, +) -> EstimatorResult: + """ + Estimates resources for Q# source code. + Either an entry expression or a callable with arguments must be provided. + + :param entry_expr: The entry expression. Alternatively, a callable can be provided, + which must be a Q# callable. + :param params: The parameters to configure physical estimation. + + :return: The estimated resources. 
+ :rtype: EstimatorResult + """ + + ipython_helper() + + def _coerce_estimator_params( + params: Optional[ + Union[Dict[str, Any], List[Dict[str, Any]], EstimatorParams] + ] = None, + ) -> List[Dict[str, Any]]: + if params is None: + return [{}] + elif isinstance(params, EstimatorParams): + if params.has_items: + return cast(List[Dict[str, Any]], params.as_dict()["items"]) + else: + return [params.as_dict()] + elif isinstance(params, dict): + return [params] + return params + + params = _coerce_estimator_params(params) + param_str = json.dumps(params) + telemetry_events.on_estimate() + start = monotonic() + if isinstance(entry_expr, Callable) and hasattr(entry_expr, "__global_callable"): + args = python_args_to_interpreter_args(args) + res_str = get_interpreter().estimate( + param_str, callable=entry_expr.__global_callable, args=args + ) + elif isinstance(entry_expr, (GlobalCallable, Closure)): + args = python_args_to_interpreter_args(args) + res_str = get_interpreter().estimate(param_str, callable=entry_expr, args=args) + else: + assert isinstance(entry_expr, str) + res_str = get_interpreter().estimate(param_str, entry_expr=entry_expr) + res = json.loads(res_str) + + try: + qubits = res[0]["logicalCounts"]["numQubits"] + except (KeyError, IndexError): + qubits = "unknown" + + durationMs = (monotonic() - start) * 1000 + telemetry_events.on_estimate_end(durationMs, qubits) + return EstimatorResult(res) + + +def logical_counts( + entry_expr: Union[str, Callable, GlobalCallable, Closure], + *args, +) -> LogicalCounts: + """ + Extracts logical resource counts from Q# source code. + Either an entry expression or a callable with arguments must be provided. + + :param entry_expr: The entry expression. Alternatively, a callable can be provided, + which must be a Q# callable. + + :return: Program resources in terms of logical gate counts. 
+ :rtype: LogicalCounts + """ + + ipython_helper() + + if isinstance(entry_expr, Callable) and hasattr(entry_expr, "__global_callable"): + args = python_args_to_interpreter_args(args) + res_dict = get_interpreter().logical_counts( + callable=entry_expr.__global_callable, args=args + ) + elif isinstance(entry_expr, (GlobalCallable, Closure)): + args = python_args_to_interpreter_args(args) + res_dict = get_interpreter().logical_counts(callable=entry_expr, args=args) + else: + assert isinstance(entry_expr, str) + res_dict = get_interpreter().logical_counts(entry_expr=entry_expr) + return LogicalCounts(res_dict) + + +def set_quantum_seed(seed: Optional[int]) -> None: + """ + Sets the seed for the random number generator used for quantum measurements. + This applies to all Q# code executed, compiled, or estimated. + + :param seed: The seed to use for the quantum random number generator. + If None, the seed will be generated from entropy. + """ + get_interpreter().set_quantum_seed(seed) + + +def set_classical_seed(seed: Optional[int]) -> None: + """ + Sets the seed for the random number generator used for standard + library classical random number operations. + This applies to all Q# code executed, compiled, or estimated. + + :param seed: The seed to use for the classical random number generator. + If None, the seed will be generated from entropy. + """ + get_interpreter().set_classical_seed(seed) + + +def dump_machine() -> StateDump: + """ + Returns the sparse state vector of the simulator as a StateDump object. + + :return: The state of the simulator. + :rtype: StateDump + """ + ipython_helper() + return StateDump(get_interpreter().dump_machine()) + + +def dump_circuit() -> Circuit: + """ + Dumps a circuit showing the current state of the simulator. + + This circuit will contain the gates that have been applied + in the simulator up to the current point. + + Requires the interpreter to be initialized with `trace_circuit=True`. + + :return: The current circuit trace. 
+ :rtype: Circuit + :raises QSharpError: If the interpreter was not initialized with ``trace_circuit=True``. + """ + ipython_helper() + return get_interpreter().dump_circuit() diff --git a/source/qdk_package/qdk/_simulation.py b/source/qdk_package/qdk/_simulation.py new file mode 100644 index 0000000000..3d2f8f7399 --- /dev/null +++ b/source/qdk_package/qdk/_simulation.py @@ -0,0 +1,727 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from pathlib import Path +import random +from typing import Callable, Literal, List, Optional, Tuple, TypeAlias, Union +import pyqir +from ._native import ( + QirInstructionId, + QirInstruction, + run_clifford, + run_parallel_shots, + run_adaptive_parallel_shots, + run_cpu_full_state, + NoiseConfig, + GpuContext, + try_create_gpu_adapter, +) +from pyqir import ( + Function, + FunctionType, + PointerType, + Type, + Linkage, +) +from ._qsharp import QirInputData, Result +from typing import TYPE_CHECKING +from ._adaptive_pass import AdaptiveProfilePass, OP_RECORD_OUTPUT + +if TYPE_CHECKING: # This is in the pyi file only + from ._native import GpuShotResults + + +class AggregateGatesPass(pyqir.QirModuleVisitor): + def __init__(self): + super().__init__() + self.gates: List[QirInstruction | Tuple] = [] + self.required_num_qubits = None + self.required_num_results = None + + def _get_value_as_string(self, value: pyqir.Value) -> str: + value = pyqir.extract_byte_string(value) + if value is None: + return "" + value = value.decode("utf-8") + return value + + def run(self, mod: pyqir.Module) -> Tuple[List[QirInstruction | Tuple], int, int]: + errors = mod.verify() + if errors is not None: + raise ValueError(f"Module verification failed: {errors}") + + # verify that the module is base profile + func = next(filter(pyqir.is_entry_point, mod.functions)) + self.required_num_qubits = pyqir.required_num_qubits(func) + self.required_num_results = pyqir.required_num_results(func) + + super().run(mod) + return 
(self.gates, self.required_num_qubits, self.required_num_results) + + def _on_block(self, block): + if ( + block.terminator + and block.terminator.opcode == pyqir.Opcode.BR + and len(block.terminator.operands) > 1 + ): + raise ValueError( + "simulation of programs with branching control flow is not supported" + ) + super()._on_block(block) + + def _on_call_instr(self, call: pyqir.Call) -> None: + callee_name = call.callee.name + if callee_name == "__quantum__qis__ccx__body": + self.gates.append( + ( + QirInstructionId.CCX, + pyqir.ptr_id(call.args[0]), + pyqir.ptr_id(call.args[1]), + pyqir.ptr_id(call.args[2]), + ) + ) + elif callee_name == "__quantum__qis__cx__body": + self.gates.append( + ( + QirInstructionId.CX, + pyqir.ptr_id(call.args[0]), + pyqir.ptr_id(call.args[1]), + ) + ) + elif callee_name == "__quantum__qis__cy__body": + self.gates.append( + ( + QirInstructionId.CY, + pyqir.ptr_id(call.args[0]), + pyqir.ptr_id(call.args[1]), + ) + ) + elif callee_name == "__quantum__qis__cz__body": + self.gates.append( + ( + QirInstructionId.CZ, + pyqir.ptr_id(call.args[0]), + pyqir.ptr_id(call.args[1]), + ) + ) + elif callee_name == "__quantum__qis__swap__body": + self.gates.append( + ( + QirInstructionId.SWAP, + pyqir.ptr_id(call.args[0]), + pyqir.ptr_id(call.args[1]), + ) + ) + elif callee_name == "__quantum__qis__rx__body": + self.gates.append( + ( + QirInstructionId.RX, + call.args[0].value, + pyqir.ptr_id(call.args[1]), + ) + ) + elif callee_name == "__quantum__qis__rxx__body": + self.gates.append( + ( + QirInstructionId.RXX, + call.args[0].value, + pyqir.ptr_id(call.args[1]), + pyqir.ptr_id(call.args[2]), + ) + ) + elif callee_name == "__quantum__qis__ry__body": + self.gates.append( + ( + QirInstructionId.RY, + call.args[0].value, + pyqir.ptr_id(call.args[1]), + ) + ) + elif callee_name == "__quantum__qis__ryy__body": + self.gates.append( + ( + QirInstructionId.RYY, + call.args[0].value, + pyqir.ptr_id(call.args[1]), + pyqir.ptr_id(call.args[2]), + ) + ) + elif 
callee_name == "__quantum__qis__rz__body": + self.gates.append( + ( + QirInstructionId.RZ, + call.args[0].value, + pyqir.ptr_id(call.args[1]), + ) + ) + elif callee_name == "__quantum__qis__rzz__body": + self.gates.append( + ( + QirInstructionId.RZZ, + call.args[0].value, + pyqir.ptr_id(call.args[1]), + pyqir.ptr_id(call.args[2]), + ) + ) + elif callee_name == "__quantum__qis__h__body": + self.gates.append((QirInstructionId.H, pyqir.ptr_id(call.args[0]))) + elif callee_name == "__quantum__qis__s__body": + self.gates.append((QirInstructionId.S, pyqir.ptr_id(call.args[0]))) + elif callee_name == "__quantum__qis__s__adj": + self.gates.append((QirInstructionId.SAdj, pyqir.ptr_id(call.args[0]))) + elif callee_name == "__quantum__qis__sx__body": + self.gates.append((QirInstructionId.SX, pyqir.ptr_id(call.args[0]))) + elif callee_name == "__quantum__qis__t__body": + self.gates.append((QirInstructionId.T, pyqir.ptr_id(call.args[0]))) + elif callee_name == "__quantum__qis__t__adj": + self.gates.append((QirInstructionId.TAdj, pyqir.ptr_id(call.args[0]))) + elif callee_name == "__quantum__qis__x__body": + self.gates.append((QirInstructionId.X, pyqir.ptr_id(call.args[0]))) + elif callee_name == "__quantum__qis__y__body": + self.gates.append((QirInstructionId.Y, pyqir.ptr_id(call.args[0]))) + elif callee_name == "__quantum__qis__z__body": + self.gates.append((QirInstructionId.Z, pyqir.ptr_id(call.args[0]))) + elif callee_name == "__quantum__qis__m__body": + self.gates.append( + ( + QirInstructionId.M, + pyqir.ptr_id(call.args[0]), + pyqir.ptr_id(call.args[1]), + ) + ) + elif callee_name == "__quantum__qis__mz__body": + self.gates.append( + ( + QirInstructionId.MZ, + pyqir.ptr_id(call.args[0]), + pyqir.ptr_id(call.args[1]), + ) + ) + elif callee_name == "__quantum__qis__mresetz__body": + self.gates.append( + ( + QirInstructionId.MResetZ, + pyqir.ptr_id(call.args[0]), + pyqir.ptr_id(call.args[1]), + ) + ) + elif callee_name == "__quantum__qis__reset__body": + 
self.gates.append((QirInstructionId.RESET, pyqir.ptr_id(call.args[0]))) + elif callee_name == "__quantum__qis__move__body": + self.gates.append( + ( + QirInstructionId.Move, + pyqir.ptr_id(call.args[0]), + ) + ) + elif callee_name == "__quantum__rt__result_record_output": + tag = self._get_value_as_string(call.args[1]) + self.gates.append( + ( + QirInstructionId.ResultRecordOutput, + str(pyqir.ptr_id(call.args[0])), + tag, + ) + ) + elif callee_name == "__quantum__rt__tuple_record_output": + tag = self._get_value_as_string(call.args[1]) + self.gates.append( + (QirInstructionId.TupleRecordOutput, str(call.args[0].value), tag) + ) + elif callee_name == "__quantum__rt__array_record_output": + tag = self._get_value_as_string(call.args[1]) + self.gates.append( + (QirInstructionId.ArrayRecordOutput, str(call.args[0].value), tag) + ) + elif ( + callee_name == "__quantum__rt__initialize" + or callee_name == "__quantum__rt__begin_parallel" + or callee_name == "__quantum__rt__end_parallel" + or callee_name == "__quantum__qis__barrier__body" + # We only hit this during noiseless simulations + or "qdk_noise" in call.callee.attributes.func + ): + pass + else: + raise ValueError(f"Unsupported call instruction: {callee_name}") + + +class CorrelatedNoisePass(AggregateGatesPass): + """ + This pass replaces the QIR intrinsics that are in the provided NoiseConfig + by correlated noise instructions that the simulator understands. 
+ """ + + def __init__(self, noise_config: NoiseConfig): + super().__init__() + self.noise_intrinsics_table = noise_config.intrinsics + + def _on_call_instr(self, call: pyqir.Call) -> None: + callee_name = call.callee.name + if callee_name in self.noise_intrinsics_table: + self.gates.append( + ( + QirInstructionId.CorrelatedNoise, + self.noise_intrinsics_table.get_intrinsic_id(callee_name), + [pyqir.ptr_id(arg) for arg in call.args], + ) + ) + elif "qdk_noise" in call.callee.attributes.func: + # If we are running a noisy simulation, we treat + # missing noise intrinsics as an error. + raise ValueError(f"Missing noise intrinsic: {callee_name}") + else: + super()._on_call_instr(call) + + +class GpuCorrelatedNoisePass(AggregateGatesPass): + """ + A special case of the CorrelatedNoisePass that uses data loaded + directly from rust instead of a NoiseConfig object to detect the + correlated noise intrinsics. + """ + + def __init__(self, noise_table: List[Tuple[int, str, int]]): + super().__init__() + self.noise_table = dict() + for table_id, name, _count in noise_table: + self.noise_table[name] = table_id + + def _on_call_instr(self, call: pyqir.Call) -> None: + callee_name = call.callee.name + if callee_name in self.noise_table: + self.gates.append( + ( + QirInstructionId.CorrelatedNoise, + int(self.noise_table[callee_name]), # Noise table ID + [pyqir.ptr_id(qubit) for qubit in call.args], # qubit args + ) + ) + elif "qdk_noise" in call.callee.attributes.func: + # If we are running a noisy simulation, we treat + # missing noise intrinsics as an error. 
+ raise ValueError(f"Missing noise intrinsic: {callee_name}") + else: + super()._on_call_instr(call) + + +class OutputRecordingPass(pyqir.QirModuleVisitor): + _output_str = "" + _closers = [] + _counters = [] + + def process_output(self, bitstring: str): + return eval( + self._output_str, + { + "o": [ + Result.Zero if x == "0" else Result.One if x == "1" else Result.Loss + for x in bitstring + ] + }, + ) + + def _on_function(self, function): + if pyqir.is_entry_point(function): + super()._on_function(function) + while len(self._closers) > 0: + self._output_str += self._closers.pop() + self._counters.pop() + + def _on_rt_result_record_output(self, call, result, target): + self._output_str += f"o[{pyqir.ptr_id(result)}]" + while len(self._counters) > 0: + self._output_str += "," + self._counters[-1] -= 1 + if self._counters[-1] == 0: + self._output_str += self._closers[-1] + self._closers.pop() + self._counters.pop() + else: + break + + def _on_rt_array_record_output(self, call, value, target): + self._output_str += "[" + self._closers.append("]") + # if len(self._counters) > 0: + # self._counters[-1] -= 1 + self._counters.append(value.value) + + def _on_rt_tuple_record_output(self, call, value, target): + self._output_str += "(" + self._closers.append(")") + # if len(self._counters) > 0: + # self._counters[-1] -= 1 + self._counters.append(value.value) + + +class DecomposeCcxPass(pyqir.QirModuleVisitor): + + h_func: Function + t_func: Function + tadj_func: Function + cz_func: Function + + def __init__(self): + super().__init__() + + def _on_module(self, module): + void = Type.void(module.context) + qubit_ty = PointerType(Type.void(module.context)) + + # Find or create all the needed functions. 
+ for func in module.functions: + match func.name: + case "__quantum__qis__h__body": + self.h_func = func + case "__quantum__qis__t__body": + self.t_func = func + case "__quantum__qis__t__adj": + self.tadj_func = func + case "__quantum__qis__cz__body": + self.cz_func = func + if not hasattr(self, "h_func"): + self.h_func = Function( + FunctionType(void, [qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__h__body", + module, + ) + if not hasattr(self, "t_func"): + self.t_func = Function( + FunctionType(void, [qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__t__body", + module, + ) + if not hasattr(self, "tadj_func"): + self.tadj_func = Function( + FunctionType(void, [qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__t__adj", + module, + ) + if not hasattr(self, "cz_func"): + self.cz_func = Function( + FunctionType(void, [qubit_ty, qubit_ty]), + Linkage.EXTERNAL, + "__quantum__qis__cz__body", + module, + ) + super()._on_module(module) + + def _on_qis_ccx(self, call, ctrl1, ctrl2, target): + self.builder.insert_before(call) + self.builder.call(self.h_func, [target]) + self.builder.call(self.tadj_func, [ctrl1]) + self.builder.call(self.tadj_func, [ctrl2]) + self.builder.call(self.h_func, [ctrl1]) + self.builder.call(self.cz_func, [target, ctrl1]) + self.builder.call(self.h_func, [ctrl1]) + self.builder.call(self.t_func, [ctrl1]) + self.builder.call(self.h_func, [target]) + self.builder.call(self.cz_func, [ctrl2, target]) + self.builder.call(self.h_func, [target]) + self.builder.call(self.h_func, [ctrl1]) + self.builder.call(self.cz_func, [ctrl2, ctrl1]) + self.builder.call(self.h_func, [ctrl1]) + self.builder.call(self.t_func, [target]) + self.builder.call(self.tadj_func, [ctrl1]) + self.builder.call(self.h_func, [target]) + self.builder.call(self.cz_func, [ctrl2, target]) + self.builder.call(self.h_func, [target]) + self.builder.call(self.h_func, [ctrl1]) + self.builder.call(self.cz_func, [target, ctrl1]) + self.builder.call(self.h_func, [ctrl1]) + 
self.builder.call(self.tadj_func, [target]) + self.builder.call(self.t_func, [ctrl1]) + self.builder.call(self.h_func, [ctrl1]) + self.builder.call(self.cz_func, [ctrl2, ctrl1]) + self.builder.call(self.h_func, [ctrl1]) + self.builder.call(self.h_func, [target]) + call.erase() + + +Simulator: TypeAlias = Callable[ + [List[QirInstruction], int, int, int, NoiseConfig, int], str +] + + +def preprocess_simulation_input( + input: Union[QirInputData, str, bytes], + shots: Optional[int] = 1, + noise: Optional[NoiseConfig] = None, + seed: Optional[int] = None, +) -> tuple[pyqir.Module, int, Optional[NoiseConfig], int]: + if shots is None: + shots = 1 + # If no seed specified, generate a random u32 to use + if seed is None: + seed = random.randint(0, 2**32 - 1) + if isinstance(noise, tuple): + raise ValueError( + "Specifying Pauli noise via a tuple is not supported. Use a NoiseConfig instead." + ) + + context = pyqir.Context() + if isinstance(input, QirInputData): + mod = pyqir.Module.from_ir(context, str(input)) + elif isinstance(input, str): + mod = pyqir.Module.from_ir(context, input) + else: + mod = pyqir.Module.from_bitcode(context, input) + + return (mod, shots, noise, seed) + + +def is_adaptive(mod: pyqir.Module) -> bool: + """Check if the QIR module uses the Adaptive Profile.""" + entry = next(filter(pyqir.is_entry_point, mod.functions), None) + if entry is None: + return False + func_attrs = entry.attributes.func + if "qir_profiles" not in func_attrs: + return False + return func_attrs["qir_profiles"].string_value == "adaptive_profile" + + +def run_qir_clifford( + input: Union[QirInputData, str, bytes], + shots: Optional[int] = 1, + noise: Optional[NoiseConfig] = None, + seed: Optional[int] = None, +) -> List: + (mod, shots, noise, seed) = preprocess_simulation_input(input, shots, noise, seed) + if noise is None: + (gates, num_qubits, num_results) = AggregateGatesPass().run(mod) + else: + (gates, num_qubits, num_results) = CorrelatedNoisePass(noise).run(mod) + 
    recorder = OutputRecordingPass()
+    recorder.run(mod)
+
+    return list(
+        map(
+            recorder.process_output,
+            run_clifford(gates, num_qubits, num_results, shots, noise, seed),
+        )
+    )
+
+
+def run_qir_cpu(
+    input: Union[QirInputData, str, bytes],
+    shots: Optional[int] = 1,
+    noise: Optional[NoiseConfig] = None,
+    seed: Optional[int] = None,
+) -> List:
+    (mod, shots, noise, seed) = preprocess_simulation_input(input, shots, noise, seed)
+    DecomposeCcxPass().run(mod)
+    if noise is None:
+        (gates, num_qubits, num_results) = AggregateGatesPass().run(mod)
+    else:
+        (gates, num_qubits, num_results) = CorrelatedNoisePass(noise).run(mod)
+    recorder = OutputRecordingPass()
+    recorder.run(mod)
+
+    return list(
+        map(
+            recorder.process_output,
+            run_cpu_full_state(gates, num_qubits, num_results, shots, noise, seed),
+        )
+    )
+
+
+def str_to_result(result: str):
+    match result:
+        case "0":
+            return Result.Zero
+        case "1":
+            return Result.One
+        case "L":
+            return Result.Loss
+        case _:
+            raise ValueError(f"Invalid result {result}")
+
+
+def run_qir_gpu(
+    input: Union[QirInputData, str, bytes],
+    shots: Optional[int] = 1,
+    noise: Optional[NoiseConfig] = None,
+    seed: Optional[int] = None,
+) -> List:
+    (mod, shots, noise, seed) = preprocess_simulation_input(input, shots, noise, seed)
+    # Ccx is not supported in the GPU simulator, decompose it
+    DecomposeCcxPass().run(mod)
+    if is_adaptive(mod):
+        program = AdaptiveProfilePass().run(mod, noise)
+        results = run_adaptive_parallel_shots(program.as_dict(), shots, noise, seed)
+
+        # Extract recorded output result indices from the bytecode.
+        # OP_RECORD_OUTPUT with aux1=0 is result_record_output where
+        # src0 is the result index in the results buffer.
+        recorded_result_indices = []
+        for ins in program.instructions:
+            if (ins.opcode & 0xFF) == OP_RECORD_OUTPUT and ins.aux1 == 0:
+                recorded_result_indices.append(ins.src0)
+        # Filter shot_results to only include recorded output indices
+        filtered = []
+        for s in results:
+            filtered.append([str_to_result(s[i]) for i in recorded_result_indices])
+        return filtered
+    else:
+        if noise is None:
+            (gates, num_qubits, num_results) = AggregateGatesPass().run(mod)
+        else:
+            (gates, num_qubits, num_results) = CorrelatedNoisePass(noise).run(mod)
+        recorder = OutputRecordingPass()
+        recorder.run(mod)
+        return list(
+            map(
+                recorder.process_output,
+                run_parallel_shots(gates, shots, num_qubits, num_results, noise, seed),
+            )
+        )
+
+
+def prepare_qir_with_correlated_noise(
+    input: Union[QirInputData, str, bytes],
+    noise_tables: List[Tuple[int, str, int]],
+) -> Tuple[List[QirInstruction], int, int]:
+    # Turn the input into a QIR module
+    (mod, _, _, _) = preprocess_simulation_input(input, None, None, None)
+
+    # Ccx is not supported in the GPU simulator, decompose it
+    DecomposeCcxPass().run(mod)
+
+    # Extract the gates including correlated noise instructions
+    (gates, required_num_qubits, required_num_results) = GpuCorrelatedNoisePass(
+        noise_tables
+    ).run(mod)
+
+    return (gates, required_num_qubits, required_num_results)
+
+
+class GpuSimulator:
+    """
+    Represents a GPU-based QIR simulator. This is a 'full state' simulator that can simulate
+    quantum programs, including non-Clifford gates, up to a limit of 27 qubits.
+    """
+
+    def __init__(self):
+        self.gpu_context = GpuContext()
+        self._is_adaptive = False
+        self._recorded_result_indices = []
+        self.tables = None
+
+    def load_noise_tables(
+        self,
+        noise_dir: str,
+    ):
+        """
+        Loads noise tables from the specified directory path. For each .csv file found in the directory,
+        the noise table is loaded and associated with a unique identifier.
The name of the file (without the .csv extension) + is used as the label for the noise table, which should match the QIR instruction that will apply noise using this table. + + If testing various noise models, you may load new noise models at any time by calling this method again + with a different directory path. Previously loaded noise tables will be replaced. The program currently loaded + into the simulator (if any) will remain loaded, but any subsequent calls to `run_shots` will use the newly loaded noise tables. + + Each line of the table should be of the format: "IXYZ,1.345e-4" where IXYZ is a string of Pauli operators + representing the error on each qubit (Z applying to the first qubit argument, Y to the second, etc.), and the second value + is the corresponding error probability for that specific Pauli string. + + Blank lines, lines starting with #, or lines that start with the string "pauli" (i.e., a column header) are ignored. + """ + self.tables = self.gpu_context.load_noise_tables(noise_dir) + + def set_program(self, input: Union[QirInputData, str, bytes]): + """ + Load the QIR program into the GPU simulator, preparing it for execution. You may load and run + multiple programs sequentially by calling this method multiple times before calling `run_shots` + without needing to create a new simulator instance or reloading noise tables. + """ + # Parse the QIR module to detect profile + (mod, _, _, _) = preprocess_simulation_input(input, None, None, None) + if is_adaptive(mod): + self._is_adaptive = True + # Build noise_intrinsics dict from loaded noise tables (if any) + noise_intrinsics = None + if self.tables is not None: + noise_intrinsics = {name: table_id for table_id, name, _ in self.tables} + program = AdaptiveProfilePass().run(mod, noise_intrinsics=noise_intrinsics) + self.gpu_context.set_adaptive_program(program.as_dict()) + + # Extract recorded output result indices from the bytecode. 
+ # OP_RECORD_OUTPUT with aux1=0 is result_record_output where + # src0 is the result index in the results buffer. + self._recorded_result_indices = [] + for instr in program.instructions: + if instr.opcode & 0xFF == OP_RECORD_OUTPUT and instr.aux1 == 0: + self._recorded_result_indices.append(instr.src0) + else: + (self.gates, self.required_num_qubits, self.required_num_results) = ( + prepare_qir_with_correlated_noise( + input, self.tables if not self.tables is None else [] + ) + ) + self.gpu_context.set_program( + self.gates, self.required_num_qubits, self.required_num_results + ) + + def run_shots(self, shots: int, seed: Optional[int] = None) -> "GpuShotResults": + """ + Run the loaded QIR program for the specified number of shots, using an optional seed for reproducibility. + If noise is to be applied, ensure that noise has been loaded prior to running shots. + """ + seed = seed if seed is not None else random.randint(0, 2**32 - 1) + if self._is_adaptive: + results = self.gpu_context.run_adaptive_shots(shots, seed=seed) + # Filter shot_results to only include recorded output indices + if self._recorded_result_indices: + indices = self._recorded_result_indices + filtered = [] + for s in results["shot_results"]: + filtered.append("".join(s[i] for i in indices)) + results["shot_results"] = filtered + return results + return self.gpu_context.run_shots(shots, seed=seed) + + +def run_qir( + input: Union[QirInputData, str, bytes], + shots: Optional[int] = 1, + noise: Optional[NoiseConfig] = None, + seed: Optional[int] = None, + type: Optional[Literal["clifford", "cpu", "gpu"]] = None, +) -> List: + """ + Simulate the given QIR source. + + :param input: The QIR source to simulate. + :param type: The type of simulator to use. + Use ``"clifford"`` if your QIR only contains Clifford gates and measurements. + Use ``"gpu"`` if you have a GPU available in your system. + Use ``"cpu"`` as a fallback option if you don't have a GPU in your system. 
+ If ``None`` (default), the GPU simulator will be tried first, falling back to + CPU if a suitable GPU device could not be located. + :param shots: The number of shots to run. + :param noise: A noise model to use in the simulation. + :param seed: A seed for reproducibility. + :return: A list of measurement results, in the order they happened during the simulation. + :rtype: List + """ + if type is None: + try: + try_create_gpu_adapter() + type = "gpu" + except OSError: + type = "cpu" + + match type: + case "clifford": + return run_qir_clifford(input, shots, noise, seed) + case "cpu": + return run_qir_cpu(input, shots, noise, seed) + case "gpu": + return run_qir_gpu(input, shots, noise, seed) + case _: + raise ValueError(f"Invalid simulator type: {type}") diff --git a/source/qdk_package/qdk/applications/__init__.py b/source/qdk_package/qdk/applications/__init__.py index ef21e907f9..59e481eb93 100644 --- a/source/qdk_package/qdk/applications/__init__.py +++ b/source/qdk_package/qdk/applications/__init__.py @@ -1,25 +1,2 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. - -# flake8: noqa F403 -# pyright: ignore[reportWildcardImportFromLibrary] - -"""Quantum applications for the Q# ecosystem. - -This module re-exports all public symbols from [qsharp.applications](:mod:`qsharp.applications`), -making them available under the ``qdk.applications`` namespace. - -Requires the ``applications`` extra: ``pip install "qdk[applications]"``. - -Example: - - from qdk.applications import QSharpApplication -""" - -try: - # Re-export the top-level qsharp.applications names. - from qsharp.applications import * -except Exception as ex: - raise ImportError( - "qdk.applications requires the applications extras. Install with 'pip install \"qdk[applications]\"'." 
- ) from ex diff --git a/source/qdk_package/qdk/applications/magnets.py b/source/qdk_package/qdk/applications/magnets.py deleted file mode 100644 index 76f7cf94fb..0000000000 --- a/source/qdk_package/qdk/applications/magnets.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -# flake8: noqa F403 -# pyright: ignore[reportWildcardImportFromLibrary] - -"""Magnetic system applications for the Q# ecosystem. - -This module re-exports all public symbols from [qsharp.applications.magnets](:mod:`qsharp.applications.magnets`), -making them available under the ``qdk.applications.magnets`` namespace. It -provides classes for modeling and simulating magnetic systems such as the Ising -model using quantum algorithms. - -Requires the ``applications`` extra: ``pip install "qdk[applications]"``. - -Example: - - from qdk.applications.magnets import IsingModel -""" - -try: - # Re-export the top-level qsharp.applications.magnets names. - from qsharp.applications.magnets import * -except Exception as ex: - raise ImportError( - "qdk.applications.magnets requires the applications extras. Install with 'pip install \"qdk[applications]\"'." - ) from ex diff --git a/source/qdk_package/qdk/applications/magnets/__init__.py b/source/qdk_package/qdk/applications/magnets/__init__.py new file mode 100644 index 0000000000..56c536659e --- /dev/null +++ b/source/qdk_package/qdk/applications/magnets/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +# flake8: noqa F403 +# pyright: ignore[reportWildcardImportFromLibrary] + +"""Magnets application module. 
+ +Re-exports from the submodules.""" + +from .geometry import * +from .models import * +from .trotter import * +from .utilities import * diff --git a/source/qdk_package/qdk/applications/magnets/geometry/__init__.py b/source/qdk_package/qdk/applications/magnets/geometry/__init__.py new file mode 100644 index 0000000000..4a7a380f86 --- /dev/null +++ b/source/qdk_package/qdk/applications/magnets/geometry/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Geometry module for representing quantum system topologies. + +This module provides hypergraph data structures for representing the +geometric structure of quantum systems, including lattice topologies +and interaction graphs. +""" + +from .complete import CompleteBipartiteGraph, CompleteGraph +from .lattice1d import Chain1D, Ring1D +from .lattice2d import Patch2D, Torus2D + +__all__ = [ + "CompleteBipartiteGraph", + "CompleteGraph", + "Chain1D", + "Ring1D", + "Patch2D", + "Torus2D", +] diff --git a/source/qdk_package/qdk/applications/magnets/geometry/complete.py b/source/qdk_package/qdk/applications/magnets/geometry/complete.py new file mode 100644 index 0000000000..6c1ce66176 --- /dev/null +++ b/source/qdk_package/qdk/applications/magnets/geometry/complete.py @@ -0,0 +1,150 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Complete graph geometries for quantum simulations. + +This module provides classes for representing complete graphs and complete +bipartite graphs as hypergraphs. These structures are useful for quantum +systems with all-to-all or bipartite all-to-all interactions. +""" + +from ..utilities import ( + Hyperedge, + Hypergraph, + HypergraphEdgeColoring, +) + + +class CompleteGraph(Hypergraph): + """A complete graph where every vertex is connected to every other vertex. + + In a complete graph K_n, there are n vertices and n(n-1)/2 edges, + with each pair of distinct vertices connected by exactly one edge. 
+ + Attributes: + n: Number of vertices in the graph. + + Example: + + .. code-block:: python + >>> graph = CompleteGraph(4) + >>> graph.nvertices + 4 + >>> graph.nedges + 6 + """ + + def __init__(self, n: int, self_loops: bool = False) -> None: + """Initialize a complete graph. + + Args: + n: Number of vertices in the graph. + self_loops: If True, include self-loop edges on each vertex + for single-site terms. + """ + if self_loops: + _edges = [Hyperedge([i]) for i in range(n)] + else: + _edges = [] + + # Add all pairs of vertices + for i in range(n): + for j in range(i + 1, n): + _edges.append(Hyperedge([i, j])) + super().__init__(_edges) + + self.n = n + + def edge_coloring(self) -> HypergraphEdgeColoring: + """Compute edge coloring for this complete graph.""" + coloring = HypergraphEdgeColoring(self) + for edge in self.edges(): + if len(edge.vertices) == 1: + coloring.add_edge(edge, -1) + else: + if self.n % 2 == 0: + i, j = edge.vertices + m = self.n - 1 + if j == m: + coloring.add_edge(edge, i) + elif (j - i) % 2 == 0: + coloring.add_edge(edge, (j - i) // 2) + else: + coloring.add_edge(edge, (j - i + m) // 2) + else: + m = self.n + i, j = edge.vertices + if (j - i) % 2 == 0: + coloring.add_edge(edge, (j - i) // 2) + else: + coloring.add_edge(edge, (j - i + m) // 2) + return coloring + + +class CompleteBipartiteGraph(Hypergraph): + """A complete bipartite graph with two vertex sets. + + In a complete bipartite graph K_{m,n} (m <= n), there are m + n + vertices partitioned into two sets of sizes m and n. Every vertex + in the first set is connected to every vertex in the second set, + giving m * n edges total. + + Vertices 0 to m-1 form the first set, and vertices m to m+n-1 + form the second set. + + Attributes: + m: Number of vertices in the first set. + n: Number of vertices in the second set. + + Requires: + m <= n + + Example: + + .. 
code-block:: python + >>> graph = CompleteBipartiteGraph(2, 3) + >>> graph.nvertices + 5 + >>> graph.nedges + 6 + """ + + def __init__(self, m: int, n: int, self_loops: bool = False) -> None: + """Initialize a complete bipartite graph. + + Args: + m: Number of vertices in the first set (vertices 0 to m-1). + n: Number of vertices in the second set (vertices m to m+n-1). + self_loops: If True, include self-loop edges on each vertex + for single-site terms. + """ + assert m <= n, "Require m <= n for CompleteBipartiteGraph." + total_vertices = m + n + + if self_loops: + _edges = [Hyperedge([i]) for i in range(total_vertices)] + + else: + _edges = [] + + # Connect every vertex in first set to every vertex in second set + for i in range(m): + for j in range(m, m + n): + _edges.append(Hyperedge([i, j])) + super().__init__(_edges) + + self.m = m + self.n = n + + def edge_coloring(self) -> HypergraphEdgeColoring: + """Compute edge coloring for this complete bipartite graph.""" + coloring = HypergraphEdgeColoring(self) + m = self.m + n = self.n + for edge in self.edges(): + if len(edge.vertices) == 1: + coloring.add_edge(edge, -1) + else: + i, j = edge.vertices + coloring.add_edge(edge, (i + j - m) % n) + return coloring diff --git a/source/qdk_package/qdk/applications/magnets/geometry/lattice1d.py b/source/qdk_package/qdk/applications/magnets/geometry/lattice1d.py new file mode 100644 index 0000000000..c7ea60fb60 --- /dev/null +++ b/source/qdk_package/qdk/applications/magnets/geometry/lattice1d.py @@ -0,0 +1,123 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""One-dimensional lattice geometries for quantum simulations. + +This module provides classes for representing 1D lattice structures as +hypergraphs. These lattices are commonly used in quantum spin chain +simulations and other one-dimensional quantum systems. 
+""" + +from ..utilities import ( + Hyperedge, + Hypergraph, + HypergraphEdgeColoring, +) + + +class Chain1D(Hypergraph): + """A one-dimensional open chain lattice. + + Represents a linear chain of vertices with nearest-neighbor edges. + The chain has open boundary conditions, meaning the first and last + vertices are not connected. + + Attributes: + length: Number of vertices in the chain. + + Example: + + .. code-block:: python + >>> chain = Chain1D(4) + >>> chain.nvertices + 4 + >>> chain.nedges + 3 + """ + + def __init__(self, length: int, self_loops: bool = False) -> None: + """Initialize a 1D chain lattice. + + Args: + length: Number of vertices in the chain. + self_loops: If True, include self-loop edges on each vertex + for single-site terms. + """ + if self_loops: + _edges = [Hyperedge([i]) for i in range(length)] + + else: + _edges = [] + + for i in range(length - 1): + _edges.append(Hyperedge([i, i + 1])) + + super().__init__(_edges) + self.length = length + + def edge_coloring(self) -> HypergraphEdgeColoring: + """Compute a valid edge coloring for this chain.""" + coloring = HypergraphEdgeColoring(self) + for edge in self.edges(): + if len(edge.vertices) == 1: + coloring.add_edge(edge, -1) + else: + i, j = edge.vertices + color = min(i, j) % 2 + coloring.add_edge(edge, color) + return coloring + + +class Ring1D(Hypergraph): + """A one-dimensional ring (periodic chain) lattice. + + Represents a circular chain of vertices with nearest-neighbor edges. + The ring has periodic boundary conditions, meaning the first and last + vertices are connected. + + Attributes: + length: Number of vertices in the ring. + + Example: + + .. code-block:: python + >>> ring = Ring1D(4) + >>> ring.nvertices + 4 + >>> ring.nedges + 4 + """ + + def __init__(self, length: int, self_loops: bool = False) -> None: + """Initialize a 1D ring lattice. + + Args: + length: Number of vertices in the ring. 
+ self_loops: If True, include self-loop edges on each vertex + for single-site terms. + """ + if self_loops: + _edges = [Hyperedge([i]) for i in range(length)] + else: + _edges = [] + + for i in range(length): + _edges.append(Hyperedge([i, (i + 1) % length])) + super().__init__(_edges) + + self.length = length + + def edge_coloring(self) -> HypergraphEdgeColoring: + """Compute a valid edge coloring for this ring.""" + coloring = HypergraphEdgeColoring(self) + for edge in self.edges(): + if len(edge.vertices) == 1: + coloring.add_edge(edge, -1) + else: + i, j = edge.vertices + if {i, j} == {0, self.length - 1}: + color = (self.length % 2) + 1 + else: + color = min(i, j) % 2 + coloring.add_edge(edge, color) + return coloring diff --git a/source/qdk_package/qdk/applications/magnets/geometry/lattice2d.py b/source/qdk_package/qdk/applications/magnets/geometry/lattice2d.py new file mode 100644 index 0000000000..6c75d12c66 --- /dev/null +++ b/source/qdk_package/qdk/applications/magnets/geometry/lattice2d.py @@ -0,0 +1,187 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Two-dimensional lattice geometries for quantum simulations. + +This module provides classes for representing 2D lattice structures as +hypergraphs. These lattices are commonly used in quantum spin system +simulations and other two-dimensional quantum systems. +""" + +from ..utilities import ( + Hyperedge, + Hypergraph, + HypergraphEdgeColoring, +) + + +class Patch2D(Hypergraph): + """A two-dimensional open rectangular lattice. + + Represents a rectangular grid of vertices with nearest-neighbor edges. + The patch has open boundary conditions, meaning edges do not wrap around. + + Vertices are indexed in row-major order: vertex (x, y) has index y * width + x. + + Attributes: + width: Number of vertices in the horizontal direction. + height: Number of vertices in the vertical direction. + + Example: + + .. 
code-block:: python + >>> patch = Patch2D(3, 2) + >>> str(patch) + '3x2 lattice patch with 6 vertices and 7 edges' + """ + + def __init__(self, width: int, height: int, self_loops: bool = False) -> None: + """Initialize a 2D patch lattice. + + Args: + width: Number of vertices in the horizontal direction. + height: Number of vertices in the vertical direction. + self_loops: If True, include self-loop edges on each vertex + for single-site terms. + """ + self.width = width + self.height = height + + if self_loops: + _edges = [Hyperedge([i]) for i in range(width * height)] + else: + _edges = [] + + # Horizontal edges (connecting (x, y) to (x+1, y)) + for y in range(height): + for x in range(width - 1): + _edges.append(Hyperedge([self._index(x, y), self._index(x + 1, y)])) + + # Vertical edges (connecting (x, y) to (x, y+1)) + for y in range(height - 1): + for x in range(width): + _edges.append(Hyperedge([self._index(x, y), self._index(x, y + 1)])) + super().__init__(_edges) + + def _index(self, x: int, y: int) -> int: + """Convert (x, y) coordinates to vertex index.""" + return y * self.width + x + + def __str__(self) -> str: + """Return the summary string ``"{width}x{height} lattice patch with {nvertices} vertices and {nedges} edges"``.""" + return f"{self.width}x{self.height} lattice patch with {self.nvertices} vertices and {self.nedges} edges" + + def __repr__(self) -> str: + """Return a string representation of the Patch2D geometry.""" + return f"Patch2D(width={self.width}, height={self.height})" + + def edge_coloring(self) -> HypergraphEdgeColoring: + """Compute edge coloring for this 2D patch.""" + coloring = HypergraphEdgeColoring(self) + for edge in self.edges(): + if len(edge.vertices) == 1: + coloring.add_edge(edge, -1) + continue + + u, v = edge.vertices + x_u, y_u = u % self.width, u // self.width + x_v, y_v = v % self.width, v // self.width + + if y_u == y_v: + color = 0 if min(x_u, x_v) % 2 == 0 else 1 + else: + color = 2 if min(y_u, y_v) % 2 == 0 else 
3 + coloring.add_edge(edge, color) + return coloring + + +class Torus2D(Hypergraph): + """A two-dimensional toroidal (periodic) lattice. + + Represents a rectangular grid of vertices with nearest-neighbor edges + and periodic boundary conditions in both directions. The topology is + that of a torus. + + Vertices are indexed in row-major order: vertex (x, y) has index y * width + x. + + Attributes: + width: Number of vertices in the horizontal direction. + height: Number of vertices in the vertical direction. + + Example: + + .. code-block:: python + >>> torus = Torus2D(3, 2) + >>> str(torus) + '3x2 lattice torus with 6 vertices and 12 edges' + """ + + def __init__(self, width: int, height: int, self_loops: bool = False) -> None: + """Initialize a 2D torus lattice. + + Args: + width: Number of vertices in the horizontal direction. + height: Number of vertices in the vertical direction. + self_loops: If True, include self-loop edges on each vertex + for single-site terms. + """ + self.width = width + self.height = height + + if self_loops: + _edges = [Hyperedge([i]) for i in range(width * height)] + else: + _edges = [] + + # Horizontal edges (connecting (x, y) to ((x+1) % width, y)) + for y in range(height): + for x in range(width): + _edges.append( + Hyperedge([self._index(x, y), self._index((x + 1) % width, y)]) + ) + + # Vertical edges (connecting (x, y) to (x, (y+1) % height)) + for y in range(height): + for x in range(width): + _edges.append( + Hyperedge([self._index(x, y), self._index(x, (y + 1) % height)]) + ) + + super().__init__(_edges) + + def _index(self, x: int, y: int) -> int: + """Convert (x, y) coordinates to vertex index.""" + return y * self.width + x + + def __str__(self) -> str: + """Return the summary string ``"{width}x{height} lattice torus with {nvertices} vertices and {nedges} edges"``.""" + return f"{self.width}x{self.height} lattice torus with {self.nvertices} vertices and {self.nedges} edges" + + def __repr__(self) -> str: + """Return a 
string representation of the Torus2D geometry.""" + return f"Torus2D(width={self.width}, height={self.height})" + + def edge_coloring(self) -> HypergraphEdgeColoring: + """Compute edge coloring for this 2D torus.""" + coloring = HypergraphEdgeColoring(self) + for edge in self.edges(): + if len(edge.vertices) == 1: + coloring.add_edge(edge, -1) + continue + + u, v = edge.vertices + x_u, y_u = u % self.width, u // self.width + x_v, y_v = v % self.width, v // self.width + + if y_u == y_v: + if {x_u, x_v} == {0, self.width - 1}: + color = 1 if self.width % 2 == 0 else 4 + else: + color = 0 if min(x_u, x_v) % 2 == 0 else 1 + else: + if {y_u, y_v} == {0, self.height - 1}: + color = 3 if self.height % 2 == 0 else 5 + else: + color = 2 if min(y_u, y_v) % 2 == 0 else 3 + coloring.add_edge(edge, color) + return coloring diff --git a/source/qdk_package/qdk/applications/magnets/models/__init__.py b/source/qdk_package/qdk/applications/magnets/models/__init__.py new file mode 100644 index 0000000000..67c7f569ef --- /dev/null +++ b/source/qdk_package/qdk/applications/magnets/models/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Models module for quantum spin models. + +This module provides classes for representing quantum spin models +as Hamiltonians built from Pauli operators. +""" + +from .model import IsingModel, Model, HeisenbergModel + +__all__ = ["Model", "IsingModel", "HeisenbergModel"] diff --git a/source/qdk_package/qdk/applications/magnets/models/model.py b/source/qdk_package/qdk/applications/magnets/models/model.py new file mode 100644 index 0000000000..754d6741c1 --- /dev/null +++ b/source/qdk_package/qdk/applications/magnets/models/model.py @@ -0,0 +1,230 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +# pyright: reportPrivateImportUsage=false + +from collections.abc import Sequence +from typing import Optional + +from ..utilities import ( + Hyperedge, + Hypergraph, + HypergraphEdgeColoring, + PauliString, +) + +"""Base Model class for quantum spin models. + +This module provides the base class for representing quantum spin models +as Hamiltonians. The Model class integrates with hypergraph geometries +to define interaction topologies and stores coefficients for each edge. +""" + + +class Model: + """Base class for quantum spin models. + + This class represents a quantum spin Hamiltonian defined on a hypergraph + geometry. The Hamiltonian is characterized by: + + - Ops: A list of PauliStrings (one entry per interaction term) + - Terms: Groupings of operator indices for Trotterization or parallel execution + + The model is built on a hypergraph geometry that defines which qubits + interact with each other. + + Attributes: + geometry: The Hypergraph defining the interaction topology. + + Example: + + .. code-block:: python + >>> from qdk.applications.magnets.geometry import Chain1D + >>> geometry = Chain1D(4) + >>> model = Model(geometry) + >>> edge = next(iter(geometry.edges())) + >>> model.add_interaction(edge, "ZZ", 1.5, term=0) + >>> model.nqubits + 4 + """ + + def __init__(self, geometry: Hypergraph): + """Initialize the Model. + + Creates a quantum spin model on the given geometry. + + The model stores operators lazily in ``_ops`` as interaction operators + are defined. Noncommuting collections of operators are collected in + ``_terms`` that stores the indices of its interaction operators. This + list of arrays separates terms into parallelizable groups by color. It is + initialized as one empty term group. + + Args: + geometry: Hypergraph defining the interaction topology. The number + of vertices determines the number of qubits in the model. 
+ """ + self.geometry: Hypergraph = geometry + self._qubits: set[int] = set() + self._ops: list[PauliString] = [] + for edge in geometry.edges(): + self._qubits.update(edge.vertices) + self._terms: dict[int, dict[int, list[int]]] = {} + + def add_interaction( + self, + edge: Hyperedge, + pauli_string: Sequence[int | str] | str, + coefficient: complex = 1.0, + term: Optional[int] = None, + color: int = 0, + ) -> None: + """Add an interaction term to the model. + + Args: + edge: The Hyperedge representing the qubits involved in the interaction. + pauli_string: The PauliString operator for this interaction. + coefficient: The complex coefficient multiplying this term (default 1.0). + """ + if edge not in self.geometry.edges(): + raise ValueError("Edge is not part of the model geometry.") + s = PauliString.from_qubits(edge.vertices, pauli_string, coefficient) + self._ops.append(s) + if term is not None: + if term not in self._terms: + self._terms[term] = {} + if color not in self._terms[term]: + self._terms[term][color] = [] + self._terms[term][color].append(len(self._ops) - 1) + + @property + def nqubits(self) -> int: + """Return the number of qubits in the model.""" + return len(self._qubits) + + @property + def nterms(self) -> int: + """Return the number of term groups in the model.""" + return len(self._terms) + + @property + def terms(self) -> list[int]: + """Get the list of term indices in the model.""" + return list(self._terms.keys()) + + def ncolors(self, term: int) -> int: + """Return the number of colors in a given term.""" + if term not in self._terms: + raise ValueError(f"Term {term} does not exist in the model.") + return len(self._terms[term]) + + def colors(self, term: int) -> list[int]: + """Return the list of colors in a given term.""" + if term not in self._terms: + raise ValueError(f"Term {term} does not exist in the model.") + return list(self._terms[term].keys()) + + def nops(self, term: int, color: int) -> int: + """Return the number of operators 
in a given term and color.""" + if term not in self._terms: + raise ValueError(f"Term {term} does not exist in the model.") + if color not in self._terms[term]: + raise ValueError(f"Color {color} does not exist in term {term}.") + return len(self._terms[term][color]) + + def ops(self, term: int, color: int) -> list[PauliString]: + """Return the list of operators in a given term and color.""" + if term not in self._terms: + raise ValueError(f"Term {term} does not exist in the model.") + if color not in self._terms[term]: + raise ValueError(f"Color {color} does not exist in term {term}.") + return [self._ops[i] for i in self._terms[term][color]] + + def __str__(self) -> str: + """String representation of the model.""" + return "Generic model with {} terms on {} qubits.".format( + len(self._terms), len(self._qubits) + ) + + def __repr__(self) -> str: + """String representation of the model.""" + return self.__str__() + + +class IsingModel(Model): + """Translation-invariant Ising model on a hypergraph geometry. + + The Hamiltonian is: + H = -J * Σ_{⟨i,j⟩} Z_i Z_j - h * Σ_i X_i + + - Single-vertex edges define X-field terms with coefficient ``-h``. + - Two-vertex edges define ZZ-coupling terms with coefficient ``-J``. + - Terms are grouped into two groups: ``0`` for field terms and ``1`` for + coupling terms. 
+ """ + + def __init__(self, geometry: Hypergraph, h: float, J: float): + super().__init__(geometry) + self.h = h + self.J = J + self._terms = {0: {}, 1: {}} + + coloring: HypergraphEdgeColoring = geometry.edge_coloring() + for edge in geometry.edges(): + vertices = edge.vertices + if len(vertices) == 1: + self.add_interaction(edge, "X", -h, term=0, color=0) + elif len(vertices) == 2: + color = coloring.color(edge.vertices) + if color is None: + raise ValueError("Geometry edge coloring failed to assign a color.") + self.add_interaction(edge, "ZZ", -J, term=1, color=color) + + def __str__(self) -> str: + return ( + f"Ising model with {self.nterms} terms on {self.nqubits} qubits " + f"(h={self.h}, J={self.J})." + ) + + def __repr__(self) -> str: + return ( + f"IsingModel(nqubits={self.nqubits}, nterms={self.nterms}, " + f"h={self.h}, J={self.J})" + ) + + +class HeisenbergModel(Model): + """Translation-invariant Heisenberg model on a hypergraph geometry. + + The Hamiltonian is: + H = -J * Σ_{⟨i,j⟩} (X_i X_j + Y_i Y_j + Z_i Z_j) + + - Two-vertex edges define XX, YY, and ZZ coupling terms with coefficient ``-J``. + - Terms are grouped into three parts: ``0`` for XX, ``1`` for YY, and ``2`` for ZZ. + """ + + def __init__(self, geometry: Hypergraph, J: float): + super().__init__(geometry) + self.J = J + self.coloring: HypergraphEdgeColoring = geometry.edge_coloring() + self._terms = {0: {}, 1: {}, 2: {}} + for edge in geometry.edges(): + vertices = edge.vertices + if len(vertices) == 2: + color = self.coloring.color(edge.vertices) + if color is None: + raise ValueError("Geometry edge coloring failed to assign a color.") + self.add_interaction(edge, "XX", -J, term=0, color=color) + self.add_interaction(edge, "YY", -J, term=1, color=color) + self.add_interaction(edge, "ZZ", -J, term=2, color=color) + + def __str__(self) -> str: + return ( + f"Heisenberg model with {self.nterms} terms on {self.nqubits} qubits " + f"(J={self.J})." 
+ ) + + def __repr__(self) -> str: + return ( + f"HeisenbergModel(nqubits={self.nqubits}, nterms={self.nterms}, " + f"J={self.J})" + ) diff --git a/source/qdk_package/qdk/applications/magnets/trotter/__init__.py b/source/qdk_package/qdk/applications/magnets/trotter/__init__.py new file mode 100644 index 0000000000..d4beaa68c5 --- /dev/null +++ b/source/qdk_package/qdk/applications/magnets/trotter/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Trotter-Suzuki methods for time evolution.""" + +from .trotter import ( + TrotterStep, + TrotterExpansion, + strang_splitting, + suzuki_recursion, + yoshida_recursion, + fourth_order_trotter_suzuki, +) + +__all__ = [ + "TrotterStep", + "TrotterExpansion", + "strang_splitting", + "suzuki_recursion", + "yoshida_recursion", + "fourth_order_trotter_suzuki", +] diff --git a/source/qdk_package/qdk/applications/magnets/trotter/trotter.py b/source/qdk_package/qdk/applications/magnets/trotter/trotter.py new file mode 100644 index 0000000000..4caaea5d01 --- /dev/null +++ b/source/qdk_package/qdk/applications/magnets/trotter/trotter.py @@ -0,0 +1,372 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Trotter schedule utilities for magnet models. + +This module provides: + +- ``TrotterStep``: a schedule of ``(time, term_index)`` entries, +- recursion helpers (Suzuki and Yoshida) that raise the order by 2, +- factory helpers such as Strang splitting, and +- ``TrotterExpansion`` to apply a step repeatedly to a concrete model. +""" + +from collections.abc import Callable +from typing import Iterator, Optional +from ..models import Model +from ..utilities import PauliString + +import math + +try: + import cirq +except Exception as ex: + raise ImportError( + "qsharp.magnets.models requires the cirq extras. Install with 'pip install \"qsharp[cirq]\"'." 
+ ) from ex + + +class TrotterStep: + """Schedule of Hamiltonian-term applications for one Trotter step. + + A ``TrotterStep`` stores an ordered list of ``(time, term_index)`` tuples. + Each tuple indicates that term group ``term_index`` should be applied for + evolution time ``time``. + + The constructor builds a first-order step over the provided term indices: + + .. math:: + + e^{-i H t} \\approx \\prod_k e^{-i H_k t}, \\quad H = \\sum_k H_k. + + where each supplied term index appears once with duration ``time_step``. + """ + + def __init__(self, terms: list[int] = [], time_step: float = 0.0): + """Initialize a Trotter step from explicit term indices. + + Args: + terms: Ordered term indices to include in this step. + time_step: Duration associated with each listed term. + + Notes: + If ``terms`` is empty, the step is initialized as order 0. + Otherwise, it is initialized as order 1. + """ + self._nterms = len(terms) + self._time_step = time_step + self._order = 1 if self._nterms > 0 else 0 + self._repr_string: Optional[str] = None + self.terms: list[tuple[float, int]] = [(time_step, j) for j in terms] + + @property + def order(self) -> int: + """Get the order of the Trotter decomposition.""" + return self._order + + @property + def nterms(self) -> int: + """Get the number of term entries used to build this schedule.""" + return self._nterms + + @property + def time_step(self) -> float: + """Get the base time step metadata stored on this step.""" + return self._time_step + + def reduce(self) -> None: + """ + Reduce the Trotter step in place by combining consecutive terms that are the same. + + This can be useful for optimizing the Trotter sequence by merging adjacent + applications of the same term into a single application with a longer time step. 
+ + Example: + >>> trotter = TrotterStep() + >>> trotter.terms = [(0.5, 0), (0.5, 0), (0.5, 1)] + >>> trotter.reduce() + >>> list(trotter.step()) + [(1.0, 0), (0.5, 1)] + """ + if len(self.terms) > 1: + reduced_terms: list[tuple[float, int]] = [] + current_time, current_term = self.terms[0] + + for time, term in self.terms[1:]: + if term == current_term: + current_time += time + else: + reduced_terms.append((current_time, current_term)) + current_time, current_term = time, term + + reduced_terms.append((current_time, current_term)) + self.terms = reduced_terms + + def step(self) -> Iterator[tuple[float, int]]: + """Iterate over ``(time, term_index)`` entries for this step.""" + return iter(self.terms) + + def cirq(self, model: Model) -> cirq.Circuit: + """Build a Cirq circuit for one application of this Trotter step. + + Args: + model: Model that maps each term index to grouped Pauli operators. + + Returns: + A ``cirq.Circuit`` containing ``cirq.PauliStringPhasor`` operations + in the same order as ``self.step()``. 
+ """ + _INT_TO_CIRQ = (cirq.I, cirq.X, cirq.Z, cirq.Y) + circuit = cirq.Circuit() + for time, term_index in self.step(): + for color in model.colors(term_index): + for op in model.ops(term_index, color): + pauli = cirq.PauliString( + { + cirq.LineQubit(p.qubit): _INT_TO_CIRQ[p.op] + for p in op._paulis + }, + ) + oper = cirq.PauliStringPhasor(pauli, exponent_neg=time / math.pi) + circuit.append(oper) + return circuit + + def __str__(self) -> str: + """String representation of the Trotter decomposition.""" + return f"Trotter expansion of order {self._order}: time_step={self._time_step}, num_terms={self._nterms}" + + def __repr__(self) -> str: + """String representation of the Trotter decomposition.""" + if self._repr_string is not None: + return self._repr_string + else: + return f"TrotterStep(num_terms={self._nterms}, time_step={self._time_step})" + + +def suzuki_recursion(trotter: TrotterStep) -> TrotterStep: + """ + Apply one level of Suzuki recursion to double the order of a Trotter step. + + Given a k-th order Trotter step S_k(t), this function constructs a (k+2)-nd order + step using the Suzuki fractal decomposition: + + S_{k+2}(t) = S_{k}(p t) S_{k}(p t) S_{k}((1 - 4p) t) S_{k}(p t) S_{k}(p t) + + where p = 1 / (4 - 4^{1/(2k+1)}). + + The resulting step has improved accuracy: the error scales as O(t^{k+3}) instead + of O(t^{k+1}), at the cost of 5x more exponential applications per step. + + Args: + trotter: A TrotterStep of order k to be promoted to order k+2. + + Returns: + A new TrotterStep of order k+2 constructed via Suzuki recursion. + + References: + M. Suzuki, Phys. Lett. A 146, 319 (1990). 
+ """ + + suzuki = TrotterStep() + suzuki._nterms = trotter._nterms + suzuki._time_step = trotter._time_step + suzuki._order = trotter._order + 2 + suzuki._repr_string = f"SuzukiRecursion(order={suzuki._order}, time_step={suzuki._time_step}, num_terms={suzuki._nterms})" + + p = 1 / (4 - 4 ** (1 / (2 * trotter.order + 1))) + + suzuki.terms = [(p * time, term_index) for time, term_index in trotter.step()] + suzuki.terms += [(p * time, term_index) for time, term_index in trotter.step()] + suzuki.terms += [ + ((1 - 4 * p) * time, term_index) for time, term_index in trotter.step() + ] + suzuki.terms += [(p * time, term_index) for time, term_index in trotter.step()] + suzuki.terms += [(p * time, term_index) for time, term_index in trotter.step()] + suzuki.reduce() # Combine consecutive terms that are the same + + return suzuki + + +def yoshida_recursion(trotter: TrotterStep) -> TrotterStep: + """ + Apply one level of Yoshida recursion to increase the order of a Trotter step by 2. + + Given a k-th order Trotter step S_k(t), this function constructs a (k+2)-nd order + step using Yoshida's symmetric triple-jump composition: + + S_{k+2}(t) = S_{k}(w_1 t) S_{k}(w_0 t) S_{k}(w_1 t) + + where: + w_1 = 1 / (2 - 2^{1/(2k+1)}) + w_0 = -2^{1/(2k+1)} / (2 - 2^{1/(2k+1)}) = 1 - 2 w_1 + + The resulting step has improved accuracy: the error scales as O(t^{k+3}) instead + of O(t^{k+1}), at the cost of 3x more exponential applications per step. + + Args: + trotter: A TrotterStep of order k to be promoted to order k+2. + + Returns: + A new TrotterStep of order k+2 constructed via Yoshida recursion. + + References: + H. Yoshida, Phys. Lett. A 150, 262 (1990). 
+ """ + + yoshida = TrotterStep() + yoshida._nterms = trotter._nterms + yoshida._time_step = trotter._time_step + yoshida._order = trotter._order + 2 + yoshida._repr_string = f"YoshidaRecursion(order={yoshida._order}, time_step={yoshida._time_step}, num_terms={yoshida._nterms})" + + cube_root_2 = 2 ** (1 / (2 * trotter.order + 1)) + w1 = 1 / (2 - cube_root_2) + w0 = 1 - 2 * w1 # equivalent to -cube_root_2 / (2 - cube_root_2) + + yoshida.terms = [(w1 * time, term_index) for time, term_index in trotter.step()] + yoshida.terms += [(w0 * time, term_index) for time, term_index in trotter.step()] + yoshida.terms += [(w1 * time, term_index) for time, term_index in trotter.step()] + yoshida.reduce() # Combine consecutive terms that are the same + + return yoshida + + +def strang_splitting(terms: list[int], time: float) -> TrotterStep: + """ + Create a second-order Strang splitting schedule for explicit term indices. + + The second-order Trotter formula uses symmetric splitting: + + e^{-i H t} \\approx \\prod_{k=1}^{n-1} e^{-i H_k t/2} \\, e^{-i H_n t} \\, \\prod_{k=n-1}^{1} e^{-i H_k t/2} + + This provides second-order accuracy in the time step, compared to + first-order for the basic Trotter decomposition. + + Example: + + .. code-block:: python + >>> strang = strang_splitting(terms=[0, 1, 2], time=0.5) + >>> list(strang.step()) + [(0.25, 0), (0.25, 1), (0.5, 2), (0.25, 1), (0.25, 0)] + + Args: + terms: Ordered term indices for a single symmetric step. Must be non-empty. + time: Total evolution time assigned to this second-order step. + + Returns: + A second-order ``TrotterStep``. + + References: + G. Strang, SIAM J. Numer. Anal. 5, 506 (1968). 
+ """ + strang = TrotterStep() + strang._nterms = len(terms) + strang._time_step = time + strang._order = 2 + strang._repr_string = f"StrangSplitting(time_step={time}, num_terms={len(terms)})" + strang.terms = [] + for i in range(len(terms) - 1): + strang.terms.append((time / 2, terms[i])) + strang.terms.append((time, terms[-1])) + for i in reversed(range(len(terms) - 1)): + strang.terms.append((time / 2, terms[i])) + return strang + + +def fourth_order_trotter_suzuki(terms: list[int], time: float) -> TrotterStep: + """ + Factory function for creating a fourth-order Trotter-Suzuki decomposition + using Suzuki recursion. + + This is obtained by applying one level of Suzuki recursion to the second-order + Strang splitting. The resulting fourth-order decomposition has improved accuracy + compared to the second-order Strang splitting, at the cost of more exponential + applications per step. + + Example: + + .. code-block:: python + >>> fourth_order = fourth_order_trotter_suzuki(terms=[0, 1, 2], time=0.5) + >>> list(fourth_order.step()) + [(0.1767766952966369, 0), (0.1767766952966369, 1), (0.1767766952966369, 2), (0.3535533905932738, 1), (0.3535533905932738, 0), (0.1767766952966369, 1), (0.1767766952966369, 2), (0.1767766952966369, 1), (0.1767766952966369, 0)] + """ + return suzuki_recursion(strang_splitting(terms, time)) + + +class TrotterExpansion: + """Repeated application of a Trotter method on a concrete model. + + ``TrotterExpansion`` builds one step with ``trotter_method(model.terms, dt)`` + where ``dt = time / num_steps`` and then repeats it ``num_steps`` times. + + Iteration via :meth:`step` yields ``PauliString`` operators already scaled by + the per-entry schedule time. + """ + + def __init__( + self, + trotter_method: Callable[[list[int], float], TrotterStep], + model: Model, + time: float, + num_steps: int, + ): + """Initialize a repeated-step Trotter expansion. + + Args: + trotter_method: Callable mapping ``(terms, dt)`` to a ``TrotterStep``. 
+ model: Model that defines term groups and per-term operators. + time: Total evolution time. + num_steps: Number of repeated Trotter steps. + """ + self._model = model + self._num_steps = num_steps + self._trotter_step = trotter_method(model.terms, time / num_steps) + + @property + def order(self) -> int: + """Get the order of the underlying Trotter step.""" + return self._trotter_step.order + + @property + def nterms(self) -> int: + """Get the number of Hamiltonian terms.""" + return self._model.nterms + + @property + def nsteps(self) -> int: + """Get the number of Trotter steps.""" + return self._num_steps + + @property + def total_time(self) -> float: + """Get the total evolution time (time_step * num_steps).""" + return self._trotter_step.time_step * self._num_steps + + def step(self) -> Iterator[PauliString]: + """Iterate over scaled operators for the full expansion. + + Yields: + ``PauliString`` operators with coefficients scaled by schedule time, + in execution order across all repeated steps. 
+ """ + for _ in range(self._num_steps): + for s, i in self._trotter_step.step(): + for c in self._model.colors(i): + for op in self._model.ops(i, c): + yield (op * s) + + def cirq(self) -> cirq.CircuitOperation: + """Get a repeated Cirq circuit operation for this expansion.""" + circuit = self._trotter_step.cirq(self._model).freeze() + return cirq.CircuitOperation(circuit, repetitions=self._num_steps) + + def __str__(self) -> str: + """String representation of the Trotter expansion.""" + return ( + f"TrotterExpansion(order={self.order}, num_steps={self._num_steps}, " + f"total_time={self.total_time}, num_terms={self.nterms})" + ) + + def __repr__(self) -> str: + """Repr representation of the Trotter expansion.""" + return f"TrotterExpansion({self._trotter_step!r}, num_steps={self._num_steps})" diff --git a/source/qdk_package/qdk/applications/magnets/utilities/__init__.py b/source/qdk_package/qdk/applications/magnets/utilities/__init__.py new file mode 100644 index 0000000000..b350f7da40 --- /dev/null +++ b/source/qdk_package/qdk/applications/magnets/utilities/__init__.py @@ -0,0 +1,26 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Utilities module for magnets package. + +This module provides utility data structures and algorithms used across +the magnets package, including hypergraph representations. 
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""Hypergraph data structures for representing quantum system geometries.

This module provides classes for representing hypergraphs, which generalize
graphs by allowing edges (hyperedges) to connect any number of vertices.
Hypergraphs are useful for representing interaction terms in quantum
Hamiltonians, where multi-body interactions can involve more than two sites.
"""

import random
from typing import Iterator, Optional


class Hyperedge:
    """A hyperedge connecting one or more vertices in a hypergraph.

    A hyperedge generalizes a graph edge: instead of exactly two endpoints it
    may connect any number of vertices, which makes it suitable for

    - single-site terms (self-loops): 1 vertex,
    - two-body interactions: 2 vertices,
    - multi-body interactions: 3+ vertices.

    The vertex indices are deduplicated and stored as a sorted tuple so the
    edge has a canonical, hashable representation.

    NOTE(review): equality and hashing are by object identity (no ``__eq__``
    is defined), so two ``Hyperedge`` instances with identical vertices are
    distinct set members — confirm this is intended before relying on it.

    Attributes:
        vertices: Sorted tuple of unique vertex indices in this hyperedge.

    Example:

        .. code-block:: python

            >>> edge = Hyperedge([2, 0, 1])
            >>> edge.vertices
            (0, 1, 2)
    """

    def __init__(self, vertices: list[int]) -> None:
        """Initialize a hyperedge with the given vertices.

        Args:
            vertices: Vertex indices; duplicates are removed and the result
                is sorted into a canonical tuple.
        """
        self.vertices: tuple[int, ...] = tuple(sorted(set(vertices)))

    def __str__(self) -> str:
        return str(self.vertices)

    def __repr__(self) -> str:
        return f"Hyperedge({list(self.vertices)})"


class Hypergraph:
    """A hypergraph consisting of vertices connected by hyperedges.

    A hypergraph is a generalization of a graph where edges (hyperedges) can
    connect any number of vertices. This class serves as the base class for
    various lattice geometries used in quantum simulations.

    Attributes:
        _edge_set: Set of hyperedges in the hypergraph.
        _vertex_set: Set of all unique vertex indices in the hypergraph.

    Note:
        Edge colors are managed separately by :class:`HypergraphEdgeColoring`.
        Use :meth:`edge_coloring` to generate a coloring for this hypergraph.

    Example:

        .. code-block:: python

            >>> edges = [Hyperedge([0, 1]), Hyperedge([1, 2]), Hyperedge([0, 2])]
            >>> graph = Hypergraph(edges)
            >>> graph.nvertices
            3
            >>> graph.nedges
            3
    """

    def __init__(self, edges: list[Hyperedge]) -> None:
        """Initialize a hypergraph with the given edges.

        Args:
            edges: List of hyperedges defining the hypergraph structure.
        """
        self._edge_set = set(edges)
        self._vertex_set: set[int] = set()
        for edge in edges:
            self._vertex_set.update(edge.vertices)

    @property
    def nvertices(self) -> int:
        """Return the number of vertices in the hypergraph."""
        return len(self._vertex_set)

    def vertices(self) -> Iterator[int]:
        """Iterate over all vertex indices in the hypergraph.

        Returns:
            Iterator of vertex indices in ascending order.
        """
        return iter(sorted(self._vertex_set))

    @property
    def nedges(self) -> int:
        """Return the number of hyperedges in the hypergraph."""
        return len(self._edge_set)

    def edges(self) -> Iterator[Hyperedge]:
        """Iterate over all hyperedges in the hypergraph.

        Returns:
            Iterator of all hyperedges in the hypergraph.
        """
        return iter(self._edge_set)

    def add_edge(self, edge: Hyperedge) -> None:
        """Add a hyperedge to the hypergraph.

        Args:
            edge: The Hyperedge instance to add.
        """
        self._edge_set.add(edge)
        self._vertex_set.update(edge.vertices)

    def edge_coloring(
        self, seed: Optional[int] = 0, trials: int = 1
    ) -> "HypergraphEdgeColoring":
        """Compute a greedy edge coloring of this hypergraph.

        Runs ``trials`` randomized greedy passes (deterministic for a fixed
        integer ``seed``; pass ``seed=None`` for nondeterministic shuffles)
        and returns the coloring that used the fewest colors.

        Args:
            seed: Base random seed; trial ``t`` uses ``seed + t`` so trials
                differ while remaining reproducible. ``None`` draws fresh
                entropy for every trial.
            trials: Number of randomized trials to attempt (minimum 1).

        Returns:
            The best :class:`HypergraphEdgeColoring` found for this hypergraph.
        """
        # Stable base order so a fixed seed yields a fixed coloring.
        all_edges = sorted(self.edges(), key=lambda edge: edge.vertices)
        if not all_edges:
            return HypergraphEdgeColoring(self)

        best_coloring: Optional[HypergraphEdgeColoring] = None
        least_colors: Optional[int] = None

        for trial in range(max(trials, 1)):
            rng = random.Random(None if seed is None else seed + trial)
            edge_order = list(all_edges)
            rng.shuffle(edge_order)

            coloring = HypergraphEdgeColoring(self)
            num_colors = 0
            for edge in edge_order:
                if len(edge.vertices) == 1:
                    # Self-loops never conflict; park them on the special color.
                    coloring.add_edge(edge, -1)
                    continue

                for color in range(num_colors):
                    # Probe the coloring's own occupancy sets instead of
                    # rebuilding the used-vertex union via edges_of_color(),
                    # which rescans every hypergraph edge per probe.
                    if coloring._used_vertices.get(color, set()).isdisjoint(
                        edge.vertices
                    ):
                        coloring.add_edge(edge, color)
                        break
                else:
                    # No existing color fits: open a new one.
                    coloring.add_edge(edge, num_colors)
                    num_colors += 1

            if least_colors is None or coloring.ncolors < least_colors:
                least_colors = coloring.ncolors
                best_coloring = coloring

        assert best_coloring is not None
        return best_coloring

    def __str__(self) -> str:
        return f"Hypergraph with {self.nvertices} vertices and {self.nedges} edges."

    def __repr__(self) -> str:
        return f"Hypergraph({list(self._edge_set)})"


class HypergraphEdgeColoring:
    """Edge-color assignment for a :class:`Hypergraph`.

    This class stores colors separately from :class:`Hypergraph` and enforces
    the rule that multi-vertex edges sharing a color do not share any vertices.

    Conventions:

    - Colors for nontrivial edges must be nonnegative integers.
    - Single-vertex edges may use a special color (for example ``-1``).
    - Only nonnegative colors contribute to :attr:`ncolors`.

    Note:
        Colors are keyed by edge vertex tuples (``edge.vertices``), not by
        ``Hyperedge`` object identity. As a result, :meth:`color` accepts edge
        vertex tuples directly, while :meth:`add_edge` still requires an edge
        instance that belongs to :attr:`hypergraph`.

    Attributes:
        hypergraph: The supporting :class:`Hypergraph` whose edges can be
            colored by this instance.
    """

    def __init__(self, hypergraph: Hypergraph) -> None:
        self.hypergraph = hypergraph
        # Canonical vertex-tuple -> assigned color (may include -1 for loops).
        self._colors: dict[tuple[int, ...], int] = {}
        # Nonnegative color -> vertices already occupied by that color.
        self._used_vertices: dict[int, set[int]] = {}

    @property
    def ncolors(self) -> int:
        """Return the number of distinct nonnegative colors in the coloring."""
        return len(self._used_vertices)

    def color(self, vertices: tuple[int, ...]) -> Optional[int]:
        """Return the color assigned to edge vertices.

        Args:
            vertices: Canonical vertex tuple for the edge to query (typically
                ``edge.vertices``).

        Returns:
            The color assigned to ``vertices``, or ``None`` if the edge has
            not been added to this coloring.

        Raises:
            TypeError: If ``vertices`` is not a tuple of ints.
        """
        if not isinstance(vertices, tuple) or not all(
            isinstance(vertex, int) for vertex in vertices
        ):
            raise TypeError("vertices must be tuple[int, ...]")
        return self._colors.get(vertices)

    def colors(self) -> Iterator[int]:
        """Iterate over distinct nonnegative colors present in the coloring.

        Returns:
            Iterator of distinct nonnegative color indices.
        """
        return iter(self._used_vertices.keys())

    def add_edge(self, edge: Hyperedge, color: int) -> None:
        """Add ``edge`` to this coloring with the specified ``color``.

        For multi-vertex edges, this enforces that no previously added edge
        with the same color shares a vertex with ``edge``.

        Args:
            edge: The Hyperedge instance to add. This must be an edge present
                in :attr:`hypergraph` (typically one returned by
                ``hypergraph.edges()``).
            color: Color index for the edge.

        Raises:
            TypeError: If ``edge`` is not a :class:`Hyperedge`.
            ValueError: If ``edge`` is not part of :attr:`hypergraph`.
            ValueError: If ``color`` is negative for a nontrivial edge.
            RuntimeError: If adding ``edge`` would create a same-color vertex
                conflict.
        """
        if not isinstance(edge, Hyperedge):
            raise TypeError(f"edge must be Hyperedge, got {type(edge).__name__}")

        # O(1) set membership; edges() would be a linear scan. Membership is
        # by object identity either way (Hyperedge defines no __eq__).
        if edge not in self.hypergraph._edge_set:
            raise ValueError("edge must belong to the supporting Hypergraph")

        vertices = edge.vertices

        if len(vertices) == 1:
            # Single-vertex edges can be colored with a special color (e.g. -1)
            # and do not participate in conflict tracking or ncolors.
            self._colors[vertices] = color
            return

        if color < 0:
            raise ValueError("Color index must be nonnegative for multi-vertex edges.")

        occupied = self._used_vertices.setdefault(color, set())
        if not occupied.isdisjoint(vertices):
            raise RuntimeError("Edge conflicts with existing edge of same color.")
        occupied.update(vertices)
        self._colors[vertices] = color

    def edges_of_color(self, color: int) -> Iterator[Hyperedge]:
        """Iterate over hyperedges with a specific color.

        Args:
            color: Color index for filtering edges.

        Returns:
            Iterator of edges currently assigned to ``color``.
        """
        return iter(
            [
                edge
                for edge in self.hypergraph.edges()
                if self._colors.get(edge.vertices) == color
            ]
        )
+ """ + return iter( + [ + edge + for edge in self.hypergraph.edges() + if self._colors.get(edge.vertices) == color + ] + ) diff --git a/source/qdk_package/qdk/applications/magnets/utilities/pauli.py b/source/qdk_package/qdk/applications/magnets/utilities/pauli.py new file mode 100644 index 0000000000..4eb7b92e5b --- /dev/null +++ b/source/qdk_package/qdk/applications/magnets/utilities/pauli.py @@ -0,0 +1,270 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Pauli operator representation for quantum spin systems.""" + +from collections.abc import Sequence + +try: + import cirq +except Exception as ex: + raise ImportError( + "qsharp.magnets.models requires the cirq extras. Install with 'pip install \"qsharp[cirq]\"'." + ) from ex + + +class Pauli: + """Single-qubit Pauli term tied to an explicit qubit index. + + ``Pauli`` stores a Pauli identifier and the qubit it acts on. The Pauli + identifier can be provided either as an integer code or a label: + + - ``0`` / ``"I"`` + - ``1`` / ``"X"`` + - ``2`` / ``"Z"`` + - ``3`` / ``"Y"`` + + Note: + The integer mapping follows the internal QDK convention where ``2`` is + ``Z`` and ``3`` is ``Y``. + + Example: + + .. code-block:: python + >>> p = Pauli("Y", qubit=2) + >>> p.op + 3 + >>> p.qubit + 2 + """ + + _VALID_INTS = {0, 1, 2, 3} + _STR_TO_INT = {"I": 0, "X": 1, "Z": 2, "Y": 3} + + def __init__(self, value: int | str, qubit: int = 0) -> None: + """Initialize a Pauli operator. + + Args: + value: An integer 0-3 or one of 'I', 'X', 'Y', 'Z' (case-insensitive). + qubit: The index of the qubit this operator acts on. Defaults to 0. + + Raises: + ValueError: If ``value`` is not a valid integer/string Pauli identifier. 
+ """ + if isinstance(value, int): + if value not in self._VALID_INTS: + raise ValueError(f"Integer value must be 0-3, got {value}.") + self._op = value + elif isinstance(value, str): + key = value.upper() + if key not in self._STR_TO_INT: + raise ValueError( + f"String value must be one of 'I', 'X', 'Y', 'Z', got '{value}'." + ) + self._op = self._STR_TO_INT[key] + else: + raise ValueError(f"Expected int or str, got {type(value).__name__}.") + self.qubit: int = qubit + + @property + def op(self) -> int: + """Integer encoding of this Pauli term. + + Returns: + ``0`` for ``I``, ``1`` for ``X``, ``2`` for ``Z``, ``3`` for ``Y``. + """ + return self._op + + def __str__(self) -> str: + labels = {0: "I", 1: "X", 2: "Z", 3: "Y"} + return f"{labels[self._op]}({self.qubit})" + + def __repr__(self) -> str: + labels = {0: "I", 1: "X", 2: "Z", 3: "Y"} + return f"Pauli('{labels[self._op]}', qubit={self.qubit})" + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Pauli): + return NotImplemented + return self._op == other._op and self.qubit == other.qubit + + def __hash__(self) -> int: + return hash((self._op, self.qubit)) + + @property + def cirq(self): + """Return this Pauli term as a Cirq gate operation on ``LineQubit``. + + Returns: + A Cirq operation equivalent to + ``cirq.{I|X|Z|Y}.on(cirq.LineQubit(self.qubit))``. + """ + _INT_TO_CIRQ = (cirq.I, cirq.X, cirq.Z, cirq.Y) + return _INT_TO_CIRQ[self._op].on(cirq.LineQubit(self.qubit)) + + +def PauliX(qubit: int) -> Pauli: + """Create a Pauli-X operator on the given qubit.""" + return Pauli("X", qubit) + + +def PauliY(qubit: int) -> Pauli: + """Create a Pauli-Y operator on the given qubit.""" + return Pauli("Y", qubit) + + +def PauliZ(qubit: int) -> Pauli: + """Create a Pauli-Z operator on the given qubit.""" + return Pauli("Z", qubit) + + +class PauliString: + """Ordered tensor product of single-qubit ``Pauli`` terms with a coefficient. 
+ + ``PauliString`` stores: + + - an ordered tuple of :class:`Pauli` objects (including each term's qubit), and + - a complex scalar coefficient. + + Construction options: + + - pass a sequence of :class:`Pauli` objects to ``PauliString(...)`` + - use :meth:`from_qubits` to pair qubit indices with Pauli labels/codes + + Example: + + .. code-block:: python + >>> ps = PauliString([PauliX(0), PauliZ(1)], coefficient=-1j) + >>> ps.qubits + (0, 1) + >>> ps2 = PauliString.from_qubits((0, 1), "XZ", coefficient=-1j) + >>> ps == ps2 + True + """ + + def __init__(self, paulis: Sequence[Pauli], coefficient: complex = 1.0) -> None: + """Initialize a PauliString from a sequence of Pauli operators. + + Args: + paulis: A sequence of :class:`Pauli` instances, each with its + own qubit index. + coefficient: Complex coefficient multiplying the Pauli string (default 1.0). + + Raises: + TypeError: If any element is not a Pauli instance. + """ + for p in paulis: + if not isinstance(p, Pauli): + raise TypeError( + f"Expected Pauli instance, got {type(p).__name__}. " + "Use PauliString.from_qubits() for int/str values." + ) + self._paulis: tuple[Pauli, ...] = tuple(paulis) + self._coefficient: complex = coefficient + + @classmethod + def from_qubits( + cls, + qubits: tuple[int, ...], + values: Sequence[int | str] | str, + coefficient: complex = 1.0, + ) -> "PauliString": + """Create a PauliString from qubit indices and Pauli labels. + + Args: + qubits: Tuple of qubit indices. + values: Sequence of Pauli identifiers (integers 0-3 or strings + 'I', 'X', 'Y', 'Z'). A plain string like ``"XZI"`` is also + accepted and treated as individual characters. + coefficient: Complex coefficient multiplying the Pauli string. + + Returns: + A new PauliString instance. + + Raises: + ValueError: If qubits and values have different lengths, or if + any value is not a valid Pauli identifier. 
+ """ + if len(qubits) != len(values): + raise ValueError( + f"Length mismatch: {len(qubits)} qubits vs {len(values)} values." + ) + paulis = [Pauli(v, q) for q, v in zip(qubits, values)] + return cls(paulis, coefficient=coefficient) + + @property + def qubits(self) -> tuple[int, ...]: + """Tuple of qubit indices in the same order as the stored Pauli terms. + + Returns: + Tuple of qubit indices, one per Pauli operator. + """ + return tuple(p.qubit for p in self._paulis) + + @property + def coefficient(self) -> complex: + """Complex coefficient multiplying this Pauli string.""" + return self._coefficient + + @property + def paulis(self) -> str: + """String of Pauli labels in the same order as the stored Pauli terms. + + Returns: + String of Pauli labels ('I', 'X', 'Z', 'Y'), one per Pauli operator. + """ + labels = {0: "I", 1: "X", 2: "Z", 3: "Y"} + return "".join(labels[p.op] for p in self._paulis) + + def __iter__(self): + """Iterate over Pauli terms in stored order. + + Yields: + :class:`Pauli` instances in order. 
+ """ + return iter(self._paulis) + + def __len__(self) -> int: + return len(self._paulis) + + def __getitem__(self, index: int) -> Pauli: + return self._paulis[index] + + def __mul__(self, scalar: complex) -> "PauliString": + """Scale the coefficient of this PauliString by a complex scalar.""" + return PauliString(self._paulis, coefficient=self._coefficient * scalar) + + def __str__(self) -> str: + labels = {0: "I", 1: "X", 2: "Z", 3: "Y"} + s = "".join(map(str, self._paulis)) + return f"{self._coefficient} * {s}" + + def __repr__(self) -> str: + labels = {0: "I", 1: "X", 2: "Z", 3: "Y"} + s = "".join(labels[p.op] for p in self._paulis) + return f"PauliString(qubits={self.qubits}, ops='{s}', coefficient={self._coefficient})" + + def __eq__(self, other: object) -> bool: + if not isinstance(other, PauliString): + return NotImplemented + return self._paulis == other._paulis and self._coefficient == other._coefficient + + def __hash__(self) -> int: + return hash((self._paulis, self._coefficient)) + + @property + def cirq(self): + """Return the corresponding Cirq ``PauliString``. + + Constructs a ``cirq.PauliString`` by applying each single-qubit + Pauli to its corresponding ``cirq.LineQubit``. + + Returns: + A ``cirq.PauliString`` on ``cirq.LineQubit`` instances with + ``self._coefficient`` as its coefficient. + """ + _INT_TO_CIRQ = (cirq.I, cirq.X, cirq.Z, cirq.Y) + return cirq.PauliString( + {cirq.LineQubit(p.qubit): _INT_TO_CIRQ[p.op] for p in self._paulis}, + coefficient=self._coefficient, + ) diff --git a/source/qdk_package/qdk/cirq.py b/source/qdk_package/qdk/cirq.py deleted file mode 100644 index 228a2a0804..0000000000 --- a/source/qdk_package/qdk/cirq.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -"""Cirq interoperability for the Q# ecosystem. 
- -This module re-exports all public symbols from [qsharp.interop.cirq](:mod:`qsharp.interop.cirq`), -making them available under the ``qdk.cirq`` namespace. The primary export -is :class:`~qsharp.interop.cirq.NeutralAtomSampler` — a -standard ``cirq.Sampler`` that runs Cirq circuits on the local -NeutralAtomDevice simulator. - -For full API documentation see [qsharp.interop.cirq](:mod:`qsharp.interop.cirq`). - -Requires the ``cirq`` extra: ``pip install qdk[cirq]``. - -Usage: - - import cirq - from qdk.cirq import NeutralAtomSampler - - q0, q1 = cirq.LineQubit.range(2) - circuit = cirq.Circuit([ - cirq.H(q0), - cirq.CNOT(q0, q1), - cirq.measure(q0, q1, key="m"), - ]) - - sampler = NeutralAtomSampler(seed=42) - result = sampler.run(circuit, repetitions=1000) - print(result.histogram(key="m")) -""" - -try: - from qsharp.interop.cirq import * # pyright: ignore[reportWildcardImportFromLibrary] -except Exception as ex: - raise ImportError( - "qdk.cirq requires the cirq extra. Install with 'pip install qdk[cirq]'." - ) from ex diff --git a/source/qdk_package/qdk/cirq/__init__.py b/source/qdk_package/qdk/cirq/__init__.py new file mode 100644 index 0000000000..8a484fc8ab --- /dev/null +++ b/source/qdk_package/qdk/cirq/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Cirq interoperability for the Q# ecosystem. + +This module provides a :class:`~qsharp.interop.cirq.NeutralAtomSampler` — a standard +``cirq.Sampler`` that runs Cirq circuits on the local NeutralAtomDevice +simulator. 
class NeutralAtomSampler(cirq.Sampler):
    """A ``cirq.Sampler`` that executes Cirq circuits on the local NeutralAtomDevice simulator.

    Implements the standard Cirq sampler protocol, so instances can be used
    anywhere a ``cirq.Sampler`` is expected.

    Each ``run()`` call flows through the following pipeline:

    1. ``cirq.Circuit.to_qasm(version="3.0")`` → OpenQASM 3.0
    2. OpenQASM 3.0 → QIR (base profile, via the Q# compiler)
    3. QIR → ``NeutralAtomDevice.simulate()`` (decompose, schedule, simulate)
    4. Raw shots → :class:`NeutralAtomCirqResult`

    Example::

        import cirq
        from qdk.cirq import NeutralAtomSampler
        from qdk._simulation import NoiseConfig

        q0, q1 = cirq.LineQubit.range(2)
        circuit = cirq.Circuit([
            cirq.H(q0),
            cirq.CNOT(q0, q1),
            cirq.measure(q0, q1, key="m"),
        ])

        # Noiseless simulation
        sampler = NeutralAtomSampler(seed=42)
        result = sampler.run(circuit, repetitions=1000)
        print(result.histogram(key="m"))

        # Noisy simulation — 1% loss on Rz (native gate)
        noise = NoiseConfig()
        noise.rz.loss = 0.01
        sampler = NeutralAtomSampler(noise=noise, seed=42)
        result = sampler.run(circuit, repetitions=1000)
        print(f"Accepted: {len(result.measurements['m'])} / {len(result.raw_shots)}")

    :keyword noise: Optional :class:`~qsharp._simulation.NoiseConfig` describing
        per-gate noise. The device decomposes gates to the native set
        ``{Rz, SX, CZ, MResetZ}``; configure noise on those native gates.
        For example, a Cirq ``X`` gate is decomposed to ``SX·SX``, so
        ``noise.sx`` is the relevant field. Defaults to ``None`` (noiseless).
    :kwtype noise: NoiseConfig
    :keyword simulator_type: Force a particular simulator backend.
        ``"clifford"`` — Clifford-only, fast. Requires a Clifford circuit.
        ``"cpu"`` — Full state-vector on CPU.
        ``"gpu"`` — Full state-vector on GPU.
        ``None`` (default) — GPU if available, CPU otherwise.
    :kwtype simulator_type: str
    :keyword seed: Optional integer seed for reproducibility. Defaults to ``None``.
    :kwtype seed: int
    :keyword device: An existing :class:`~qsharp._device._atom.NeutralAtomDevice`
        instance to reuse across calls. A default-configured device is
        created lazily on the first call when not provided.
    :kwtype device: NeutralAtomDevice
    """

    def __init__(
        self,
        *,
        noise: Optional["NoiseConfig"] = None,
        simulator_type: Optional[str] = None,
        seed: Optional[int] = None,
        device: Optional["NeutralAtomDevice"] = None,
    ) -> None:
        self._noise = noise
        self._simulator_type = simulator_type
        self._seed = seed
        self._device = device

    def _get_device(self) -> "NeutralAtomDevice":
        """Return the backing device, lazily building a default on first use."""
        if self._device is None:
            from .._device._atom import NeutralAtomDevice

            self._device = NeutralAtomDevice()
        return self._device

    def run_sweep(
        self,
        program: cirq.AbstractCircuit,
        params: cirq.Sweepable,
        repetitions: int = 1,
    ) -> List[NeutralAtomCirqResult]:
        """Run the circuit once per parameter resolver in the sweep.

        :param program: The Cirq circuit to simulate.
        :param params: A ``cirq.Sweepable`` defining the parameter resolvers
            to sweep over. Each resolver produces one result.
        :param repetitions: Number of shots per parameter resolver.
        :type repetitions: int
        :return: A list of :class:`NeutralAtomCirqResult` objects, one per resolver.
        :rtype: List[NeutralAtomCirqResult]
        """
        if params is None:
            resolvers = [cirq.ParamResolver()]
        else:
            resolvers = list(cirq.to_sweep(params))

        results: List[NeutralAtomCirqResult] = []
        for resolver in resolvers:
            results.append(self._run_once(program, resolver, repetitions))
        return results

    def _run_once(
        self,
        circuit: cirq.AbstractCircuit,
        param_resolver: cirq.ParamResolver,
        repetitions: int,
    ) -> NeutralAtomCirqResult:
        """Execute one resolved circuit through the QASM→QIR→simulate pipeline."""
        from .._fs import list_directory, read_file, resolve
        from .._http import fetch_github
        from .._native import compile_qasm_program_to_qir
        from .._qsharp import TargetProfile

        bound = cirq.resolve_parameters(circuit, param_resolver)

        # Stage 1: Cirq circuit -> OpenQASM 3.0 text.
        try:
            qasm_text = bound.to_qasm(version="3.0")
        except Exception as exc:
            raise ValueError(
                "Failed to convert the Cirq circuit to QASM 3.0. "
                "Ensure every gate in the circuit supports QASM serialization "
                f"(see cirq.Circuit.to_qasm). Original error: {exc}"
            ) from exc

        # Stage 2: OpenQASM 3.0 -> QIR at the base profile.
        qir = compile_qasm_program_to_qir(
            qasm_text,
            read_file,
            list_directory,
            resolve,
            fetch_github,
            name="cirq_circuit",
            target_profile=TargetProfile.Base,
            search_path=".",
        )

        # Stage 3: simulate the QIR on the neutral-atom device.
        raw_shots = self._get_device().simulate(
            qir,
            shots=repetitions,
            noise=self._noise,
            type=self._simulator_type,
            seed=self._seed,
        )

        # Stage 4: package raw shots as a Cirq result.
        return to_cirq_result(raw_shots, measurement_dict(bound), param_resolver)
+ +"""Result types and conversion utilities for the Cirq–NeutralAtomDevice integration.""" + +from __future__ import annotations + +import ast +import re +from typing import Any, Dict, List, Optional, Sequence + +import cirq +import numpy as np + + +# --------------------------------------------------------------------------- +# Result type +# --------------------------------------------------------------------------- + + +class NeutralAtomCirqResult(cirq.ResultDict): + """A ``cirq.ResultDict`` that also carries raw (loss-inclusive) shot data. + + The inherited ``measurements`` field contains only *accepted* shots - those + where every measured qubit returned a clean ``{0, 1}`` outcome. Shots in + which one or more qubits were lost during the simulation are excluded from + ``measurements`` but are preserved in ``raw_shots``. + + The ``raw_shots`` attribute holds the full list of simulation results, one + entry per shot, in the native simulator output format (tuple, list, or + scalar). This includes shots that contain qubit-loss markers. + + Use :meth:`raw_measurements` to retrieve the full per-shot data (including + loss markers) in the same ``{key: 2D-array (shots x bits)}`` format as + ``measurements``, but with Unicode string dtype so that non-binary markers + are preserved. + """ + + __slots__ = ("raw_shots", "_measurement_dict_data", "_raw_measurements_cache") + + def __init__( + self, + *, + params: cirq.ParamResolver, + measurements: Dict[str, np.ndarray], + raw_shots: List[Any], + measurement_dict: Dict[str, Sequence[int]], + ) -> None: + super().__init__(params=params, measurements=measurements) + self.raw_shots = raw_shots + self._measurement_dict_data = measurement_dict + self._raw_measurements_cache: Optional[Dict[str, Any]] = None + + def raw_measurements(self) -> Dict[str, Any]: + """Return unfiltered per-shot measurement symbols including loss markers. 
+ + The structure mirrors ``measurements``: ``{key: 2D array (shots x bits)}``, + but the array dtype is ``" width: + chars = chars[:width] + rows_by_key[key].append(chars) + + try: + raw_meas: Dict[str, Any] = { + k: np.asarray(v, dtype=" Dict[str, List[int]]: + """Extract ``{measurement_key: [global_qubit_indices]}`` from a Cirq circuit. + + Qubit indices are determined by ``sorted(circuit.all_qubits())``, matching + the ordering that Cirq's ``to_qasm()`` uses when it numbers the qubits. + + :param circuit: The Cirq circuit to introspect. + :return: An ordered dict mapping each measurement key to the list of global qubit + indices that key covers, in the order they are measured. + :rtype: Dict[str, List[int]] + """ + ordered_qubits = sorted(circuit.all_qubits()) + index_by_qubit = {q: i for i, q in enumerate(ordered_qubits)} + + keys_in_order: List[str] = [] + key_to_qubits: Dict[str, List[int]] = {} + + for op in circuit.all_operations(): + if isinstance(op.gate, cirq.MeasurementGate): + key = op.gate.key + if key not in key_to_qubits: + keys_in_order.append(key) + key_to_qubits[key] = [] + key_to_qubits[key].extend(index_by_qubit[q] for q in op.qubits) + + return {k: key_to_qubits[k] for k in keys_in_order} + + +# --------------------------------------------------------------------------- +# Bit-string parsing utilities +# --------------------------------------------------------------------------- + + +def _qir_display_to_bitstring(obj: Any) -> str: + """Convert a raw QIR simulation result value to a flat bitstring. 
+ + Handles the various formats the NeutralAtomDevice simulator may emit: + - ``qsharp.Result`` enum values (``Result.One`` -> ``"1"``, ``Result.Zero`` -> ``"0"``) + - ``tuple`` - multiple classical registers, joined with spaces + - ``list`` - single register bits, each element processed recursively + - ``str`` - already a representation, parsed with ``ast.literal_eval`` if needed + - other - converted to string with ``str()`` + """ + # Handle qsharp.Result enum values produced by the local simulator. + try: + from qdk._qsharp import Result as _Result + + if obj == _Result.One: + return "1" + if obj == _Result.Zero: + return "0" + if obj == _Result.Loss: + return "-" + except ImportError: + pass + + if isinstance(obj, str) and not re.match(r"[\d\s\-]+$", obj): + try: + obj = ast.literal_eval(obj) + except Exception: + return str(obj) + + if isinstance(obj, tuple): + return " ".join(_qir_display_to_bitstring(t) for t in obj) + if isinstance(obj, list): + # Recurse per element so Result.One/Zero inside lists are handled correctly. + return "".join(_qir_display_to_bitstring(bit) for bit in obj) + return str(obj) + + +def _split_registers(bitstring: str, key_lengths: List[int]) -> List[str]: + """Split a flat or space-delimited bitstring into per-register chunks. + + :param bitstring: The raw bitstring, possibly containing spaces between registers. + :param key_lengths: The expected width of each register, in order. + :return: A list of register strings, one per key. 
+ :rtype: List[str] + """ + raw = str(bitstring).strip() + + if " " in raw: + return raw.split(" ") + + if not key_lengths: + return [raw] + + total_len = sum(key_lengths) + if total_len == len(raw): + regs: List[str] = [] + start = 0 + for length in key_lengths: + regs.append(raw[start : start + length]) + start += length + return regs + + return [raw] + + +# --------------------------------------------------------------------------- +# Loss-filtering shot conversion +# --------------------------------------------------------------------------- + + +def _shots_to_rows( + shots: Sequence[Any], + measurement_dict_data: Optional[Dict[str, Sequence[int]]] = None, +) -> Dict[str, List[List[int]]]: + """Convert raw simulation shots to ``{key: [[bit_per_shot]]}`` filtering loss. + + Shots where any qubit returned a non-binary value (loss marker) are silently + dropped. Only ``{0, 1}`` shots contribute to the returned arrays. + + :param shots: Raw simulation output, one entry per shot. + :param measurement_dict_data: ``{key: [qubit_indices]}`` — the measurement + register layout. Defaults to a single key ``"m"`` with no qubits. + :return: ``{key: list_of_rows}`` where each row is a list of 0/1 integers. 
+ :rtype: Dict[str, List[List[int]]] + """ + if measurement_dict_data is None: + measurement_dict_data = {"m": []} + + measurement_keys = list(measurement_dict_data.keys()) + key_lengths = [len(measurement_dict_data[k]) for k in measurement_keys] + + shots_by_key: Dict[str, List[List[int]]] = {k: [] for k in measurement_keys} + + for shot in shots: + bitstring = _qir_display_to_bitstring(shot) + registers = _split_registers(bitstring, key_lengths) + + if len(registers) == len(measurement_keys): + parts = registers + else: + flattened = "".join(registers) + parts = _split_registers(flattened, key_lengths) + + per_key_rows: Dict[str, List[int]] = {} + is_valid_shot = True + + for key, bits in zip(measurement_keys, parts): + bit_chars = list(str(bits).strip()) + if not all(ch in "01" for ch in bit_chars): + is_valid_shot = False + break + per_key_rows[key] = [1 if ch == "1" else 0 for ch in bit_chars] + + if not is_valid_shot: + continue + + for key in measurement_keys: + shots_by_key[key].append(per_key_rows.get(key, [])) + + return shots_by_key + + +# --------------------------------------------------------------------------- +# Result construction +# --------------------------------------------------------------------------- + + +def to_cirq_result( + raw_shots: List[Any], + meas_dict: Dict[str, List[int]], + param_resolver: Optional[cirq.ParamResolverOrSimilarType] = None, +) -> NeutralAtomCirqResult: + """Build a :class:`NeutralAtomCirqResult` from raw simulation output. + + :param raw_shots: The raw per-shot results from ``NeutralAtomDevice.simulate()``. + :param meas_dict: ``{key: [qubit_indices]}`` as returned by :func:`measurement_dict`. + :param param_resolver: Cirq parameter resolver for the circuit. Defaults to the + empty resolver. + :return: A :class:`NeutralAtomCirqResult` whose ``measurements`` field contains only + loss-free shots, and whose ``raw_shots`` / ``raw_measurements()`` retain + all shots including those with loss markers. 
+ :rtype: NeutralAtomCirqResult + """ + if param_resolver is None: + param_resolver = cirq.ParamResolver({}) + + normalized = meas_dict or {"m": []} + shots_by_key = _shots_to_rows(raw_shots, normalized) + measurement_keys = list(normalized.keys()) + + measurements: Dict[str, np.ndarray] = {} + for key in measurement_keys: + rows = shots_by_key.get(key, []) + if not rows: + measurements[key] = np.zeros((0, 0), dtype=np.int8) + else: + measurements[key] = np.asarray(rows, dtype=np.int8) + + return NeutralAtomCirqResult( + params=param_resolver, + measurements=measurements, + raw_shots=raw_shots, + measurement_dict=normalized, + ) diff --git a/source/qdk_package/qdk/code/__init__.py b/source/qdk_package/qdk/code/__init__.py new file mode 100644 index 0000000000..695b54fb63 --- /dev/null +++ b/source/qdk_package/qdk/code/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +""" +Code module that receives any user-defined Q# callables as Python functions. +""" diff --git a/source/qdk_package/qdk/code/__init__.pyi b/source/qdk_package/qdk/code/__init__.pyi new file mode 100644 index 0000000000..50d3523caa --- /dev/null +++ b/source/qdk_package/qdk/code/__init__.pyi @@ -0,0 +1,4 @@ +from typing import Any + +# This helps Pyright understand that this module may have dynamic attributes. +def __getattr__(name: str) -> Any: ... diff --git a/source/qdk_package/qdk/estimator.py b/source/qdk_package/qdk/estimator.py deleted file mode 100644 index 2bec068820..0000000000 --- a/source/qdk_package/qdk/estimator.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -"""Resource estimation utilities for the Q# ecosystem. - -This module re-exports all public symbols from [qsharp.estimator](:mod:`qsharp.estimator`), -making them available under the ``qdk.estimator`` namespace. It provides -classes for configuring and interpreting Microsoft Resource Estimator jobs. 
- -Key exports: - -- :class:`~qsharp.estimator.EstimatorParams` — top-level input parameters for a resource estimation job. -- :class:`~qsharp.estimator.EstimatorResult` — result container with formatted tables and diagrams. -- :class:`~qsharp.estimator.LogicalCounts` — pre-calculated logical resource counts for physical estimation. -- :class:`~qsharp.estimator.QubitParams`, :class:`~qsharp.estimator.QECScheme` — predefined model name constants. -- :class:`~qsharp.estimator.EstimatorQubitParams`, :class:`~qsharp.estimator.EstimatorQecScheme` — custom model configuration. -- :class:`~qsharp.estimator.ErrorBudgetPartition` — budget and constraint settings. - -For full API documentation see [qsharp.estimator](:mod:`qsharp.estimator`). -""" - -from qsharp.estimator import * # pyright: ignore[reportWildcardImportFromLibrary] diff --git a/source/qdk_package/qdk/estimator/__init__.py b/source/qdk_package/qdk/estimator/__init__.py new file mode 100644 index 0000000000..ef870f3dad --- /dev/null +++ b/source/qdk_package/qdk/estimator/__init__.py @@ -0,0 +1,36 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from ._estimator import ( + EstimatorError, + LogicalCounts, + EstimatorResult, + QubitParams, + QECScheme, + MeasurementErrorRate, + EstimatorQubitParams, + EstimatorQecScheme, + ProtocolSpecificDistillationUnitSpecification, + DistillationUnitSpecification, + ErrorBudgetPartition, + EstimatorConstraints, + EstimatorInputParamsItem, + EstimatorParams, +) + +__all__ = [ + "EstimatorError", + "LogicalCounts", + "EstimatorResult", + "QubitParams", + "QECScheme", + "MeasurementErrorRate", + "EstimatorQubitParams", + "EstimatorQecScheme", + "ProtocolSpecificDistillationUnitSpecification", + "DistillationUnitSpecification", + "ErrorBudgetPartition", + "EstimatorConstraints", + "EstimatorInputParamsItem", + "EstimatorParams", +] diff --git a/source/qdk_package/qdk/estimator/_estimator.py b/source/qdk_package/qdk/estimator/_estimator.py new file mode 100644 index 0000000000..2349862420 --- /dev/null +++ b/source/qdk_package/qdk/estimator/_estimator.py @@ -0,0 +1,1180 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +import re +from typing import Any, Dict, List, Optional, Union +from dataclasses import dataclass, field +from .._native import physical_estimates + +import json + +try: + # Both markdown and mdx_math (from python-markdown-math) must be present for our markdown + # rendering logic to work. If either is missing, we'll fall back to plain text. + import markdown + import mdx_math + + has_markdown = True +except ImportError: + has_markdown = False + + +class EstimatorError(BaseException): + """ + An error returned from the resource estimation. + """ + + def __init__(self, code: str, message: str): + self.message = f"Error estimating resources ({code}):\n{message}" + self.code = code + + def __str__(self): + return self.message + + +@dataclass +class AutoValidatingParams: + """ + A helper class for target parameters. + + It has a function as_dict that automatically extracts a dictionary from + the class' fields. 
They are added to the result dictionary if their value + is not None, the key is automatically transformed from Python snake case + to camel case, and if validate is True and if the field has a validation + function, the field is validated beforehand. + """ + + def as_dict(self, validate=True): + result = {} + + for name, field in self.__dataclass_fields__.items(): + field_value = self.__getattribute__(name) + if field_value is not None: + # validate field? + if validate and "validate" in field.metadata: + func = field.metadata["validate"] + # check for indirect call (like in @staticmethod) + if hasattr(func, "__func__"): + func = func.__func__ + func(name, field_value) + + # translate field name to camel case + s = re.sub(r"(_|-)+", " ", name).title().replace(" ", "") + attribute = "".join([s[0].lower(), s[1:]]) + result[attribute] = field_value + + if validate: + self.post_validation(result) + + return result + + def post_validation(self, result): + """ + A function that is called after all individual fields have been + validated, but before the result is returned. + + Here result is the current dictionary. + """ + pass + + +def validating_field(validation_func, default=None): + """ + A helper method to declare field for an AutoValidatingParams data class. + """ + return field(default=default, metadata={"validate": validation_func}) + + +class QubitParams: + """ + Predefined qubit model name constants for use with :class:`EstimatorQubitParams`. + + Pass one of these string constants as the ``name`` field to select a built-in + qubit model for resource estimation. + """ + + GATE_US_E3 = "qubit_gate_us_e3" + GATE_US_E4 = "qubit_gate_us_e4" + GATE_NS_E3 = "qubit_gate_ns_e3" + GATE_NS_E4 = "qubit_gate_ns_e4" + MAJ_NS_E4 = "qubit_maj_ns_e4" + MAJ_NS_E6 = "qubit_maj_ns_e6" + + +class QECScheme: + """ + Predefined quantum error correction scheme name constants for use with + :class:`EstimatorQecScheme`. 
+ + Pass one of these string constants as the ``name`` field to select a + built-in QEC scheme for resource estimation. + """ + + SURFACE_CODE = "surface_code" + FLOQUET_CODE = "floquet_code" + + +def _check_error_rate(name, value): + if value <= 0.0 or value >= 1.0: + raise ValueError(f"{name} must be between 0 and 1") + + +def _check_error_rate_or_process_and_readout(name, value): + if value is None: + return + + if isinstance(value, float): + _check_error_rate(name, value) + return + + if not isinstance(value, MeasurementErrorRate): + raise ValueError( + f"{name} must be either a float or " + "MeasurementErrorRate with two fields: 'process' and 'readout'" + ) + + +def check_time(name, value): + pat = r"^(\+?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)\s*(s|ms|μs|µs|us|ns)$" + if re.match(pat, value) is None: + raise ValueError( + f"{name} is not a valid time string; use a " "suffix s, ms, us, or ns" + ) + + +@dataclass +class MeasurementErrorRate(AutoValidatingParams): + """ + Measurement error rate specified separately for process and readout errors. + + Used as the value of ``one_qubit_measurement_error_rate`` or + ``two_qubit_joint_measurement_error_rate`` in :class:`EstimatorQubitParams` + when process and readout error rates differ. + + :param process: Error rate during the measurement process. Must be in ``(0, 1)``. + :param readout: Error rate during readout. Must be in ``(0, 1)``. + """ + + process: float = field(metadata={"validate": _check_error_rate}) + readout: float = field(metadata={"validate": _check_error_rate}) + + +@dataclass +class EstimatorQubitParams(AutoValidatingParams): + """ + Physical qubit parameters for resource estimation. + + Specify a built-in qubit model by setting ``name`` to one of the + :class:`QubitParams` constants, or fully define a custom model by setting + ``instruction_set`` and all relevant timing and error-rate fields. 
+ """ + + @staticmethod + def check_instruction_set(name, value): + if value not in [ + "gate-based", + "gate_based", + "GateBased", + "gateBased", + "Majorana", + "majorana", + ]: + raise ValueError(f"{name} must be GateBased or Majorana") + + name: Optional[str] = None + instruction_set: Optional[str] = validating_field(check_instruction_set) + one_qubit_measurement_time: Optional[str] = validating_field(check_time) + two_qubit_joint_measurement_time: Optional[str] = validating_field(check_time) + one_qubit_gate_time: Optional[str] = validating_field(check_time) + two_qubit_gate_time: Optional[str] = validating_field(check_time) + t_gate_time: Optional[str] = validating_field(check_time) + one_qubit_measurement_error_rate: Union[None, float, MeasurementErrorRate] = ( + validating_field(_check_error_rate_or_process_and_readout) + ) + two_qubit_joint_measurement_error_rate: Union[None, float, MeasurementErrorRate] = ( + validating_field(_check_error_rate_or_process_and_readout) + ) + one_qubit_gate_error_rate: Optional[float] = validating_field(_check_error_rate) + two_qubit_gate_error_rate: Optional[float] = validating_field(_check_error_rate) + t_gate_error_rate: Optional[float] = validating_field(_check_error_rate) + idle_error_rate: Optional[float] = validating_field(_check_error_rate) + + _default_models = [ + QubitParams.GATE_US_E3, + QubitParams.GATE_US_E4, + QubitParams.GATE_NS_E3, + QubitParams.GATE_NS_E4, + QubitParams.MAJ_NS_E4, + QubitParams.MAJ_NS_E6, + ] + _gate_based = ["gate-based", "gate_based", "GateBased", "gateBased"] + _maj_based = ["Majorana", "majorana"] + + def post_validation(self, result): + # check whether all fields have been specified in case a custom qubit + # model is specified + custom = result != {} and ( + self.name is None or self.name not in self._default_models + ) + + # no further validation needed for non-custom models + if not custom: + return + + # instruction set must be set + if self.instruction_set is None: + raise 
LookupError( + "instruction_set must be set for custom qubit " "parameters" + ) + + # NOTE at this point, we know that instruction set must have valid + # value + if self.one_qubit_measurement_time is None: + raise LookupError("one_qubit_measurement_time must be set") + if self.one_qubit_measurement_error_rate is None: + raise LookupError("one_qubit_measurement_error_rate must be set") + + # this only needs to be checked for gate based qubits + if self.instruction_set in self._gate_based: + if self.one_qubit_gate_time is None: + raise LookupError("one_qubit_gate_time must be set") + + def as_dict(self, validate=True) -> Dict[str, Any]: + qubit_params = super().as_dict(validate) + if len(qubit_params) != 0: + if isinstance(self.one_qubit_measurement_error_rate, MeasurementErrorRate): + qubit_params["oneQubitMeasurementErrorRate"] = ( + self.one_qubit_measurement_error_rate.as_dict(validate) + ) + + if isinstance( + self.two_qubit_joint_measurement_error_rate, MeasurementErrorRate + ): + qubit_params["twoQubitJointMeasurementErrorRate"] = ( + self.two_qubit_joint_measurement_error_rate.as_dict(validate) + ) + + return qubit_params + + +@dataclass +class EstimatorQecScheme(AutoValidatingParams): + """ + Quantum error correction scheme parameters for resource estimation. + + Specify a built-in scheme by setting ``name`` to one of the + :class:`QECScheme` constants, or define a custom scheme by setting + the threshold and code-distance parameters directly. 
+ """ + + name: Optional[str] = None + error_correction_threshold: Optional[float] = validating_field(_check_error_rate) + crossing_prefactor: Optional[float] = None + distance_coefficient_power: Optional[int] = None + logical_cycle_time: Optional[str] = None + physical_qubits_per_logical_qubit: Optional[str] = None + max_code_distance: Optional[int] = None + + +@dataclass +class ProtocolSpecificDistillationUnitSpecification(AutoValidatingParams): + """ + Protocol-specific specification for a magic-state distillation unit. + + Defines the number of physical qubits and the duration (in logical cycle + time units) for one round of distillation under a specific QEC code. + """ + + num_unit_qubits: Optional[int] = None + duration_in_qubit_cycle_time: Optional[int] = None + + def post_validation(self, result): + if self.num_unit_qubits is None: + raise LookupError("num_unit_qubits must be set") + + if self.duration_in_qubit_cycle_time is None: + raise LookupError("duration_in_qubit_cycle_time must be set") + + +@dataclass +class DistillationUnitSpecification(AutoValidatingParams): + """ + Specification for a magic-state distillation unit. + + Either select a built-in unit by setting ``name``, or define a custom unit + by providing ``num_input_ts``, ``num_output_ts``, ``failure_probability_formula``, + ``output_error_rate_formula``, and optionally physical and logical qubit + specifications. 
+ """ + + name: Optional[str] = None + display_name: Optional[str] = None + num_input_ts: Optional[int] = None + num_output_ts: Optional[int] = None + failure_probability_formula: Optional[str] = None + output_error_rate_formula: Optional[str] = None + physical_qubit_specification: Optional[ + ProtocolSpecificDistillationUnitSpecification + ] = None + logical_qubit_specification: Optional[ + ProtocolSpecificDistillationUnitSpecification + ] = None + logical_qubit_specification_first_round_override: Optional[ + ProtocolSpecificDistillationUnitSpecification + ] = None + + def has_custom_specification(self): + return ( + self.display_name is not None + or self.num_input_ts is not None + or self.num_output_ts is not None + or self.failure_probability_formula is not None + or self.output_error_rate_formula is not None + or self.physical_qubit_specification is not None + or self.logical_qubit_specification is not None + or self.logical_qubit_specification_first_round_override is not None + ) + + def has_predefined_name(self): + return self.name is not None + + def post_validation(self, result): + if not self.has_custom_specification() and not self.has_predefined_name(): + raise LookupError( + "name must be set or custom specification must be provided" + ) + + if self.has_custom_specification() and self.has_predefined_name(): + raise LookupError( + "If predefined name is provided, " + "custom specification is not allowed. 
" + "Either remove name or remove all other " + "specification of the distillation unit" + ) + + if self.has_predefined_name(): + return # all other validation is on the server side + + if self.num_input_ts is None: + raise LookupError("num_input_ts must be set") + + if self.num_output_ts is None: + raise LookupError("num_output_ts must be set") + + if self.failure_probability_formula is None: + raise LookupError("failure_probability_formula must be set") + + if self.output_error_rate_formula is None: + raise LookupError("output_error_rate_formula must be set") + + if self.physical_qubit_specification is not None: + self.physical_qubit_specification.post_validation(result) + + if self.logical_qubit_specification is not None: + self.logical_qubit_specification.post_validation(result) + + if self.logical_qubit_specification_first_round_override is not None: + self.logical_qubit_specification_first_round_override.post_validation( + result + ) + + def as_dict(self, validate=True) -> Dict[str, Any]: + specification_dict = super().as_dict(validate) + if len(specification_dict) != 0: + if self.physical_qubit_specification is not None: + physical_qubit_specification_dict = ( + self.physical_qubit_specification.as_dict(validate) + ) + if len(physical_qubit_specification_dict) != 0: + specification_dict["physicalQubitSpecification"] = ( + physical_qubit_specification_dict + ) + + if self.logical_qubit_specification is not None: + logical_qubit_specification_dict = ( + self.logical_qubit_specification.as_dict(validate) + ) + if len(logical_qubit_specification_dict) != 0: + specification_dict["logicalQubitSpecification"] = ( + logical_qubit_specification_dict + ) + + if self.logical_qubit_specification_first_round_override is not None: + logical_qubit_specification_first_round_override_dict = ( + self.logical_qubit_specification_first_round_override.as_dict( + validate + ) + ) + if len(logical_qubit_specification_first_round_override_dict) != 0: + specification_dict[ + 
"logicalQubitSpecificationFirstRoundOverride" + ] = logical_qubit_specification_first_round_override_dict + + return specification_dict + + +@dataclass +class ErrorBudgetPartition(AutoValidatingParams): + """ + Partition of the total error budget across algorithm components. + + The three fields must sum to the overall error budget. Defaults to equal + thirds of ``0.001`` (i.e. each component gets ``~3.33e-4``). + + :param logical: Budget allocated to logical errors in the algorithm. + :param t_states: Budget allocated to T-state distillation errors. + :param rotations: Budget allocated to rotation synthesis errors. + """ + + logical: float = 0.001 / 3 + t_states: float = 0.001 / 3 + rotations: float = 0.001 / 3 + + +@dataclass +class EstimatorConstraints(AutoValidatingParams): + """ + Optional runtime and resource constraints for resource estimation. + + At most one of ``max_duration`` or ``max_physical_qubits`` may be set + simultaneously. + """ + + @staticmethod + def at_least_one(name, value): + if value < 1: + raise ValueError(f"{name} must be at least 1") + + logical_depth_factor: Optional[float] = validating_field(at_least_one) + max_t_factories: Optional[int] = validating_field(at_least_one) + max_duration: Optional[int] = validating_field(check_time) + max_physical_qubits: Optional[int] = validating_field(at_least_one) + + def post_validation(self, result): + if self.max_duration is not None and self.max_physical_qubits is not None: + raise LookupError( + "Both duration and number of physical qubits constraints are provided, but only one is allowed at a time." + ) + + +class EstimatorInputParamsItem: + """ + Input parameters for a single resource estimation job. + + Contains qubit model, QEC scheme, distillation unit specifications, constraints, + and error budget settings. Used directly for single-point estimation or as a + base class for batching via :class:`EstimatorParams`. 
+ """ + + def __init__(self): + super().__init__() + + self.qubit_params: EstimatorQubitParams = EstimatorQubitParams() + self.qec_scheme: EstimatorQecScheme = EstimatorQecScheme() + self.distillation_unit_specifications = ( + [] + ) # type: List[DistillationUnitSpecification] + self.constraints: EstimatorConstraints = EstimatorConstraints() + self.error_budget: Optional[Union[float, ErrorBudgetPartition]] = None + self.estimate_type: Optional[str] = None + + def as_dict(self, validate=True, additional_params=None) -> Dict[str, Any]: + result = {} + + qubit_params = self.qubit_params.as_dict(validate) + if len(qubit_params) != 0: + result["qubitParams"] = qubit_params + elif hasattr(additional_params, "qubit_params"): + qubit_params = additional_params.qubit_params.as_dict(validate) + if len(qubit_params) != 0: + result["qubitParams"] = qubit_params + + qec_scheme = self.qec_scheme.as_dict(validate) + if len(qec_scheme) != 0: + result["qecScheme"] = qec_scheme + elif hasattr(additional_params, "qec_scheme"): + qec_scheme = additional_params.qec_scheme.as_dict(validate) + if len(qec_scheme) != 0: + result["qecScheme"] = qec_scheme + + for specification in self.distillation_unit_specifications: + specification_dict = specification.as_dict(validate) + if len(specification_dict) != 0: + if result.get("distillationUnitSpecifications") is None: + result["distillationUnitSpecifications"] = [] + + result["distillationUnitSpecifications"].append(specification_dict) + if result.get("distillationUnitSpecifications") is not None and hasattr( + additional_params, "distillation_unit_specifications" + ): + for specification in additional_params.distillation_unit_specifications: + specification_dict = specification.as_dict(validate) + if len(specification_dict) != 0: + if result.get("distillationUnitSpecifications") is None: + result["distillationUnitSpecifications"] = [] + + result["distillationUnitSpecifications"].append(specification_dict) + + constraints = 
self.constraints.as_dict(validate) + if len(constraints) != 0: + result["constraints"] = constraints + elif hasattr(additional_params, "constraints"): + constraints = additional_params.constraints.as_dict(validate) + if len(constraints) != 0: + result["constraints"] = constraints + + if self.error_budget is not None: + if isinstance(self.error_budget, float) or isinstance( + self.error_budget, int + ): + if validate and (self.error_budget <= 0 or self.error_budget >= 1): + message = "error_budget must be value between 0 and 1" + raise ValueError(message) + result["errorBudget"] = self.error_budget + elif isinstance(self.error_budget, ErrorBudgetPartition): + result["errorBudget"] = self.error_budget.as_dict(validate) + elif hasattr(additional_params, "error_budget"): + if isinstance(additional_params.error_budget, float) or isinstance( + additional_params.error_budget, int + ): + if validate and ( + additional_params.error_budget <= 0 + or additional_params.error_budget >= 1 + ): + message = "error_budget must be value between 0 and 1" + raise ValueError(message) + result["errorBudget"] = additional_params.error_budget + elif isinstance(additional_params.error_budget, ErrorBudgetPartition): + result["errorBudget"] = additional_params.error_budget.as_dict(validate) + + if self.estimate_type is not None: + if self.estimate_type not in ["frontier", "singlePoint"]: + raise ValueError( + "estimate_type must be either 'frontier' or 'singlePoint'" + ) + result["estimateType"] = self.estimate_type + + return result + + +class EstimatorParams(EstimatorInputParamsItem): + """ + Top-level input parameters for the Microsoft Resource Estimator. + + Extends :class:`~qsharp.estimator.EstimatorInputParamsItem` with support for batching: pass + ``num_items`` to create a batching job where each item can override the + top-level parameters. + + :param num_items: Number of batching items. If ``None``, creates a + single-point estimation job. 
+ :type num_items: int + """ + + MAX_NUM_ITEMS: int = 1000 + + def __init__(self, num_items: Optional[int] = None): + EstimatorInputParamsItem.__init__(self) + + if num_items is not None: + self.has_items = True + if num_items <= 0 or num_items > self.MAX_NUM_ITEMS: + raise ValueError( + "num_items must be a positive value less or equal to " + f"{self.MAX_NUM_ITEMS}" + ) + self._items = [EstimatorInputParamsItem() for _ in range(num_items)] + else: + self.has_items = False + + @property + def items(self) -> List: + if self.has_items: + return self._items + else: + raise Exception( + "Cannot access items in a non-batching job, call " + "make_params with num_items parameter" + ) + + def as_dict(self, validate=True) -> Dict[str, Any]: + """ + Constructs a dictionary from the input params. + + For batching jobs, top-level entries are merged into item entries. + Item entries have priority in case they are specified. + """ + + # initialize result and set type hint + result: Dict[str, Any] = EstimatorInputParamsItem.as_dict(self, validate) + + if self.has_items: + result["items"] = [item.as_dict(validate, self) for item in self._items] + # In case of batching, no need to stop if failing an item + result["resumeAfterFailedItem"] = True + + return result + + +class HTMLWrapper: + """ + Simple HTML wrapper to expose _repr_html_ for Jupyter clients. + """ + + def __init__(self, content: str): + self.content = content + + def _repr_html_(self): + return self.content + + +class EstimatorResult(dict): + """ + Microsoft Resource Estimator result. + + The class represents simple resource estimation results as well as batching + resource estimation results. The latter can be indexed by an integer index to + access an individual result from the batching result. 
+ """ + + MAX_DEFAULT_ITEMS_IN_TABLE = 5 + + def __init__(self, data: Union[Dict, List]): + self._error = None + + if isinstance(data, list) and len(data) == 1: + data = data[0] + if not EstimatorResult._is_succeeded(data): + raise EstimatorError(data["code"], data["message"]) + + if isinstance(data, dict): + self._data = data + super().__init__(data) + + self._is_simple = True + if EstimatorResult._is_succeeded(self): + self._repr = self._item_result_table() + self.summary = HTMLWrapper(self._item_result_summary_table()) + self.diagram = EstimatorResultDiagram(self.data().copy()) + else: + self._error = EstimatorError(data["code"], data["message"]) + + elif isinstance(data, list): + super().__init__( + {idx: EstimatorResult(item_data) for idx, item_data in enumerate(data)} + ) + + self._data = data + self._is_simple = False + num_items = len(data) + self._repr = "" + if num_items > self.MAX_DEFAULT_ITEMS_IN_TABLE: + self._repr += ( + "

Info: The overview table is " + "cut off after " + f"{self.MAX_DEFAULT_ITEMS_IN_TABLE} items. If " + "you want to see all items, suffix the result " + "variable with [:]

" + ) + num_items = self.MAX_DEFAULT_ITEMS_IN_TABLE + self._repr += self._batch_result_table(range(num_items)) + + # Add plot function for batching jobs + self.plot = self._plot + self.summary_data_frame = self._summary_data_frame + + @staticmethod + def _is_succeeded(data): + return "status" in data and data["status"] == "success" + + def data(self, idx: Optional[int] = None) -> Any: + """ + Returns raw data of the result object. + + In case of a batching job, you can pass an index to access a specific + item. + """ + if idx is None: + return self._data + elif not self._is_simple: + return self._data[idx] + else: + msg = "Cannot pass parameter 'idx' to 'data' for non-batching job" + raise ValueError(msg) + + @property + def error(self) -> Optional[EstimatorError]: + """ + Returns the error object if the result is an error. + """ + return self._error + + @property + def logical_counts(self): + """ + Returns the logical counts of the result. + """ + if self._is_simple: + return LogicalCounts(self.data()["logicalCounts"]) + else: + return LogicalCounts(self.data(0)["logicalCounts"]) + + def _repr_html_(self): + """ + HTML table representation of the result. + """ + if self._error: + raise self._error + return self._repr + + def __getitem__(self, key): + """ + If the result represents a batching job and key is a slice, a + side-by-side table comparison is shown for the indexes represented by + the slice. + + Otherwise, the key is used to access the raw data directly. 
+ """ + if isinstance(key, slice): + if self._is_simple: + msg = "Cannot pass slice to '__getitem__' for non-batching job" + raise ValueError(msg) + return HTMLWrapper(self._batch_result_table(range(len(self))[key])) + else: + if super().__contains__(key): + return super().__getitem__(key) + elif super().__contains__("frontierEntries"): + return super().__getitem__("frontierEntries")[0].__getitem__(key) + else: + raise KeyError(key) + + def _plot(self, **kwargs): + """ + Plots all result items in a space time plot, where the x-axis shows + total runtime, and the y-axis shows total number of physical qubits. + Both axes are in log-scale. + + :param **kwargs: Common options: + + - ``labels`` (list): List of labels for the legend. Defaults to ``[]``. + """ + try: + import matplotlib.pyplot as plt + except ImportError: + raise ImportError( + "Missing optional 'matplotlib' dependency. To install run: " + "pip install matplotlib" + ) + + labels = kwargs.pop("labels", []) + + [xs, ys] = zip( + *[ + ( + self.data(i)["physicalCounts"]["runtime"], + self.data(i)["physicalCounts"]["physicalQubits"], + ) + for i in range(len(self)) + ] + ) + + _ = plt.figure(figsize=(15, 8)) + + plt.ylabel("Physical qubits") + plt.xlabel("Runtime") + plt.loglog() + for i, (x, y) in enumerate(zip(xs, ys)): + if isinstance(labels, list) and i < len(labels): + label = labels[i] + else: + label = str(i) + plt.scatter(x=[x], y=[y], label=label, marker="os+x"[i % 4]) + + nsec = 1 + usec = 1e3 * nsec + msec = 1e3 * usec + sec = 1e3 * msec + min = 60 * sec + hour = 60 * min + day = 24 * hour + week = 7 * day + month = 31 * day + year = 365 * month + decade = 10 * year + century = 10 * decade + + time_units = [ + nsec, + usec, + msec, + sec, + min, + hour, + day, + week, + month, + year, + decade, + century, + ] + time_labels = [ + "1 ns", + "1 µs", + "1 ms", + "1 s", + "1 min", + "1 hour", + "1 day", + "1 week", + "1 month", + "1 year", + "1 decade", + "1 century", + ] + + cutoff = ( + next( + (i for 
i, x in enumerate(time_units) if x > max(xs)), + len(time_units) - 1, + ) + + 1 + ) + + plt.xticks(time_units[0:cutoff], time_labels[0:cutoff], rotation=90) + plt.legend(loc="upper left") + plt.show() + + @property + def json(self): + """ + Returns a JSON representation of the resource estimation result data. + """ + if not hasattr(self, "_json"): + import json + + self._json = json.dumps(self._data) + + return self._json + + def _summary_data_frame(self, **kwargs): + try: + import pandas as pd + except ImportError: + raise ImportError( + "Missing optional 'pandas' dependency. To install run: " + "pip install pandas" + ) + + # get labels or use default value, then extend with missing elements, + # and truncate extra elements + labels = kwargs.pop("labels", []) + labels.extend(range(len(labels), len(self))) + labels = labels[: len(self)] + + def get_row(result): + if EstimatorResult._is_succeeded(result): + formatted = result["physicalCountsFormatted"] + + return ( + formatted["algorithmicLogicalQubits"], + formatted["logicalDepth"], + formatted["numTstates"], + result["logicalQubit"]["codeDistance"], + formatted["numTfactories"], + formatted["physicalQubitsForTfactoriesPercentage"], + formatted["physicalQubits"], + formatted["rqops"], + formatted["runtime"], + ) + else: + return ["No solution found"] * 9 + + data = [get_row(self.data(index)) for index in range(len(self))] + columns = [ + "Logical qubits", + "Logical depth", + "T states", + "Code distance", + "T factories", + "T factory fraction", + "Physical qubits", + "rQOPS", + "Physical runtime", + ] + return pd.DataFrame(data, columns=columns, index=labels) + + def _item_result_table(self): + html = "" + + if has_markdown: + md = markdown.Markdown(extensions=["mdx_math"]) + for group in self["reportData"]["groups"]: + html += f""" +
+ + {group['title']} + + """ + for entry in group["entries"]: + val = self + for key in entry["path"].split("/"): + if key not in val and "frontierEntries" in val: + val = val["frontierEntries"][0] + val = val[key] + if has_markdown: + explanation = md.convert(entry["explanation"]) + else: + explanation = entry["explanation"] + html += f""" + + + + + + """ + html += "
{entry['label']}{val} + {entry["description"]} +
+ {explanation} +
" + + html += f'
Assumptions
    ' + if has_markdown: + for assumption in self["reportData"]["assumptions"]: + html += f"
  • {md.convert(assumption)}
  • " + html += "
" + + return html + + def _item_result_summary_table(self): + html = """ + """ + + if has_markdown: + md = markdown.Markdown(extensions=["mdx_math"]) + for group in self["reportData"]["groups"]: + html += f""" +
+ + {group['title']} + + """ + for entry in group["entries"]: + val = self + for key in entry["path"].split("/"): + val = val[key] + if has_markdown: + explanation = md.convert(entry["explanation"]) + else: + explanation = entry["explanation"] + html += f""" + + + + + + """ + html += "
{explanation}{entry['label']}{val}{entry["description"]}
" + + html += f"
Assumptions
    " + if has_markdown: + for assumption in self["reportData"]["assumptions"]: + html += f"
  • {md.convert(assumption)}
  • " + html += "
" + + return html + + def _batch_result_table(self, indices): + succeeded_item_indices = [ + i for i in indices if EstimatorResult._is_succeeded(self[i]) + ] + if len(succeeded_item_indices) == 0: + print("None of the jobs succeeded") + return "" + + first_succeeded_item_index = succeeded_item_indices[0] + + html = "" + + if has_markdown: + md = markdown.Markdown(extensions=["mdx_math"]) + + item_headers = "".join(f"{i}" for i in indices) + + for group_index, group in enumerate( + self[first_succeeded_item_index]["reportData"]["groups"] + ): + html += f""" +
+ + {group['title']} + + + {item_headers}""" + + visited_entries = set() + + for entry in [ + entry + for index in succeeded_item_indices + for entry in self[index]["reportData"]["groups"][group_index]["entries"] + ]: + label = entry["label"] + if label in visited_entries: + continue + visited_entries.add(label) + + html += f""" + + + """ + + for index in indices: + val = self[index] + if index in succeeded_item_indices: + for key in entry["path"].split("/"): + if key in val: + val = val[key] + else: + val = "N/A" + break + else: + val = "N/A" + html += f""" + + """ + + html += """ + + """ + html += "
Item
{label}{val}
" + + html += f'
Assumptions
    ' + if has_markdown: + for assumption in self[0]["reportData"]["assumptions"]: + html += f"
  • {md.convert(assumption)}
  • " + html += "
" + + return html + + @staticmethod + def _is_succeeded(obj): + return "status" in obj and obj["status"] == "success" + + +class EstimatorResultDiagram: + def __init__(self, data): + data.pop("reportData") + self.data_json = json.dumps(data).replace(" ", "") + self.vis_lib = "https://cdn-aquavisualization-prod.azureedge.net/resource-estimation/index.js" + self.space = HTMLWrapper(self._space_diagram()) + self.time = HTMLWrapper(self._time_diagram()) + + def _space_diagram(self): + html = f""" + + """ + return html + + def _time_diagram(self): + html = f""" + + """ + return html + + +class LogicalCounts(dict): + """ + Microsoft Resource Estimator Logical Counts. + + The class represents logical counts that can be used as input to physical estimation of resources + in the Microsoft Resource Estimator. + """ + + def __init__(self, data: Dict): + self._data = {} + self._data["numQubits"] = data.get("numQubits", 0) + self._data["tCount"] = data.get("tCount", 0) + self._data["rotationCount"] = data.get("rotationCount", 0) + self._data["rotationDepth"] = data.get("rotationDepth", 0) + self._data["cczCount"] = data.get("cczCount", 0) + self._data["ccixCount"] = data.get("ccixCount", 0) + self._data["measurementCount"] = data.get("measurementCount", 0) + if "numComputeQubits" in data: + self._data["numComputeQubits"] = data["numComputeQubits"] + if "readFromMemoryCount" in data: + self._data["readFromMemoryCount"] = data["readFromMemoryCount"] + if "writeToMemoryCount" in data: + self._data["writeToMemoryCount"] = data["writeToMemoryCount"] + super().__init__(self._data) + + @property + def json(self): + """ + Returns a JSON representation of the logical counts. 
+ """ + if not hasattr(self, "_json"): + import json + + self._json = json.dumps(self._data) + + return self._json + + def estimate( + self, params: Union[dict, List, EstimatorParams] = None + ) -> EstimatorResult: + """ + Estimates resources for the current logical counts, using the + Parallel Synthesis Sequential Pauli Computation (PSSPC) layout method. + + :param params: The parameters to configure physical estimation. + :return: The estimated resources. + :rtype: EstimatorResult + """ + if params is None: + params = [{}] + elif isinstance(params, EstimatorParams): + if params.has_items: + params = params.as_dict()["items"] + else: + params = [params.as_dict()] + elif isinstance(params, dict): + params = [params] + return EstimatorResult( + json.loads(physical_estimates(self.json, json.dumps(params))) + ) diff --git a/source/qdk_package/qdk/noisy_simulator/__init__.py b/source/qdk_package/qdk/noisy_simulator/__init__.py new file mode 100644 index 0000000000..c150ec9a45 --- /dev/null +++ b/source/qdk_package/qdk/noisy_simulator/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from ._noisy_simulator import ( + NoisySimulatorError, + Operation, + Instrument, + DensityMatrixSimulator, + StateVectorSimulator, +) + +__all__ = [ + "NoisySimulatorError", + "Operation", + "Instrument", + "DensityMatrixSimulator", + "StateVectorSimulator", +] diff --git a/source/qdk_package/qdk/noisy_simulator/_noisy_simulator.py b/source/qdk_package/qdk/noisy_simulator/_noisy_simulator.py new file mode 100644 index 0000000000..c640d14c85 --- /dev/null +++ b/source/qdk_package/qdk/noisy_simulator/_noisy_simulator.py @@ -0,0 +1,10 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from .._native import ( + NoisySimulatorError, + Operation, + Instrument, + DensityMatrixSimulator, + StateVectorSimulator, +) diff --git a/source/qdk_package/qdk/noisy_simulator/_noisy_simulator.pyi b/source/qdk_package/qdk/noisy_simulator/_noisy_simulator.pyi new file mode 100644 index 0000000000..cd740324b6 --- /dev/null +++ b/source/qdk_package/qdk/noisy_simulator/_noisy_simulator.pyi @@ -0,0 +1,242 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from typing import Optional, List, Any + +class NoisySimulatorError(BaseException): + """ + EXPERIMENTAL: + + An error returned from the Q# noisy simulator. + """ + + ... + +class Operation: + """ + EXPERIMENTAL: + + This struct represents a quantum operation. A quantum operation is a linear + transformation that maps a valid density matrix to another valid density matrices. + """ + + def __init__(self, kraus_operators: Any) -> None: + """ + Construct an operation from a list of Kraus operators. + Matrices must be of dimension 2^k x 2^k, where k is an integer. + + :param kraus_operators: List of Kraus operators. Each operator is a 2D matrix stored as + a list of lists of complex numbers, or a numpy array. + :type kraus_operators: List[List[List[complex]]] + :raises NoisySimulatorError: If the Kraus matrices are ill formed. + """ + ... + + def get_effect_matrix(self) -> List[List[complex]]: + r""" + Returns effect matrix: + $$ (\sum_i K_i^{\dagger} K_i) $$ + where $K_i$ are Kraus operators. + """ + ... + + def get_operation_matrix(self) -> List[List[complex]]: + r""" + Return matrix representation: + $$ \sum_i K_i \otimes K_{i}* $$ + where $K_i$ are Kraus operators. + """ + ... + + def get_kraus_operators(self) -> List[List[List[complex]]]: + """ + Return list of Kraus operators. + """ + ... + + def get_number_of_qubits(self) -> int: + """ + Return the number of qubits that the operation acts on. 
+ """ + +class Instrument: + """ + EXPERIMENTAL: + + An instrument is the means by which we make measurements on a quantum system. + """ + + def __init__(self, operations: List[Operation]) -> None: + """ + Constructs an instrument from a list of operations. + """ + ... + +class DensityMatrix: + """ + EXPERIMENTAL: + + A square complex matrix of size 2^k x 2^k representing the state + of a quantum system. The data is stored in a linear vector for + performance reasons. + """ + + def data(self) -> List[List[complex]]: + """ + Returns a copy of the matrix data. + """ + ... + + def dimension(self) -> int: + """ + Returns the dimension of the matrix. E.g.: if the matrix is + 5 x 5, it returns 5. + """ + ... + + def number_of_qubits(self) -> int: + """ + Returns the number of qubits in the system. + """ + ... + +class DensityMatrixSimulator: + """ + EXPERIMENTAL: + + A quantum circuit simulator using a density matrix. + + If the simulator reaches an invalid state due to a numerical + error, it will raise a `SimulatorException`. + """ + + def __init__(self, number_of_qubits: int, seed: Optional[int]) -> None: + """ + Creates a new `DensityMatrixSimulator`. + """ + ... + + def apply_operation(self, operation: Operation, qubits: List[int]) -> None: + """ + Apply an operation to the given qubit ids. + """ + ... + + def apply_instrument(self, instrument: Instrument, qubits: List[int]) -> None: + """ + Apply non selective evolution to the given qubit ids. + """ + ... + + def sample_instrument(self, instrument: Instrument, qubits: List[int]) -> int: + """ + Performs selective evolution under the given instrument. + Returns the index of the observed outcome. + + Use this method to perform measurements on the quantum system. + """ + + def get_state(self) -> Optional[DensityMatrix]: + """ + Returns the `DensityMatrix` if the simulator is in a valid state, + otherwise returns None. + """ + ... 
+ + def set_state(self, state: DensityMatrix) -> None: + """ + Set state of the quantum system to another `DensityMatrix` of the + same dimensions. + """ + ... + + def set_trace(self, trace: float) -> None: + """ + Set trace of the quantum system. That is, the probability of + finding the quantum system in the current state. The new trace + must be a number between 0 and 1. + """ + ... + +class StateVector: + """ + EXPERIMENTAL: + + A vector representing a pure state of a quantum system. + """ + + def data(self) -> List[complex]: + """ + Returns a copy of the vector data. + """ + ... + + def dimension(self) -> int: + """ + Returns the dimension of the vector. + """ + ... + + def number_of_qubits(self) -> int: + """ + Returns the number of qubits in the system. + """ + ... + +class StateVectorSimulator: + """ + EXPERIMENTAL: + + A quantum circuit simulator using a density matrix. + + If the simulator reaches an invalid state due to a numerical + error, it will raise a `SimulatorException`. + """ + + def __init__(self, number_of_qubits: int, seed: Optional[int]) -> None: + """ + Creates a new `DensityMatrixSimulator`. + """ + ... + + def apply_operation(self, operation: Operation, qubits: List[int]) -> None: + """ + Apply an operation to the given qubit ids. + """ + ... + + def apply_instrument(self, instrument: Instrument, qubits: List[int]) -> None: + """ + Apply non selective evolution to the given qubit ids. + """ + ... + + def sample_instrument(self, instrument: Instrument, qubits: List[int]) -> int: + """ + Performs selective evolution under the given instrument. + Returns the index of the observed outcome. + + Use this method to perform measurements on the quantum system. + """ + + def get_state(self) -> Optional[StateVector]: + """ + Returns the `StateVector` if the simulator is in a valid state, + otherwise returns None. + """ + ... 
+ + def set_state(self, state: StateVector) -> None: + """ + Set state of the quantum system to another `StateVector` of the + same dimensions. + """ + ... + + def set_trace(self, trace: float) -> None: + """ + Set trace of the quantum system. That is, the probability of + finding the quantum system in the current state. The new trace + must be a number between 0 and 1. + """ + ... diff --git a/source/qdk_package/qdk/openqasm.py b/source/qdk_package/qdk/openqasm.py deleted file mode 100644 index c51382c4dc..0000000000 --- a/source/qdk_package/qdk/openqasm.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -"""OpenQASM interoperability for the Q# ecosystem. - -This module re-exports all public symbols from [qsharp.openqasm](:mod:`qsharp.openqasm`), -making them available under the ``qdk.openqasm`` namespace. It provides -functions for importing, compiling, running, and estimating resources for -OpenQASM 2.0 and 3.0 programs using the local Q# toolchain. - -Key exports: - -- :func:`~qsharp.openqasm.import_openqasm` — parse and import an OpenQASM program into the Q# interpreter. -- :func:`~qsharp.openqasm.run` — execute an OpenQASM program and return shot results. -- :func:`~qsharp.openqasm.estimate` — run the Microsoft Resource Estimator on an OpenQASM program. -- :func:`~qsharp.openqasm.circuit` — synthesize a circuit diagram from an OpenQASM program. - -For full API documentation see [qsharp.openqasm](:mod:`qsharp.openqasm`). -""" - -from qsharp.openqasm import * # pyright: ignore[reportWildcardImportFromLibrary] diff --git a/source/qdk_package/qdk/openqasm/__init__.py b/source/qdk_package/qdk/openqasm/__init__.py new file mode 100644 index 0000000000..5e9d3757a0 --- /dev/null +++ b/source/qdk_package/qdk/openqasm/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from ._circuit import circuit +from ._compile import compile +from ._estimate import estimate +from ._import import import_openqasm +from ._run import run +from .._native import ProgramType, OutputSemantics, QasmError # type: ignore + +__all__ = [ + "circuit", + "compile", + "estimate", + "import_openqasm", + "run", + "ProgramType", + "OutputSemantics", + "QasmError", +] diff --git a/source/qdk_package/qdk/openqasm/_circuit.py b/source/qdk_package/qdk/openqasm/_circuit.py new file mode 100644 index 0000000000..eaed78cba8 --- /dev/null +++ b/source/qdk_package/qdk/openqasm/_circuit.py @@ -0,0 +1,114 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from time import monotonic +from typing import Any, Callable, Dict, Optional, Union +from .._fs import read_file, list_directory, resolve +from .._http import fetch_github +from .._native import circuit_qasm_program # type: ignore +from .._qsharp import ( + get_interpreter, + ipython_helper, + Circuit, + CircuitConfig, + python_args_to_interpreter_args, +) +from .. import telemetry_events + + +def circuit( + source: Optional[Union[str, Callable]] = None, + *args, + **kwargs: Any, +) -> Circuit: + """ + Synthesizes a circuit for an OpenQASM program. Either a program string or + an operation must be provided. + + :param source: An OpenQASM program. Alternatively, a callable can be provided, + which must be an already imported global callable. + :type source: str or Callable + + :param *args: The arguments to pass to the callable, if one is provided. + + :keyword generation_method: The method to use for circuit generation. + :attr:`~qsharp.CircuitGenerationMethod.ClassicalEval` evaluates classical + control flow at circuit generation time. + :attr:`~qsharp.CircuitGenerationMethod.Simulate` runs a full simulation to + trace the circuit. + :attr:`~qsharp.CircuitGenerationMethod.Static` uses partial evaluation and + requires a non-``Unrestricted`` target profile. 
Defaults to ``None`` which + auto-selects the generation method. + :kwtype generation_method: :class:`~qsharp.CircuitGenerationMethod` + + :keyword max_operations: The maximum number of operations to include in the circuit. + Defaults to ``None`` which means no limit. + :kwtype max_operations: int + + :keyword source_locations: If ``True``, annotates each gate with its source location. + Defaults to ``False``. + :kwtype source_locations: bool + + :keyword group_by_scope: If ``True``, groups operations by their containing scope, such as function declarations or loop blocks. + Defaults to ``True``. + :kwtype group_by_scope: bool + + :keyword prune_classical_qubits: If ``True``, removes qubits that are never used in a quantum + gate (e.g. qubits only used as classical controls). Defaults to ``False``. + :kwtype prune_classical_qubits: bool + + :keyword name: The name of the program. This is used as the entry point for the program. + :kwtype name: str + + :keyword search_path: The optional search path for resolving file references. + :kwtype search_path: str + + :return: The synthesized circuit. + :rtype: :class:`~qsharp._native.Circuit` + :raises QasmError: If there is an error generating, parsing, or analyzing the OpenQASM source. + :raises QSharpError: If there is an error evaluating or synthesizing the circuit. 
+ """ + + ipython_helper() + start = monotonic() + telemetry_events.on_circuit_qasm() + + max_operations = kwargs.pop("max_operations", None) + generation_method = kwargs.pop("generation_method", None) + source_locations = kwargs.pop("source_locations", False) + group_by_scope = kwargs.pop("group_by_scope", True) + prune_classical_qubits = kwargs.pop("prune_classical_qubits", False) + config = CircuitConfig( + max_operations=max_operations, + generation_method=generation_method, + source_locations=source_locations, + group_by_scope=group_by_scope, + prune_classical_qubits=prune_classical_qubits, + ) + + if isinstance(source, Callable) and hasattr(source, "__global_callable"): + args = python_args_to_interpreter_args(args) + res = get_interpreter().circuit( + config, callable=source.__global_callable, args=args + ) + else: + # remove any entries from kwargs with a None key or None value + kwargs = {k: v for k, v in kwargs.items() if k is not None and v is not None} + + if "search_path" not in kwargs: + kwargs["search_path"] = "." + + res = circuit_qasm_program( + source, + config, + read_file, + list_directory, + resolve, + fetch_github, + **kwargs, + ) + + durationMs = (monotonic() - start) * 1000 + telemetry_events.on_circuit_qasm_end(durationMs) + + return res diff --git a/source/qdk_package/qdk/openqasm/_compile.py b/source/qdk_package/qdk/openqasm/_compile.py new file mode 100644 index 0000000000..8f34963eb1 --- /dev/null +++ b/source/qdk_package/qdk/openqasm/_compile.py @@ -0,0 +1,100 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from time import monotonic +from typing import Any, Callable, Dict, Optional, Union +from .._fs import read_file, list_directory, resolve +from .._http import fetch_github + +from .._native import ( # type: ignore + compile_qasm_program_to_qir, +) +from .._qsharp import ( + QirInputData, + get_interpreter, + ipython_helper, + TargetProfile, + python_args_to_interpreter_args, +) +from .. 
import telemetry_events + + +def compile( + source: Union[str, Callable], + *args: Any, + **kwargs: Any, +) -> QirInputData: + """ + Compiles the OpenQASM source code into a program that can be submitted to a + target as QIR (Quantum Intermediate Representation). + Either a full program or a callable with arguments must be provided. + + :param source: An OpenQASM program. Alternatively, a callable can be provided, + which must be an already imported global callable. + :type source: str or Callable + :param *args: The arguments to pass to the callable, if one is provided. + :param **kwargs: Additional keyword arguments for compiling the source program. Common options: + + - ``name`` (str): The name of the circuit. This is used as the entry point for the program. + - ``target_profile`` (TargetProfile): The target profile to use for code generation. + - ``search_path`` (str): The optional search path for resolving file references. + - ``output_semantics`` (OutputSemantics): The output semantics for the compilation. + :return: The compiled program. Use ``str()`` to get the QIR string. + :rtype: QirInputData + :raises ValueError: If ``source`` is neither a string nor a callable with a + ``__global_callable`` attribute. + :raises QasmError: If there is an error generating, parsing, or analyzing the OpenQASM source. + :raises QSharpError: If there is an error compiling the program. + + Example: + + .. code-block:: python + from qsharp.openqasm import compile + source = ... + program = compile(source) + with open('myfile.ll', 'w') as file: + file.write(str(program)) + """ + + ipython_helper() + start = monotonic() + + # This doesn't work the same way as the Q# compile function as it doesn't + # have access to the global configuration which has the target profile. + # Instead, we get the target profile from the kwargs and pass it to the telemetry event. 
+ target_profile = str(kwargs.get("target_profile", "unspecified")) + + telemetry_events.on_compile_qasm(target_profile) + + if isinstance(source, Callable) and hasattr(source, "__global_callable"): + args = python_args_to_interpreter_args(args) + ll_str = get_interpreter().qir( + entry_expr=None, callable=source.__global_callable, args=args + ) + elif isinstance(source, str): + # remove any entries from kwargs with a None key or None value + kwargs = {k: v for k, v in kwargs.items() if k is not None and v is not None} + + if "search_path" not in kwargs: + kwargs["search_path"] = "." + if "target_profile" not in kwargs: + kwargs["target_profile"] = TargetProfile.Base + + ll_str = compile_qasm_program_to_qir( + source, + read_file, + list_directory, + resolve, + fetch_github, + **kwargs, + ) + else: + raise ValueError( + "source must be a string or a callable with __global_callable attribute" + ) + res = QirInputData("main", ll_str) + + durationMs = (monotonic() - start) * 1000 + telemetry_events.on_compile_qasm_end(durationMs, target_profile) + + return res diff --git a/source/qdk_package/qdk/openqasm/_estimate.py b/source/qdk_package/qdk/openqasm/_estimate.py new file mode 100644 index 0000000000..755f61eaec --- /dev/null +++ b/source/qdk_package/qdk/openqasm/_estimate.py @@ -0,0 +1,107 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import json +from time import monotonic +from typing import Any, Callable, Dict, List, Optional, Union, cast +from .._fs import read_file, list_directory, resolve +from .._http import fetch_github +from .._native import ( # type: ignore + resource_estimate_qasm_program, +) +from ..estimator import EstimatorParams, EstimatorResult + +from .._qsharp import ( + get_interpreter, + ipython_helper, + python_args_to_interpreter_args, +) +from .. 
import telemetry_events + + +def estimate( + source: Union[str, Callable], + params: Optional[Union[Dict[str, Any], List, EstimatorParams]] = None, + *args: Any, + **kwargs: Any, +) -> EstimatorResult: + """ + Estimates the resource requirements for executing OpenQASM source code. + Either a full program or a callable with arguments must be provided. + + :param source: An OpenQASM program. Alternatively, a callable can be provided, + which must be an already imported global callable. + :type source: str or Callable + :param params: The parameters to configure estimation. + :type params: Dict, List, or EstimatorParams + :param *args: The arguments to pass to the callable, if one is provided. + :param **kwargs: Additional keyword arguments. Common options: + + - ``name`` (str): The name of the circuit. This is used as the entry point for the program. + Defaults to ``'program'``. + - ``search_path`` (str): The optional search path for resolving imports. + :return: The estimated resources. + :rtype: EstimatorResult + :raises ValueError: If ``source`` is neither a string nor a callable with a + ``__global_callable`` attribute. + :raises QasmError: If there is an error generating, parsing, or analyzing the OpenQASM source. + :raises QSharpError: If there is an error compiling the program. 
+ """ + + ipython_helper() + + def _coerce_estimator_params( + params: Optional[ + Union[Dict[str, Any], List[Dict[str, Any]], EstimatorParams] + ] = None, + ) -> List[Dict[str, Any]]: + if params is None: + return [{}] + elif isinstance(params, EstimatorParams): + if params.has_items: + return cast(List[Dict[str, Any]], params.as_dict()["items"]) + else: + return [params.as_dict()] + elif isinstance(params, dict): + return [params] + return params + + params = _coerce_estimator_params(params) + param_str = json.dumps(params) + telemetry_events.on_estimate_qasm() + start = monotonic() + if isinstance(source, Callable) and hasattr(source, "__global_callable"): + args = python_args_to_interpreter_args(args) + res_str = get_interpreter().estimate( + param_str, entry_expr=None, callable=source.__global_callable, args=args + ) + elif isinstance(source, str): + # remove any entries from kwargs with a None key or None value + kwargs = {k: v for k, v in kwargs.items() if k is not None and v is not None} + + if "search_path" not in kwargs: + kwargs["search_path"] = "." + + res_str = resource_estimate_qasm_program( + source, + param_str, + read_file, + list_directory, + resolve, + fetch_github, + **kwargs, + ) + else: + raise ValueError( + "source must be a string or a callable with __global_callable attribute" + ) + res = json.loads(res_str) + + try: + qubits = res[0]["logicalCounts"]["numQubits"] + except (KeyError, IndexError): + qubits = "unknown" + + durationMs = (monotonic() - start) * 1000 + telemetry_events.on_estimate_qasm_end(durationMs, qubits) + return EstimatorResult(res) diff --git a/source/qdk_package/qdk/openqasm/_import.py b/source/qdk_package/qdk/openqasm/_import.py new file mode 100644 index 0000000000..e616ee0d39 --- /dev/null +++ b/source/qdk_package/qdk/openqasm/_import.py @@ -0,0 +1,72 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from time import monotonic +from typing import Any + +from ._ipython import display_or_print +from .._fs import read_file, list_directory, resolve +from .._http import fetch_github +from .._qsharp import ( + get_interpreter, + ipython_helper, +) +from .. import telemetry_events + + +def import_openqasm( + source: str, + **kwargs: Any, +) -> Any: + """ + Imports OpenQASM source code into the active QDK interpreter. By default, import uses ``ProgramType.Operation`` + such that the source becomes a Q# operation in the global namespace with parameters for any declared classical + inputs and parameters for each of the declared qubits, while any explicit or implicit output declarations become + the return type of the operation. + Alternatively, specifying ``ProgramType.File`` will treat the input source as a stand-alone program and create + an operation in the ``qasm_import`` namespace that only takes classical parameters, allocates the required qubits + internally and releases them at the end of the operation. + Finally, using ``ProgramType.Fragments`` executes the provided source in the current interactive interpreter, + defining any declared variables or operations in the current scope and returning the value of the last statement + in the source. + + :param source: An OpenQASM program or fragment. + :type source: str + :param **kwargs: Additional keyword arguments. Common options: + + - ``name`` (str): The name of the program. This is used as the entry point for the program. + - ``search_path`` (str): The optional search path for resolving file references. + - ``output_semantics`` (OutputSemantics): The output semantics for the compilation. + - ``program_type`` (ProgramType): The type of program compilation to perform. + Defaults to ``ProgramType.Operation``. + :return: The value returned by the last statement in the source code. + :rtype: Any + :raises QasmError: If there is an error generating, parsing, or analyzing the OpenQASM source. 
+ :raises QSharpError: If there is an error compiling the program. + """ + + ipython_helper() + + telemetry_events.on_import_qasm() + start_time = monotonic() + + # remove any entries from kwargs with a None key or None value + kwargs = {k: v for k, v in kwargs.items() if k is not None and v is not None} + + if "search_path" not in kwargs: + kwargs["search_path"] = "." + + res = get_interpreter().import_qasm( + source, + display_or_print, + read_file, + list_directory, + resolve, + fetch_github, + **kwargs, + ) + + durationMs = (monotonic() - start_time) * 1000 + telemetry_events.on_import_qasm_end(durationMs) + + return res diff --git a/source/qdk_package/qdk/openqasm/_ipython.py b/source/qdk_package/qdk/openqasm/_ipython.py new file mode 100644 index 0000000000..32a11bf82b --- /dev/null +++ b/source/qdk_package/qdk/openqasm/_ipython.py @@ -0,0 +1,24 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from .._native import Output # type: ignore + +_in_jupyter = False +try: + from IPython.display import display + + if get_ipython().__class__.__name__ == "ZMQInteractiveShell": # type: ignore + _in_jupyter = True # Jupyter notebook or qtconsole +except: + pass + + +def display_or_print(output: Output) -> None: + if _in_jupyter: + try: + display(output) + return + except: + # If IPython is not available, fall back to printing the output + pass + print(output, flush=True) diff --git a/source/qdk_package/qdk/openqasm/_run.py b/source/qdk_package/qdk/openqasm/_run.py new file mode 100644 index 0000000000..1b82cb41ff --- /dev/null +++ b/source/qdk_package/qdk/openqasm/_run.py @@ -0,0 +1,195 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from time import monotonic +from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union +from .._fs import read_file, list_directory, resolve +from .._http import fetch_github +from .._native import QasmError, Output, run_qasm_program # type: ignore +from .._qsharp import ( + BitFlipNoise, + DepolarizingNoise, + PauliNoise, + PhaseFlipNoise, + ShotResult, + StateDump, + StateDumpData, + get_interpreter, + ipython_helper, + python_args_to_interpreter_args, + NoiseConfig, +) +from .. import telemetry_events +from ._ipython import display_or_print + + +def run( + source: Union[str, Callable], + shots: int = 1024, + *args: Any, + on_result: Optional[Callable[[ShotResult], None]] = None, + save_events: bool = False, + noise: Optional[ + Union[ + Tuple[float, float, float], + PauliNoise, + BitFlipNoise, + PhaseFlipNoise, + DepolarizingNoise, + NoiseConfig, + ] + ] = None, + qubit_loss: Optional[float] = None, + as_bitstring: bool = False, + **kwargs: Any, +) -> List[Any]: + """ + Runs the given OpenQASM program for the given number of shots. + Either a full program or a callable with arguments must be provided. + Each shot uses an independent instance of the simulator. + + :param source: An OpenQASM program. Alternatively, a callable can be provided, + which must be an already imported global callable. + :type source: str or Callable + :param shots: The number of shots to run. Defaults to ``1024``. + :type shots: int + :param *args: The arguments to pass to the callable, if one is provided. + :param on_result: A callback function that will be called with each result. + Only used when a callable is provided. + :type on_result: Callable + :param save_events: If true, the output of each shot will be saved. If false, they will be printed. + Only used when a callable is provided. + :type save_events: bool + :param noise: The noise to use in simulation. 
+ :type noise: Union[Tuple[float, float, float], PauliNoise, BitFlipNoise, PhaseFlipNoise, DepolarizingNoise, NoiseConfig] + :param qubit_loss: The probability of qubit loss in simulation. + :type qubit_loss: float + :param as_bitstring: If true, the result registers will be converted to bitstrings. + :type as_bitstring: bool + :param **kwargs: Additional keyword arguments for compiling the source program. Common options: + + - ``name`` (str): The name of the circuit. This is used as the entry point for the program. + - ``target_profile`` (TargetProfile): The target profile to use for code generation. + - ``search_path`` (str): The optional search path for resolving file references. + - ``output_semantics`` (OutputSemantics): The output semantics for the compilation. + - ``seed`` (int): The seed to use for the random number generator. + :return: A list of results or runtime errors. If ``save_events`` is true, a list of ``ShotResult`` values is returned. + :rtype: List[Any] + :raises QasmError: If there is an error generating, parsing, or analyzing the OpenQASM source. + :raises QSharpError: If there is an error interpreting the input. + :raises ValueError: If the number of shots is less than 1. + :raises QasmError: If ``on_result`` or ``save_events`` are used when running OpenQASM programs. 
+ """ + + ipython_helper() + + if shots < 1: + raise ValueError("The number of shots must be greater than 0.") + + telemetry_events.on_run_qasm( + shots, noise=noise is not None, qubit_loss=qubit_loss is not None + ) + start_time = monotonic() + + results: List[ShotResult] = [] + + def on_save_events(output: Output) -> None: + # Append the output to the last shot's output list + results[-1]["events"].append(output) + if output.is_matrix(): + results[-1]["matrices"].append(output) + elif output.is_state_dump(): + dump_data = cast(StateDumpData, output.state_dump()) + results[-1]["dumps"].append(StateDump(dump_data)) + elif output.is_message(): + results[-1]["messages"].append(str(output)) + + callable = None + source_str: Optional[str] = None + if isinstance(source, Callable) and hasattr(source, "__global_callable"): + args = python_args_to_interpreter_args(args) + callable = source.__global_callable + elif isinstance(source, str): + source_str = source + + noise_config = None + if isinstance(noise, NoiseConfig): + noise_config = noise + noise = None + + if callable: + for _ in range(shots): + results.append( + { + "result": None, + "events": [], + "matrices": [], + "dumps": [], + "messages": [], + } + ) + run_results = get_interpreter().run( + source_str, + on_save_events if save_events else display_or_print, + noise_config, + noise, + qubit_loss=qubit_loss, + callable=callable, + args=args, + ) + results[-1]["result"] = run_results + + if on_result: + on_result(results[-1]) + + if not save_events: + # If we are not saving events, we can just return the results + # as a list of results. + results = [result["result"] for result in results] + else: + # running the QASM program in isolation means we can't use the + # interpreter to run the program, so we can't cache the compilation + # results. This means we need to compile the program for each + # shot, or we push the shots into the QASM program and compile it once. 
+ # + # This breaks the output streaming and event saving. + if on_result or save_events: + raise QasmError( + "The `on_result` and `save_events` parameters are not supported when running QASM programs." + ) + + if source_str is None: + raise QasmError( + "source must be a string or a callable with __global_callable attribute" + ) + + # remove any entries from kwargs with a None key or None value + kwargs = {k: v for k, v in kwargs.items() if k is not None and v is not None} + + if "search_path" not in kwargs: + kwargs["search_path"] = "." + + kwargs["shots"] = shots + + results = run_qasm_program( + source_str, + display_or_print, + noise_config, + noise, + qubit_loss, + read_file, + list_directory, + resolve, + fetch_github, + **kwargs, + ) + + durationMs = (monotonic() - start_time) * 1000 + telemetry_events.on_run_qasm_end(durationMs, shots) + + if as_bitstring: + from ._utils import as_bitstring as convert_to_bitstring + + results = convert_to_bitstring(results) + + return results diff --git a/source/qdk_package/qdk/openqasm/_utils.py b/source/qdk_package/qdk/openqasm/_utils.py new file mode 100644 index 0000000000..11bde66b8d --- /dev/null +++ b/source/qdk_package/qdk/openqasm/_utils.py @@ -0,0 +1,40 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from .. 
import Result + + +def _map_qsharp_result_to_bit(v) -> str: + if isinstance(v, Result): + if v == Result.One: + return "1" + else: + return "0" + return str(v) + + +def _convert_result_arrays_to_bitstrings(obj): + if isinstance(obj, tuple): + return tuple([_convert_result_arrays_to_bitstrings(term) for term in obj]) + elif isinstance(obj, list): + # if all elements are Q# results, convert to bitstring + if all([isinstance(bit, Result) for bit in obj]): + return "".join([_map_qsharp_result_to_bit(bit) for bit in obj]) + return [_convert_result_arrays_to_bitstrings(bit) for bit in obj] + elif isinstance(obj, Result): + if obj == Result.One: + return 1 + else: + return 0 + else: + return obj + + +def as_bitstring(obj): + """ + Convert Q# results to bitstrings. + + :param obj: The object to convert. + :return: The converted object. + """ + return _convert_result_arrays_to_bitstrings(obj) diff --git a/source/qdk_package/qdk/qiskit.py b/source/qdk_package/qdk/qiskit.py deleted file mode 100644 index 8f26479eb2..0000000000 --- a/source/qdk_package/qdk/qiskit.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -"""Qiskit interoperability for the Q# ecosystem. - -This module re-exports all public symbols from [qsharp.interop.qiskit](:mod:`qsharp.interop.qiskit`), -making them available under the ``qdk.qiskit`` namespace. It provides Qiskit -backends backed by the local Q# simulator and NeutralAtomDevice, allowing -Qiskit circuits to be run locally without any cloud connection. - -Key exports: - -- :class:`~qsharp.interop.qiskit.backends.qsharp_backend.QSharpBackend` -- :class:`~qsharp.interop.qiskit.backends.neutral_atom_backend.NeutralAtomBackend` -- :class:`~qsharp.interop.qiskit.backends.re_backend.ResourceEstimatorBackend` -- :func:`~qsharp.interop.qiskit.estimate` - -For full API documentation see [qsharp.interop.qiskit](:mod:`qsharp.interop.qiskit`). 
- -Requires the ``qiskit`` extra: ``pip install qdk[qiskit]``. - -Usage: - - from qiskit import QuantumCircuit - from qdk.qiskit import NeutralAtomBackend - - circuit = QuantumCircuit(2, 2) - circuit.h(0) - circuit.cx(0, 1) - circuit.measure([0, 1], [0, 1]) - - backend = NeutralAtomBackend() - job = backend.run(circuit, shots=1000) - result = job.result() - print(result.results[0].data.counts) -""" - -try: - from qsharp.interop.qiskit import * # pyright: ignore[reportWildcardImportFromLibrary] -except Exception as ex: - raise ImportError( - "qdk.qiskit requires the qiskit extra. Install with 'pip install qdk[qiskit]'." - ) from ex diff --git a/source/qdk_package/qdk/qiskit/__init__.py b/source/qdk_package/qdk/qiskit/__init__.py new file mode 100644 index 0000000000..8a422db319 --- /dev/null +++ b/source/qdk_package/qdk/qiskit/__init__.py @@ -0,0 +1,108 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Qiskit interoperability for the Q# ecosystem. + +This module provides Qiskit backends backed by the local Q# simulator and +NeutralAtomDevice, allowing Qiskit circuits to be run locally without any +cloud connection. + +Available backends: + +- :class:`~qsharp.interop.qiskit.QSharpBackend` + Runs any Qiskit ``QuantumCircuit`` using the Q# simulator. Supports + noise-free simulation via QASM export and QIR compilation. + +- :class:`~qsharp.interop.qiskit.NeutralAtomBackend` + Runs Qiskit circuits on the local NeutralAtomDevice simulator. Decomposes + gates to the native ``{Rz, SX, CZ}`` gate set and optionally models + per-gate noise (including qubit loss). Loss shots are exposed separately + from accepted shots in the job result. + +- :class:`~qsharp.interop.qiskit.ResourceEstimatorBackend` + Estimates quantum resources (qubits, T-gates, etc.) for a Qiskit circuit + without running a full simulation. 
+ +- :func:`~qsharp.interop.qiskit.estimate` + Convenience function that runs resource estimation on a Qiskit circuit + and returns an :class:`~qsharp.estimator.EstimatorResult` directly, without + needing to construct a backend or job manually. + +Usage: + + from qiskit import QuantumCircuit + from qsharp.interop.qiskit import NeutralAtomBackend + from qsharp._simulation import NoiseConfig + + circuit = QuantumCircuit(2, 2) + circuit.h(0) + circuit.cx(0, 1) + circuit.measure([0, 1], [0, 1]) + + noise = NoiseConfig() + noise.rz.loss = 0.05 # 5% qubit loss per Rz gate + + backend = NeutralAtomBackend() + job = backend.run(circuit, shots=1000, noise=noise, seed=42) + result = job.result() + print(result.results[0].data.counts) # accepted shots only + print(result.results[0].data.raw_counts) # includes loss shots +""" +from typing import Any, Dict, List, Optional, Union + +from ..estimator import EstimatorParams, EstimatorResult +from .._native import OutputSemantics, ProgramType, QasmError +from .backends import ( + NeutralAtomBackend, + QSharpBackend, + ResourceEstimatorBackend, + QirTarget, +) +from .jobs import QsJob, QsSimJob, ReJob, QsJobSet +from .execution import DetaultExecutor +from qiskit import QuantumCircuit + + +def estimate( + circuit: QuantumCircuit, + params: Optional[Union[Dict[str, Any], List, EstimatorParams]] = None, + **options, +) -> EstimatorResult: + """ + Estimates resources for Qiskit QuantumCircuit. + + :param circuit: The input Qiskit QuantumCircuit object. + :param params: The parameters to configure physical estimation. + :type params: EstimatorParams or dict or list + :param **options: Additional options for the transpiler, exporter, or Qiskit passes + configuration. Defaults to backend config values. Common options: + + - ``optimization_level`` (int): Transpiler optimization level. + - ``basis_gates`` (list): Basis gates for transpilation. + - ``includes`` (list): Include paths for QASM resolution. 
+ - ``search_path`` (str): Search path for resolving file references. + :raises QasmError: If there is an error generating or parsing QASM. + :return: The estimated resources. + :rtype: EstimatorResult + """ + from .._qsharp import ipython_helper + + ipython_helper() + backend = ResourceEstimatorBackend() + job = backend.run(circuit, params=params, **options) + return job.result() + + +__all__ = [ + "NeutralAtomBackend", + "QSharpBackend", + "ResourceEstimatorBackend", + "QirTarget", + "QsJob", + "QsSimJob", + "ReJob", + "QsJobSet", + "estimate", + "EstimatorParams", + "EstimatorResult", +] diff --git a/source/qdk_package/qdk/qiskit/backends/__init__.py b/source/qdk_package/qdk/qiskit/backends/__init__.py new file mode 100644 index 0000000000..9e23c5bd40 --- /dev/null +++ b/source/qdk_package/qdk/qiskit/backends/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from .compilation import Compilation +from .errors import Errors +from .qirtarget import QirTarget +from .neutral_atom_target import NeutralAtomTarget +from .neutral_atom_backend import NeutralAtomBackend +from .qsharp_backend import QSharpBackend +from .re_backend import ResourceEstimatorBackend diff --git a/source/qdk_package/qdk/qiskit/backends/backend_base.py b/source/qdk_package/qdk/qiskit/backends/backend_base.py new file mode 100644 index 0000000000..ea2d401f01 --- /dev/null +++ b/source/qdk_package/qdk/qiskit/backends/backend_base.py @@ -0,0 +1,614 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from abc import ABC, abstractmethod +import datetime +import logging +from time import monotonic +from typing import Dict, Any, List, Optional, Union +from warnings import warn + +from qiskit import transpile +from qiskit.circuit import ( + QuantumCircuit, +) +from qiskit.version import get_version_info + +from qiskit.qasm3.exporter import Exporter +from qiskit.providers import BackendV2, Options +from qiskit.result import Result +from qiskit.transpiler import PassManager +from qiskit.transpiler.passes import RemoveBarriers, RemoveResetInZeroState +from qiskit.transpiler.target import Target + +from .compilation import Compilation +from .errors import Errors +from .qirtarget import QirTarget +from ..execution import DetaultExecutor +from ..jobs import QsJob, QsSimJob, QsJobSet +from ..passes import RemoveDelays +from ... import TargetProfile + +logger = logging.getLogger(__name__) + +_QISKIT_NON_GATE_INSTRUCTIONS = [ + "control_flow", + "if_else", + "switch_case", + "while_loop", + "break", + "continue", + "store", + "for_loop", + "measure", + "reset", +] + +_QISKIT_STDGATES = [ + "p", + "x", + "y", + "z", + "h", + "s", + "sdg", + "t", + "tdg", + "sx", + "rx", + "ry", + "rz", + "cx", + "cy", + "cz", + "cp", + "crx", + "cry", + "crz", + "ch", + "swap", + "ccx", + "cswap", + "cu", + "CX", + "phase", + "cphase", + "id", + "u1", + "u2", + "u3", + "U", +] + + +def filter_kwargs(func, **kwargs) -> Dict[str, Any]: + import inspect + + sig = inspect.signature(func) + supported_args = set(sig.parameters.keys()) + extracted_kwargs = { + k: kwargs.get(k) for k in list(kwargs.keys()) if k in supported_args + } + return extracted_kwargs + + +def get_transpile_options(**kwargs) -> Dict[str, Any]: + args = filter_kwargs(transpile, **kwargs) + return args + + +def get_exporter_options(**kwargs) -> Dict[str, Any]: + return filter_kwargs(Exporter.__init__, **kwargs) + + +class BackendBase(BackendV2, ABC): + """ + A virtual backend for transpiling to a Q# ecosystem compatible 
format. + """ + + def __init__( + self, + target: Optional[Target] = None, + qiskit_pass_options: Optional[Dict[str, Any]] = None, + transpile_options: Optional[Dict[str, Any]] = None, + qasm_export_options: Optional[Dict[str, Any]] = None, + skip_transpilation: bool = False, + **options, + ): + """ + :param target: The target to use for the backend. + :param qiskit_pass_options: Options for the Qiskit passes. + :param transpile_options: Options for the transpiler. + :param qasm_export_options: Options for the QASM3 exporter. + :param **options: Additional keyword arguments passed to subclasses. + """ + super().__init__( + name="QSharpBackend", + description="A virtual BackendV2 for transpiling to a Q# compatible format.", + backend_version="0.0.1", + ) + + if options is not None: + # we need to rename the seed_simulator to seed. This + # is a convenience for aer users. + # if the user passes in seed_simulator, we will rename it to seed + # but only if the seed field is defined in the backend options. + if "seed_simulator" in options and "seed" in self._options.data: + warn("seed_simulator passed, but field is called seed.") + options["seed"] = options.pop("seed_simulator") + + # updates the options with the fields passed in, if the backend + # doesn't have the field, it will raise an error. 
+ self.set_options(**options) + + self._qiskit_pass_options = Options( + supports_barrier=False, + supports_delay=False, + remove_reset_in_zero_state=True, + ) + self._skip_transpilation = skip_transpilation + + # we need to set the target after the options are set + # so that the target_profile can be used to determine + # which gates/instructions are available + if target is not None: + # update the properties so that we are internally consistent + self._qiskit_pass_options.update_options( + **{ + "supports_barrier": target.instruction_supported("barrier"), + "supports_delay": target.instruction_supported("delay"), + "remove_reset_in_zero_state": True, + } + ) + + self._target = target + else: + self._target = self._build_target() + + self._transpile_options = {} + + basis_gates = None + if qasm_export_options is not None and "basis_gates" in qasm_export_options: + basis_gates = qasm_export_options.pop("basis_gates") + else: + # here we get the gates that are in the target but not in qasm's + # stdgates so that we can build the basis gates list for the exporter. + # A user can override this list by passing in a basis_gates list + # We also remove any non-gate instructions from the list. 
+ target_gates = set(self.target.operation_names) + target_gates -= set(_QISKIT_NON_GATE_INSTRUCTIONS) + target_gates -= set(_QISKIT_STDGATES) + basis_gates = list(target_gates) + + # set the default options for the exporter + self._qasm_export_options = { + "includes": ("stdgates.inc",), + "alias_classical_registers": False, + "allow_aliasing": False, + "disable_constants": True, + "basis_gates": basis_gates, + } + + if qiskit_pass_options is not None: + self._qiskit_pass_options.update_options(**qiskit_pass_options) + if transpile_options is not None: + self._transpile_options.update(**transpile_options) + if qasm_export_options is not None: + self._qasm_export_options.update(**qasm_export_options) + + def _build_target(self) -> Target: + supports_barrier = self._qiskit_pass_options["supports_barrier"] + supports_delay = self._qiskit_pass_options["supports_delay"] + + # explicitly set ``num_qubits`` to ``None`` to indicate a :class:`Target` representing a + # simulator or other abstract machine that imposes no limits on the number of qubits. + return QirTarget.build_target( + num_qubits=None, + target_profile=self._options["target_profile"], + supports_barrier=supports_barrier, + supports_delay=supports_delay, + ) + + @property + def target(self) -> Target: + """Returns the target of the Backend object.""" + return self._target + + @property + def max_circuits(self): + """ + Returns the maximum number of circuits that can be executed simultaneously. + """ + return None + + @abstractmethod + def _execute(self, programs: List[Compilation], **input_params) -> Dict[str, Any]: + """Execute circuits on the backend. + + :param programs: Simulator input circuits. + :type programs: List[Compilation] + :param **input_params: Configuration for simulation/compilation. + :return: A dictionary of results. 
+        # Get our default options
+        # Look at all of the kwargs and see if they match any of the options
+        # If they do, set the option to the value of the kwarg as an override
+        # We only want to remove the options that are in the backend options for
+        # the run so that other options can be passed to other calls.
import QSharpError + + raise QSharpError(str(Errors.RUN_TERMINATED_WITHOUT_OUTPUT)) + + output["job_id"] = job_id + output["date"] = str(datetime.datetime.now().isoformat()) + output["status"] = "COMPLETED" + output["backend_name"] = self.name + output["backend_version"] = self.backend_version + + duration = monotonic() - start + output["time_taken"] = str(duration) + output["config"] = { + "qasm_export_options": str(self._build_qasm_export_options(**options)), + "qiskit_pass_options": str(self._build_qiskit_pass_options(**options)), + "transpile_options": str(self._build_transpile_options(**options)), + } + output["header"] = {} + return self._create_results(output) + + def _validate_quantum_circuits( + self, run_input: Union[QuantumCircuit, List[QuantumCircuit]] + ) -> List[QuantumCircuit]: + """Normalize and validate run_input to a list of QuantumCircuits. + + Wraps a bare ``QuantumCircuit`` in a list and raises ``ValueError`` + if any element is not a ``QuantumCircuit``. + """ + if not isinstance(run_input, list): + run_input = [run_input] + for circuit in run_input: + if not isinstance(circuit, QuantumCircuit): + raise ValueError(str(Errors.INPUT_MUST_BE_QC)) + return run_input + + def _submit_job(self, run_input: List[QuantumCircuit], **options) -> QsJob: + """Default implementation for simulation backends. + + Submits a ``QsSimJob`` for a single circuit or a ``QsJobSet`` for + multiple circuits. Override for backends with different job types + (e.g. ``ResourceEstimatorBackend`` uses ``ReJob``). 
+ """ + from uuid import uuid4 + + job_id = str(uuid4()) + executor = options.pop("executor", DetaultExecutor()) + if len(run_input) == 1: + job = QsSimJob(self, job_id, self.run_job, run_input, options, executor) + else: + job = QsJobSet(self, job_id, self.run_job, run_input, options, executor) + job.submit() + return job + + def _compile(self, run_input: List[QuantumCircuit], **options) -> List[Compilation]: + # for each run input, convert to qasm + compilations = [] + for circuit in run_input: + args = options.copy() + assert isinstance( + circuit, QuantumCircuit + ), "Input must be a QuantumCircuit." + start = monotonic() + qasm = self._qasm(circuit, **args) + end = monotonic() + + time_taken = end - start + compilation = Compilation(circuit, qasm, time_taken) + compilations.append(compilation) + return compilations + + def _create_results(self, output: Dict[str, Any]) -> Any: + """Default implementation: build a Qiskit ``Result`` from the output dict. + + Override for backends that return a different result type + (e.g. ``ResourceEstimatorBackend`` returns ``EstimatorResult``). + """ + return Result.from_dict(output) + + def _map_result_bit(self, v) -> str: + """Map a single QIR result value to a bit character. + + Override in subclasses to customize the mapping — for example, + to emit a loss marker instead of the default string fallback for + unknown values. + """ + from ... import Result as QSharpResult + + if v == QSharpResult.One: + return "1" + if v == QSharpResult.Zero: + return "0" + return str(v) + + def _shot_to_bitstring(self, value) -> str: + """Recursively convert a QIR shot result to a Qiskit-style bitstring. 
+ + - ``tuple`` → space-joined register parts (multiple classical registers) + - ``list`` → concatenated bits via `_map_result_bit` + - anything else → ``str(value)`` + """ + if isinstance(value, tuple): + return " ".join(self._shot_to_bitstring(p) for p in value) + elif isinstance(value, list): + return "".join(self._map_result_bit(v) for v in value) + else: + return str(value) + + def _transpile(self, circuit: QuantumCircuit, **options) -> QuantumCircuit: + if options.get("skip_transpilation", self._skip_transpilation): + return circuit + + circuit = self.run_qiskit_passes(circuit, options) + + transpile_options = self._build_transpile_options(**options) + backend = transpile_options.pop("backend", self) + target = transpile_options.pop("target", self.target) + if get_version_info().startswith("1.2"): + # The older Qiskit version does not support the `qubits_initially_zero` option + transpiled_circuit = transpile( + circuit, + backend=backend, + target=target, + **transpile_options, + ) + else: + transpiled_circuit = transpile( + circuit, + backend=backend, + target=target, + qubits_initially_zero=True, + **transpile_options, + ) + return transpiled_circuit + + def run_qiskit_passes(self, circuit, options): + pass_options = self._build_qiskit_pass_options(**options) + + pass_manager = PassManager() + if not pass_options["supports_barrier"]: + pass_manager.append(RemoveBarriers()) + if not pass_options["supports_delay"]: + pass_manager.append(RemoveDelays()) + if pass_options["remove_reset_in_zero_state"]: + # when doing state initialization, qiskit will reset all qubits to 0 + # As our semantics are different, we can remove these resets + # as it will double the number of qubits if we have to reset them + # before using them when using the base profile. 
+        # Disable aliasing until we decide we want to support it
+            # The Qiskit QASM exporter doesn't handle experimental features correctly and always
+            # emits OPENQASM 3.0; since switch case is not supported in QASM 3.0, we bump
+            # the version to 3.1 for now.
+ :param **kwargs: Additional options for the transpiler, exporter, or Qiskit passes. + Common values include: ``optimization_level``, ``basis_gates``, ``includes``, + ``search_path``, ``output_semantics``. Defaults to backend config values. + :return: The converted Q# code as a string. + :rtype: str + :raises QSharpError: If there is an error evaluating the source code. + :raises QasmError: If there is an error generating, parsing, or compiling QASM. + """ + + qasm_source = self._qasm(circuit, **kwargs) + + args = { + "name": kwargs.get("name", circuit.name), + } + + if search_path := kwargs.pop("search_path", "."): + args["search_path"] = search_path + + if output_semantics := kwargs.pop( + "output_semantics", self.options.get("output_semantics", default=None) + ): + args["output_semantics"] = output_semantics + + qsharp_source = self._qasm_to_qsharp(qasm_source, **args) + return qsharp_source + + def qir( + self, + circuit: QuantumCircuit, + **kwargs, + ) -> str: + """ + Converts a Qiskit QuantumCircuit to QIR (Quantum Intermediate Representation). + + :param circuit: The input Qiskit QuantumCircuit object. + :param **kwargs: Common options: + + - ``target_profile`` (TargetProfile): The target profile for the backend. Defaults to backend config value. + - ``output_semantics`` (OutputSemantics): The output semantics for the compilation. Defaults to backend config value. + - ``search_path`` (str): The search path for the backend. Defaults to ``'.'``. + :return: The converted QIR code as a string. + :rtype: str + :raises QSharpError: If there is an error evaluating the source code. + :raises QasmError: If there is an error generating, parsing, or compiling QASM. + :raises ValueError: If the backend configuration does not support QIR generation. 
+ """ + name = kwargs.pop("name", circuit.name) + target_profile = kwargs.pop("target_profile", self.options.target_profile) + if target_profile == TargetProfile.Unrestricted: + raise ValueError(str(Errors.UNRESTRICTED_INVALID_QIR_TARGET)) + + qasm_source = self._qasm(circuit, **kwargs) + + args = { + "name": name, + "target_profile": target_profile, + } + + if search_path := kwargs.pop("search_path", "."): + args["search_path"] = search_path + + if params := kwargs.pop("params", None): + args["params"] = params + + if output_semantics := kwargs.pop( + "output_semantics", self.options.get("output_semantics", default=None) + ): + args["output_semantics"] = output_semantics + + return self._qasm_to_qir(qasm_source, **args) + + def _qasm_to_qir( + self, + source: str, + **kwargs, + ) -> str: + from ..._native import compile_qasm_program_to_qir + from ..._fs import read_file, list_directory, resolve + from ..._http import fetch_github + + return compile_qasm_program_to_qir( + source, + read_file, + list_directory, + resolve, + fetch_github, + **kwargs, + ) + + def _qasm_to_qsharp( + self, + source: str, + **kwargs, + ) -> str: + from ..._native import compile_qasm_to_qsharp + from ..._fs import read_file, list_directory, resolve + from ..._http import fetch_github + + return compile_qasm_to_qsharp( + source, + read_file, + list_directory, + resolve, + fetch_github, + **kwargs, + ) diff --git a/source/qdk_package/qdk/qiskit/backends/compilation.py b/source/qdk_package/qdk/qiskit/backends/compilation.py new file mode 100644 index 0000000000..de0eba284f --- /dev/null +++ b/source/qdk_package/qdk/qiskit/backends/compilation.py @@ -0,0 +1,36 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from qiskit import QuantumCircuit + + +class Compilation(dict): + def __init__(self, circuit: QuantumCircuit, qasm: str, time_taken: str): + super().__init__() + self["circuit"] = circuit + self["qasm"] = qasm + self["compilation_time_taken"] = time_taken + + @property + def circuit(self) -> QuantumCircuit: + return self["circuit"] + + @circuit.setter + def circuit(self, value: QuantumCircuit): + self["circuit"] = value + + @property + def qasm(self) -> str: + return self["qasm"] + + @qasm.setter + def qasm(self, value: str): + self["qasm"] = value + + @property + def time_taken(self) -> str: + return self["compilation_time_taken"] + + @time_taken.setter + def time_taken(self, value: str): + self["compilation_time_taken"] = value diff --git a/source/qdk_package/qdk/qiskit/backends/errors.py b/source/qdk_package/qdk/qiskit/backends/errors.py new file mode 100644 index 0000000000..6468eddcd9 --- /dev/null +++ b/source/qdk_package/qdk/qiskit/backends/errors.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from enum import Enum + + +class Errors(Enum): + UNRESTRICTED_INVALID_QIR_TARGET = 1 + RUN_TERMINATED_WITHOUT_OUTPUT = 2 + FAILED_TO_EXPORT_QASM = 3 + MISSING_NUMBER_OF_SHOTS = 4 + INPUT_MUST_BE_QC = 5 + ONLY_ONE_CIRCUIT_ALLOWED = 6 + + def __str__(self): + if self == Errors.UNRESTRICTED_INVALID_QIR_TARGET: + return "The Unrestricted profile is not valid when generating QIR." + elif self == Errors.RUN_TERMINATED_WITHOUT_OUTPUT: + return "Run terminated without valid output." + elif self == Errors.FAILED_TO_EXPORT_QASM: + return "Failed to export QASM source." + elif self == Errors.MISSING_NUMBER_OF_SHOTS: + return "The number of shots must be specified." + elif self == Errors.INPUT_MUST_BE_QC: + return "Input must be a QuantumCircuit." + elif self == Errors.ONLY_ONE_CIRCUIT_ALLOWED: + return "Only one QuantumCircuit can be estimated at a time." + else: + return "Unknown option." 
diff --git a/source/qdk_package/qdk/qiskit/backends/neutral_atom_backend.py b/source/qdk_package/qdk/qiskit/backends/neutral_atom_backend.py new file mode 100644 index 0000000000..995615a4c7 --- /dev/null +++ b/source/qdk_package/qdk/qiskit/backends/neutral_atom_backend.py @@ -0,0 +1,288 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import logging +from typing import Any, Dict, List, Literal, Optional, Union +from uuid import uuid4 + +from qiskit import QuantumCircuit +from qiskit.providers import Options +from qiskit.transpiler.target import Target + +from ... import Result, TargetProfile +from .. import OutputSemantics +from ..execution import DetaultExecutor +from ..jobs import QsSimJob, QsJobSet +from .backend_base import BackendBase +from .compilation import Compilation +from .errors import Errors +from .neutral_atom_target import NeutralAtomTarget + +logger = logging.getLogger(__name__) + + +def _bitstring_has_qubit_loss(bitstring: str) -> bool: + """Return True if the bitstring contains a qubit-loss marker. + + Lost qubits may be represented using non-binary markers (e.g. '-', '2'). + We treat any shot containing those markers as lost-qubit affected. + """ + return "-" in bitstring or "2" in bitstring + + +class NeutralAtomBackend(BackendBase): + """A Qiskit backend that simulates circuits using the NeutralAtomDevice pipeline. + + Circuits are transpiled to OpenQASM 3 using the device's native gate set + (Rz, SX, CZ), compiled to QIR via the Q# compiler, then run through the + NeutralAtomDevice compilation and simulation pipeline. + The device handles single-qubit gate optimization and qubit movement scheduling. + An optional noise model can be applied to model realistic device behavior. + + The native gate set target ensures Qiskit's transpiler decomposes all non-native + gates before simulation, so noise configured on native gates (``noise.rz``, + ``noise.sx``, ``noise.cz``, ``noise.mresetz``) behaves as expected. 
    The simulator backend (Clifford, CPU full-state, or GPU full-state) is
    selected automatically unless overridden via the ``simulator_type`` option.

    Example::

        from qiskit import QuantumCircuit
        from qsharp.interop.qiskit import NeutralAtomBackend
        from qsharp._simulation import NoiseConfig

        qc = QuantumCircuit(2)
        qc.h(0)
        qc.cx(0, 1)
        qc.measure_all()

        # Noiseless simulation
        backend = NeutralAtomBackend()
        job = backend.run(qc, shots=1000)
        print(job.result().get_counts())

        # Noisy simulation
        noise = NoiseConfig()
        noise.cz.set_depolarizing(1e-3)
        noise.mresetz.set_bitflip(1e-3)

        job = backend.run(qc, shots=1000, noise=noise, seed=42)
        print(job.result().get_counts())
    """

    def __init__(
        self,
        device=None,
        target: Optional[Target] = None,
        qiskit_pass_options: Optional[Dict[str, Any]] = None,
        transpile_options: Optional[Dict[str, Any]] = None,
        qasm_export_options: Optional[Dict[str, Any]] = None,
        skip_transpilation: bool = False,
        **options,
    ):
        """
        :param device: The NeutralAtomDevice instance to use for compilation and simulation.
            A default-configured device is created automatically if not provided.
            Pass a custom device to control the qubit layout (column count, zone dimensions, etc.).
        :type device: NeutralAtomDevice
        :param target: Qiskit transpiler target. Defaults to the NeutralAtomDevice native
            gate set ``{rz, sx, cz, measure, reset}``. Override only if you need a custom
            decomposition strategy.
        :param qiskit_pass_options: Options forwarded to Qiskit pre-transpilation passes.
        :type qiskit_pass_options: Dict
        :param transpile_options: Options forwarded to ``qiskit.transpile()``.
        :type transpile_options: Dict
        :param qasm_export_options: Options forwarded to the Qiskit QASM3 exporter.
        :type qasm_export_options: Dict
        :param skip_transpilation: Skip Qiskit transpilation. Useful when the circuit is
            already expressed in terms of the target gate set.
        :type skip_transpilation: bool
        :param **options: Default option overrides. These can also be overridden per-call via
            :meth:`run`. Common options:

            - ``name`` (str): Backend name for job metadata. Defaults to the circuit name.
            - ``shots`` (int): Number of shots. Defaults to ``1024``.
            - ``seed`` (int): Random seed for reproducibility. Defaults to ``None``.
            - ``noise`` (NoiseConfig): Optional per-gate noise model. Defaults to ``None`` (noiseless).
            - ``simulator_type`` (str): Simulator to use — ``"clifford"`` (Clifford only),
              ``"cpu"`` (CPU full-state), ``"gpu"`` (GPU full-state), or ``None`` to
              auto-select (GPU if available, CPU otherwise).
            - ``output_semantics`` (OutputSemantics): QIR output encoding. Defaults to ``OutputSemantics.Qiskit``.
            - ``executor``: Executor for async job submission.
        """
        # Device creation is deferred to _get_device() so constructing the
        # backend stays cheap when a device was not supplied.
        self._device = device
        super().__init__(
            target,
            qiskit_pass_options,
            transpile_options,
            qasm_export_options,
            skip_transpilation,
            **options,
        )

    def _get_device(self):
        """Return the NeutralAtomDevice, creating a default one on first access."""
        if self._device is None:
            # Imported lazily so the device module is only loaded when needed.
            from ..._device._atom import NeutralAtomDevice

            self._device = NeutralAtomDevice()
        return self._device

    def _build_target(self) -> Target:
        """Return a target restricted to the NeutralAtomDevice native gate set.

        Limiting the target to ``{rz, sx, cz, measure, reset}`` ensures Qiskit's
        transpiler decomposes all non-native gates before QASM3 export, so the
        circuit that reaches the simulator already uses only native gates.
        """
        # num_qubits=None: simulator target, no qubit-count limit.
        return NeutralAtomTarget.build_target(num_qubits=None)

    @classmethod
    def _default_options(cls):
        # Defaults mirror the per-call options documented on __init__/run.
        return Options(
            search_path=".",
            shots=1024,
            seed=None,
            noise=None,
            simulator_type=None,
            output_semantics=OutputSemantics.Qiskit,
            executor=DetaultExecutor(),
        )

    def run(
        self,
        run_input: Union[QuantumCircuit, List[QuantumCircuit]],
        **options,
    ) -> Union[QsSimJob, QsJobSet]:
        """Simulate the given circuit(s) using the NeutralAtomDevice pipeline.

        :param run_input: A single ``QuantumCircuit`` or a list of them.
        :param **options: Per-call option overrides. Common options:

            - ``name`` (str): Backend name for job metadata. Defaults to the circuit name.
            - ``shots`` (int): Number of shots. Defaults to ``1024``.
            - ``seed`` (int): Random seed for reproducibility. Defaults to ``None``.
            - ``noise`` (NoiseConfig): Optional per-gate noise model. Defaults to ``None`` (noiseless).
            - ``simulator_type`` (str): Simulator to use — ``"clifford"`` (Clifford only),
              ``"cpu"`` (CPU full-state), ``"gpu"`` (GPU full-state), or ``None`` to
              auto-select (GPU if available, CPU otherwise).
            - ``output_semantics`` (OutputSemantics): QIR output encoding. Defaults to ``OutputSemantics.Qiskit``.
            - ``executor``: Executor for async job submission.
        :return: A job object whose ``.result()`` returns a Qiskit ``Result``.
        :rtype: QsSimJob
        :raises ValueError: If ``run_input`` is not a ``QuantumCircuit`` or list thereof,
            or if a ``target_profile`` other than ``TargetProfile.Base`` is provided.
        """
        run_input = self._validate_quantum_circuits(run_input)
        return self._run(run_input, **options)

    def _map_result_bit(self, v) -> str:
        """Override: unknown values are qubit-loss markers (``"-"``)."""
        if v == Result.One:
            return "1"
        if v == Result.Zero:
            return "0"
        # Anything other than Zero/One is recorded as a loss marker.
        return "-"

    def _execute(self, programs: List[Compilation], **input_params) -> Dict[str, Any]:
        # Drives the full pipeline per program: QASM3 -> QIR -> device
        # compile+simulate -> loss-aware result aggregation.
        device = self._get_device()

        shots = input_params.get("shots")
        if shots is None:
            raise ValueError(str(Errors.MISSING_NUMBER_OF_SHOTS))

        noise = input_params.get("noise")
        simulator_type: Optional[Literal["clifford", "cpu", "gpu"]] = input_params.get(
            "simulator_type"
        )
        seed: Optional[int] = input_params.get("seed")
        search_path: str = input_params.get("search_path", ".")
        output_semantics = input_params.get("output_semantics")

        # NeutralAtomDevice always requires base-profile QIR — the device's
        # compilation pipeline validates that no conditional branches exist.
        # Raise explicitly if the caller passed a non-Base profile so the
        # error is immediate and clear rather than silently ignored.
        target_profile = input_params.get("target_profile")
        if target_profile is not None and target_profile != TargetProfile.Base:
            raise ValueError(
                "NeutralAtomBackend only supports TargetProfile.Base. "
                "The NeutralAtomDevice compilation pipeline does not support "
                f"conditional branches produced by {target_profile}."
            )

        job_results = []
        for program in programs:
            name = input_params.get("name", program.circuit.name)

            # Compile QASM3 → QIR (base profile).
            qir = self._qasm_to_qir(
                program.qasm,
                name=name,
                target_profile=TargetProfile.Base,
                output_semantics=output_semantics,
                search_path=search_path,
            )

            # Run through NeutralAtomDevice compilation + simulation pipeline.
            sim_results = device.simulate(
                qir,
                shots=shots,
                noise=noise,
                type=simulator_type,
                seed=seed,
            )

            raw_memory = [self._shot_to_bitstring(shot) for shot in sim_results]

            # Separate accepted shots (no loss markers) from raw shots.
            # Qiskit-compatible fields (counts, memory, probabilities)
            # contain only clean {0,1} outcomes; raw_* fields retain the
            # full picture including loss.
            memory = [s for s in raw_memory if not _bitstring_has_qubit_loss(s)]
            accepted_total_count = len(memory)
            raw_total_count = len(raw_memory)

            raw_counts: Dict[str, int] = {}
            counts: Dict[str, int] = {}
            for bs in raw_memory:
                raw_counts[bs] = raw_counts.get(bs, 0) + 1
                if not _bitstring_has_qubit_loss(bs):
                    counts[bs] = counts.get(bs, 0) + 1

            # Probabilities are normalized over their own population
            # (raw over all shots, accepted over loss-free shots).
            raw_probabilities = (
                {}
                if raw_total_count == 0
                else {bs: c / raw_total_count for bs, c in raw_counts.items()}
            )
            probabilities = (
                {}
                if accepted_total_count == 0
                else {bs: c / accepted_total_count for bs, c in counts.items()}
            )

            # NOTE(review): the header records program.circuit.name even when a
            # "name" override was supplied above — confirm this is intentional.
            job_results.append(
                {
                    "data": {
                        # Qiskit-compatible fields: loss shots excluded.
                        "counts": counts,
                        "probabilities": probabilities,
                        "memory": memory,
                        # Raw fields: all shots, including loss markers.
                        "raw_counts": raw_counts,
                        "raw_probabilities": raw_probabilities,
                        "raw_memory": raw_memory,
                    },
                    "success": True,
                    "header": {
                        "metadata": {"qasm": program.qasm},
                        "name": program.circuit.name,
                        "compilation_time_taken": program.time_taken,
                    },
                    # shots reflects accepted (non-loss) count.
                    "shots": accepted_total_count,
                }
            )

        return {"results": job_results, "qobj_id": str(uuid4()), "success": True}
class NeutralAtomTarget:
    """Factory for a Qiskit ``Target`` restricted to the NeutralAtomDevice native gate set.

    The native gate set is ``{rz, sx, cz, measure}`` — the only gates that survive
    ``NeutralAtomDevice.compile()``'s decomposition pipeline. Using this target ensures
    that Qiskit's transpiler decomposes all non-native gates (H, CX, X, etc.) into
    native gates *before* QASM3 export, so the noise model fields that matter
    (``noise.rz``, ``noise.sx``, ``noise.cz``, ``noise.mresetz``) align with the
    gates actually present during simulation.
    """

    @classmethod
    def build_target(
        cls,
        num_qubits: int | None = None,
    ) -> Target:
        """Return a Qiskit ``Target`` with only the NeutralAtomDevice native gates.

        :param num_qubits: Number of qubits. ``None`` means no limit (simulator).
        :return: A ``Target`` containing ``{rz, sx, cz, measure, reset}``.
        """
        target = Target(num_qubits=num_qubits)

        # rz carries a parameter, so it is registered as an instance; the rest
        # are registered as gate classes (globally applicable instructions).
        target.add_instruction(RZGate(Parameter("theta")), name="rz")

        # Reset is used internally by NeutralAtomDevice (MResetZ), so include it
        # so the transpiler can express mid-circuit resets.
        native_ops = (
            (SXGate, "sx"),
            (CZGate, "cz"),
            (Measure, "measure"),
            (Reset, "reset"),
        )
        for operation, label in native_ops:
            target.add_instruction(operation, name=label)

        return target
import logging
from typing import Union

from qiskit.circuit import (
    Barrier,
    Delay,
    Measure,
    Parameter,
    Reset,
    Store,
)
from qiskit.circuit.controlflow import (
    ControlFlowOp,
    ForLoopOp,
    IfElseOp,
    SwitchCaseOp,
    WhileLoopOp,
)
from qiskit.circuit.library.standard_gates import (
    CHGate,
    CCXGate,
    CXGate,
    CYGate,
    CZGate,
    CRXGate,
    CRYGate,
    CRZGate,
    RXGate,
    RXXGate,
    RYGate,
    RYYGate,
    RZGate,
    RZZGate,
    HGate,
    SGate,
    SdgGate,
    SXGate,
    SwapGate,
    TGate,
    TdgGate,
    XGate,
    YGate,
    ZGate,
    IGate,
)

from qiskit.transpiler.target import Target
from ... import TargetProfile

logger = logging.getLogger(__name__)


class QirTarget:
    """Factory for QIR-compatible Qiskit ``Target`` instances."""

    # Deprecated entry point: instantiating wraps build_target() and proxies
    # attribute access to the produced Target via __getattr__ below.
    def __init__(
        self,
        num_qubits=None,
        target_profile=TargetProfile.Base,
        supports_barrier=False,
        supports_delay=False,
    ) -> None:
        logger.warning(
            "QirTarget should not be instantiated directly. Use the 'build_target' class method"
            + " instead. This will be enforced in a future release. You can replace"
            + " 'QirTarget(...)' with 'QirTarget.build_target(...)'."
        )
        self._target = self.build_target(
            num_qubits=num_qubits,
            target_profile=target_profile,
            supports_barrier=supports_barrier,
            supports_delay=supports_delay,
        )

    def __getattr__(self, item):
        """
        Delegate attribute access to the underlying _target object.

        This method is called when an attribute is not found in the current instance.
        It forwards the attribute lookup to the internal _target object, effectively
        making this class act as a proxy or wrapper around the target.

        :param item: The name of the attribute being accessed.
        :return: The value of the requested attribute from the ``_target`` object.
        :raises AttributeError: If the requested item is ``"_target"`` or if the attribute
            does not exist on the ``_target`` object.
        """
        # Guard against infinite recursion before self._target is assigned.
        if item == "_target":
            raise AttributeError(item)
        return getattr(self._target, item)

    def to_target(self) -> Target:
        """Return the underlying Qiskit Target instance."""
        return self._target

    @classmethod
    def build_target(
        cls,
        num_qubits: Union[int, None] = None,
        target_profile=TargetProfile.Base,
        supports_barrier=False,
        supports_delay=False,
    ) -> Target:
        """
        Create a Qiskit Target object with quantum gates and operations for QIR compilation.

        This class method creates a Target instance that defines the available quantum
        operations and gates that can be used when compiling Q#/OpenQASM code to QIR (Quantum
        Intermediate Representation) format.

        :param num_qubits: The number of qubits for the target.
            If ``None``, the target will support any number of qubits. Defaults to ``None``.
        :param target_profile: The target profile that determines which control flow operations
            are supported. If not ``TargetProfile.Base``, adds control flow operations like
            ``if_else``, ``switch_case``, and ``while_loop``. Defaults to ``TargetProfile.Base``.
        :param supports_barrier: Whether to include barrier operations in the target.
            Defaults to ``False``.
        :param supports_delay: Whether to include delay operations in the target.
            Defaults to ``False``.
        :return: A Qiskit ``Target`` object configured with quantum gates and operations.
        """

        target = Target(num_qubits=num_qubits)

        # Non-Base profiles support runtime branching, so expose Qiskit's
        # control-flow operations and classical stores.
        if target_profile != TargetProfile.Base:
            target.add_instruction(ControlFlowOp, name="control_flow")
            target.add_instruction(IfElseOp, name="if_else")
            target.add_instruction(SwitchCaseOp, name="switch_case")
            target.add_instruction(WhileLoopOp, name="while_loop")

            # We don't currently support break or continue statements in Q#,
            # so we don't include them yet.
            # target.add_instruction(BreakLoopOp, name="break")
            # target.add_instruction(ContinueLoopOp, name="continue")

            target.add_instruction(Store, name="store")

        if supports_barrier:
            target.add_instruction(Barrier, name="barrier")
        if supports_delay:
            target.add_instruction(Delay, name="delay")

        # For loops should be fully deterministic in Qiskit/QASM.
        target.add_instruction(ForLoopOp, name="for_loop")
        target.add_instruction(Measure, name="measure")

        # While reset is technically not supported in base profile, the
        # compiler can use decompositions to implement workarounds.
        target.add_instruction(Reset, name="reset")

        # Parameterless gates are registered as classes (globally applicable);
        # rotation gates need a bound Parameter instance.
        target.add_instruction(CCXGate, name="ccx")
        target.add_instruction(CXGate, name="cx")
        target.add_instruction(CYGate, name="cy")
        target.add_instruction(CZGate, name="cz")

        target.add_instruction(RXGate(Parameter("theta")), name="rx")
        target.add_instruction(RXXGate(Parameter("theta")), name="rxx")
        target.add_instruction(CRXGate(Parameter("theta")), name="crx")

        target.add_instruction(RYGate(Parameter("theta")), name="ry")
        target.add_instruction(RYYGate(Parameter("theta")), name="ryy")
        target.add_instruction(CRYGate(Parameter("theta")), name="cry")

        target.add_instruction(RZGate(Parameter("theta")), name="rz")
        target.add_instruction(RZZGate(Parameter("theta")), name="rzz")
        target.add_instruction(CRZGate(Parameter("theta")), name="crz")

        target.add_instruction(HGate, name="h")

        target.add_instruction(SGate, name="s")
        target.add_instruction(SdgGate, name="sdg")

        target.add_instruction(SXGate, name="sx")

        target.add_instruction(SwapGate, name="swap")

        target.add_instruction(TGate, name="t")
        target.add_instruction(TdgGate, name="tdg")

        target.add_instruction(XGate, name="x")
        target.add_instruction(YGate, name="y")
        target.add_instruction(ZGate, name="z")

        target.add_instruction(IGate, name="id")

        target.add_instruction(CHGate, name="ch")

        return target
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

from collections import Counter
import logging
from typing import Any, Dict, List, Optional, Tuple, Union
from uuid import uuid4

from qiskit import QuantumCircuit
from qiskit.providers import Options
from qiskit.transpiler.target import Target
from ... import TargetProfile
from .. import OutputSemantics
from ..execution import DetaultExecutor
from ..jobs import QsSimJob
from .backend_base import BackendBase
from .compilation import Compilation
from .errors import Errors

logger = logging.getLogger(__name__)


class QSharpBackend(BackendBase):
    """
    A virtual backend for running Qiskit circuits using the Q# simulator.
    """

    # This init is included for the docstring
    # pylint: disable=useless-parent-delegation
    def __init__(
        self,
        target: Optional[Target] = None,
        qiskit_pass_options: Optional[Dict[str, Any]] = None,
        transpile_options: Optional[Dict[str, Any]] = None,
        qasm_export_options: Optional[Dict[str, Any]] = None,
        skip_transpilation: bool = False,
        **options,
    ):
        """
        :param target: The target to use for the backend.
        :param qiskit_pass_options: Options for the Qiskit passes.
        :type qiskit_pass_options: Dict
        :param transpile_options: Options for the transpiler.
        :type transpile_options: Dict
        :param qasm_export_options: Options for the QASM3 exporter.
        :type qasm_export_options: Dict
        :param skip_transpilation: Skip Qiskit transpilation.
        :type skip_transpilation: bool
        :param **options: Default option overrides. These can also be overridden per-call via
            :meth:`run`. Common options:

            - ``name`` (str): The name of the circuit used as the entry point. Defaults to the circuit name.
            - ``target_profile`` (TargetProfile): The target profile to use for the compilation.
            - ``output_semantics`` (OutputSemantics): The output semantics for the compilation.
              Defaults to ``OutputSemantics.Qiskit``.
            - ``shots`` (int): The number of shots to run the program for. Defaults to ``1024``.
            - ``seed`` (int): The seed to use for the random number generator. Defaults to ``None``.
            - ``search_path`` (str): The path to search for imports. Defaults to ``'.'``.
            - ``output_fn`` (Callable): A callback function to receive the output of the circuit.
              Defaults to ``None``.
            - ``executor``: The executor to be used to submit the job. Defaults to ``SynchronousExecutor``.
        """

        super().__init__(
            target,
            qiskit_pass_options,
            transpile_options,
            qasm_export_options,
            skip_transpilation,
            **options,
        )

    @classmethod
    def _default_options(cls):
        return Options(
            name="program",
            params=None,
            search_path=".",
            shots=1024,
            seed=None,
            output_fn=None,
            target_profile=TargetProfile.Unrestricted,
            output_semantics=OutputSemantics.Qiskit,
            executor=DetaultExecutor(),
        )

    def run(
        self,
        run_input: Union[QuantumCircuit, List[QuantumCircuit]],
        **options,
    ) -> QsSimJob:
        """
        Runs the given QuantumCircuit using the Q# simulator.

        :param run_input: The QuantumCircuit to be executed.
        :param **options: Per-call option overrides. Common options:

            - ``name`` (str): The name of the circuit used as the entry point. Defaults to the circuit name.
            - ``target_profile`` (TargetProfile): The target profile to use for the compilation.
            - ``output_semantics`` (OutputSemantics): The output semantics for the compilation.
              Defaults to ``OutputSemantics.Qiskit``.
            - ``shots`` (int): The number of shots to run the program for. Defaults to ``1024``.
            - ``seed`` (int): The seed to use for the random number generator. Defaults to ``None``.
            - ``search_path`` (str): The path to search for imports. Defaults to ``'.'``.
            - ``output_fn`` (Callable): A callback function to receive the output of the circuit.
              Defaults to ``None``.
            - ``executor``: The executor to be used to submit the job. Defaults to ``SynchronousExecutor``.
        :return: The simulation job.
        :rtype: QsSimJob
        :raises QSharpError: If there is an error evaluating the source code.
        :raises QasmError: If there is an error generating, parsing, or compiling QASM.
        :raises ValueError: If run_input is not a QuantumCircuit or List[QuantumCircuit].
        """

        run_input = self._validate_quantum_circuits(run_input)
        return self._run(run_input, **options)

    def _execute(self, programs: List[Compilation], **input_params) -> Dict[str, Any]:
        # Run every compiled program, then aggregate per-program shot
        # bitstrings into Qiskit-style counts/probabilities payloads.
        exec_results: List[Tuple[Compilation, Dict[str, Any]]] = [
            (
                program,
                _run_qasm(program.qasm, vars(self.options).copy(), **input_params),
            )
            for program in programs
        ]
        job_results = []

        # NOTE(review): shots is validated only after execution; when absent
        # here, _run_qasm already fell back to the backend default — confirm
        # the late check is intentional.
        shots = input_params.get("shots")
        if shots is None:
            raise ValueError(str(Errors.MISSING_NUMBER_OF_SHOTS))

        for program, exec_result in exec_results:
            results = [self._shot_to_bitstring(result) for result in exec_result]

            counts = Counter(results)
            counts_dict = dict(counts)
            # Probabilities are normalized over the requested shot count.
            probabilities = {
                bitstring: (count / shots) for bitstring, count in counts_dict.items()
            }

            job_result = {
                "data": {"counts": counts_dict, "probabilities": probabilities},
                "success": True,
                "header": {
                    "metadata": {"qasm": program.qasm},
                    "name": program.circuit.name,
                    "compilation_time_taken": program.time_taken,
                },
                "shots": shots,
            }
            job_results.append(job_result)

        # All of these fields are required by the Result object
        result_dict = {
            "results": job_results,
            "qobj_id": str(uuid4()),
            "success": True,
        }

        return result_dict


def _run_qasm(
    qasm: str,
    default_options: Options,
    **options,
) -> Any:
    """
    Runs the supplied OpenQASM 3 program.
    Gates defined by stdgates.inc will be overridden with definitions
    from the Q# compiler.

    Any gates, such as matrix unitaries, that are not able to be
    transpiled will result in an error.

    :param qasm: The input OpenQASM 3 string to be processed.
    :param default_options: Default backend option values.
    :param **options: Common options:

        - ``target_profile`` (TargetProfile): The target profile to use for the compilation.
        - ``output_semantics`` (OutputSemantics): The output semantics for the compilation.
        - ``name`` (str): The name of the circuit. Defaults to ``'program'``.
        - ``search_path`` (str): The optional search path for resolving qasm imports.
        - ``shots`` (int): The number of shots to run the program for.
        - ``seed`` (int): The seed to use for the random number generator.
        - ``output_fn`` (Callable): A callback for each output. Defaults to ``None``.
    :return: A list of results or runtime errors.
    :raises QSharpError: If there is an error evaluating the source code.
    :raises QasmError: If there is an error generating, parsing, or compiling QASM.
    """

    from ..._native import run_qasm_program, Output  # type: ignore
    from ..._fs import read_file, list_directory, resolve
    from ..._http import fetch_github

    def callback(output: Output) -> None:
        print(output)

    output_fn = options.pop("output_fn", callback)

    def value_or_default(key: str) -> Any:
        # Per-call option wins; otherwise fall back to the backend default.
        return options.pop(key, default_options[key])

    # when passing the args into the rust layer, any kwargs with None values
    # will cause an error, so we need to filter them out.
    # Use explicit `is not None` checks rather than truthiness: a bare
    # `if seed := ...` would silently drop legitimate falsy values such as
    # seed=0, shots=0, or an enum member whose value is 0.
    args = {}
    if (name := value_or_default("name")) is not None:
        args["name"] = name

    if (target_profile := value_or_default("target_profile")) is not None:
        args["target_profile"] = target_profile
    if (output_semantics := value_or_default("output_semantics")) is not None:
        args["output_semantics"] = output_semantics

    if (search_path := value_or_default("search_path")) is not None:
        args["search_path"] = search_path
    if (shots := value_or_default("shots")) is not None:
        args["shots"] = shots
    if (seed := value_or_default("seed")) is not None:
        args["seed"] = seed

    return run_qasm_program(
        qasm,
        output_fn,
        None,
        None,
        None,
        read_file,
        list_directory,
        resolve,
        fetch_github,
        **args,
    )
    # This init is included for the docstring
    # pylint: disable=useless-parent-delegation
    def __init__(
        self,
        target: Optional[Target] = None,
        qiskit_pass_options: Optional[Dict[str, Any]] = None,
        transpile_options: Optional[Dict[str, Any]] = None,
        qasm_export_options: Optional[Dict[str, Any]] = None,
        skip_transpilation: bool = False,
        **options,
    ):
        """
        :param target: The target to use for the backend.
        :param qiskit_pass_options: Options for the Qiskit passes.
        :type qiskit_pass_options: Dict
        :param transpile_options: Options for the transpiler.
        :type transpile_options: Dict
        :param qasm_export_options: Options for the QASM3 exporter.
        :type qasm_export_options: Dict
        :param skip_transpilation: Skip Qiskit transpilation.
        :type skip_transpilation: bool
        :param **options: Default option overrides. These can also be overridden per-call via
            :meth:`run`. Common options:

            - ``params`` (EstimatorParams): Configuration values for resource estimation.
            - ``name`` (str): The name of the circuit used as the entry point. Defaults to the circuit name.
            - ``search_path`` (str): Path to search in for qasm imports. Defaults to ``'.'``.
            - ``executor``: The executor to be used to submit the job. Defaults to ``SynchronousExecutor``.
        """

        super().__init__(
            target,
            qiskit_pass_options,
            transpile_options,
            qasm_export_options,
            skip_transpilation,
            **options,
        )

    @property
    def max_circuits(self):
        """
        Returns the maximum number of circuits that can be executed simultaneously.
        """
        # Resource estimation is strictly one circuit per job (see run()).
        return 1

    @classmethod
    def _default_options(cls):
        # Resource estimation defaults: Unrestricted profile and
        # ResourceEstimation output semantics.
        return Options(
            params=None,
            name="program",
            search_path=".",
            target_profile=TargetProfile.Unrestricted,
            output_semantics=OutputSemantics.ResourceEstimation,
            executor=DetaultExecutor(),
        )

    def run(
        self,
        run_input: Union[QuantumCircuit, List[QuantumCircuit]],
        params: Optional[EstimatorParams] = None,
        **options,
    ) -> ReJob:
        """
        Performs resource estimation on the supplied QuantumCircuit via conversion
        to OpenQASM 3.

        :param run_input: The input Qiskit QuantumCircuit object.
        :param params: Configuration values for resource estimation.
        :type params: EstimatorParams
        :param **options: Per-call option overrides. Common options:

            - ``params`` (EstimatorParams): Configuration values for resource estimation.
            - ``name`` (str): The name of the circuit used as the entry point. Defaults to the circuit name.
            - ``search_path`` (str): Path to search in for qasm imports. Defaults to ``'.'``.
            - ``target_profile`` (TargetProfile): The target profile to use for the backend.
            - ``executor``: The executor to be used to submit the job. Defaults to ``SynchronousExecutor``.
        :return: The resource estimation job.
        :rtype: ReJob
        :raises QSharpError: If there is an error evaluating the source code.
        :raises QasmError: If there is an error generating, parsing, or compiling QASM.
        :raises ValueError: If run_input is not a QuantumCircuit.
        """
        # Normalize to a list, then enforce the single-circuit limit.
        if isinstance(run_input, QuantumCircuit):
            run_input = [run_input]
        if len(run_input) != 1:
            raise ValueError(str(Errors.ONLY_ONE_CIRCUIT_ALLOWED))

        if params is not None:
            options["params"] = params
        return self._run(run_input, **options)

    def _estimate_qasm(
        self,
        source: str,
        **input_params,
    ) -> Dict[str, Any]:
        """
        Estimates the resource usage of a QASM source code.
        """
        # Normalize `params` into a list of plain dicts, which is what the
        # native estimator expects as its JSON payload:
        #   None                -> one empty config
        #   EstimatorParams     -> its item list, or a single-item list
        #   dict                -> single-item list
        params = input_params.pop("params", None)
        if params is None:
            params = [{}]
        elif isinstance(params, EstimatorParams):
            if params.has_items:
                params = params.as_dict()["items"]
            else:
                params = [params.as_dict()]
        elif isinstance(params, dict):
            params = [params]
        param_str = json.dumps(params)
        # NOTE(review): pop("name") has no default here and raises KeyError if
        # callers bypass the defaults merge — confirm _run always supplies it.
        kwargs = {
            "name": input_params.pop("name"),
            "search_path": input_params.pop("search_path", "."),
        }
        kwargs.update(input_params)
        res_str = resource_estimate_qasm_program(
            source,
            param_str,
            read_file,
            list_directory,
            resolve,
            fetch_github,
            **kwargs,
        )
        res = json.loads(res_str)
        return res

    def _execute(self, programs: List[Compilation], **input_params) -> Dict:
        # Estimate each program; overall success requires a non-empty result
        # set in which every entry reports status == "success".
        exec_results = [
            (program, self._estimate_qasm(program.qasm, **input_params))
            for program in programs
        ]
        success = (
            all(
                "status" in res and res["status"] == "success"
                for (_, res) in exec_results
            )
            and len(exec_results) > 0
        )
        result_dict = {
            "results": [res for (_, res) in exec_results],
            "qobj_id": str(uuid4()),
            "success": success,
        }

        return result_dict

    def _create_results(self, output: Dict[str, Any]) -> EstimatorResult:
        # Only one circuit per job, so only the first result is meaningful.
        return EstimatorResult(output["results"][0])

    def _submit_job(self, run_input: List[QuantumCircuit], **options) -> ReJob:
        # Wrap the run in a ReJob and hand it to the (possibly async) executor.
        job_id = str(uuid4())
        executor: Executor = options.pop("executor", DetaultExecutor())
        job = ReJob(self, job_id, self.run_job, run_input, options, executor)
        job.submit()
        return job
+ +from .default import DetaultExecutor diff --git a/source/qdk_package/qdk/qiskit/execution/default.py b/source/qdk_package/qdk/qiskit/execution/default.py new file mode 100644 index 0000000000..4eece33ecf --- /dev/null +++ b/source/qdk_package/qdk/qiskit/execution/default.py @@ -0,0 +1,10 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + + +from concurrent.futures import ThreadPoolExecutor + + +class DetaultExecutor(ThreadPoolExecutor): + def __init__(self) -> None: + super().__init__(max_workers=1) diff --git a/source/qdk_package/qdk/qiskit/jobs/__init__.py b/source/qdk_package/qdk/qiskit/jobs/__init__.py new file mode 100644 index 0000000000..a70db02097 --- /dev/null +++ b/source/qdk_package/qdk/qiskit/jobs/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from .qsjob import QsJob, QsSimJob, ReJob +from .qsjobset import QsJobSet diff --git a/source/qdk_package/qdk/qiskit/jobs/qsjob.py b/source/qdk_package/qdk/qiskit/jobs/qsjob.py new file mode 100644 index 0000000000..a5c1743b24 --- /dev/null +++ b/source/qdk_package/qdk/qiskit/jobs/qsjob.py @@ -0,0 +1,194 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from abc import ABC, abstractmethod +from concurrent.futures import Executor, Future +import logging +from time import monotonic +from typing import Callable, Dict, Optional, Any + +from qiskit.providers import BackendV2 +from qiskit.circuit import ( + QuantumCircuit, +) + +from qiskit.result import Result +from qiskit.providers import JobV1, JobStatus, JobError + +from ..execution import DetaultExecutor +from ... import telemetry_events +from ...estimator import EstimatorResult + +logger = logging.getLogger(__name__) + +RunInputCallable = Callable[[QuantumCircuit, str, Dict[str, Any]], Result] + + +class QsJob(JobV1, ABC): + """ + Abstract base class for Q# Qiskit jobs. 
+ + Manages asynchronous execution of a quantum circuit via a callable submitted + to a thread pool. Concrete subclasses must implement :meth:`result` and + :meth:`_submit_duration`. + """ + + def __init__( + self, + backend: Optional[BackendV2], + job_id: str, + job_callable: RunInputCallable, + run_input: QuantumCircuit, + input_params: Dict[str, Any], + executor=None, + **kwargs, + ) -> None: + """ + :param backend: The backend on which the job is run. + :param job_id: A unique identifier for the job. + :type job_id: str + :param job_callable: The callable that executes the circuit and returns a result. + :param run_input: The quantum circuit to execute. + :param input_params: Parameters forwarded to ``job_callable`` at execution time. + :type input_params: Dict + :param executor: Thread pool executor. Uses a default single-threaded executor if + not provided. + :param **kwargs: Additional keyword arguments passed to ``qiskit.providers.JobV1``. + """ + + self._run_input = run_input + self._input_params = input_params + self._future: Optional[Future] = None + self._executor: Executor = executor or DetaultExecutor() + self._job_callable = job_callable + self._status = JobStatus.INITIALIZING + self._submit_start_time: Optional[float] = None + super().__init__(backend, job_id, **kwargs) + + def submit(self): + """Submit the job to the backend for execution. + + :raises JobError: If trying to re-submit the job. 
+ """ + if self._future is not None: + raise JobError("Job has already been submitted.") + + self._submit_start_time = monotonic() + self._future = self._executor.submit( + self._job_callable, self._run_input, self.job_id(), **self._input_params + ) + self.add_done_callback(self._submit_duration) + + @abstractmethod + def result(self, timeout: Optional[float] = None) -> Any: + pass + + @abstractmethod + def _submit_duration(self, _future: Future): + pass + + def _result(self, timeout: Optional[float] = None) -> Any: + """Return the results of the job.""" + if self._future is None: + raise JobError("Job has not been submitted.") + + return self._future.result(timeout=timeout) + + def status(self) -> JobStatus: + """Return the status of the job, among the values of ``JobStatus``.""" + if self._future is None: + return JobStatus.INITIALIZING + if self._future.cancelled(): + return JobStatus.CANCELLED + if self._future.done(): + if self._future.exception() is None: + return JobStatus.DONE + else: + return JobStatus.ERROR + if self._future.running(): + return JobStatus.RUNNING + + return JobStatus.INITIALIZING + + def backend(self) -> BackendV2: + """Return the backend where this job was executed.""" + + return super().backend() + + def cancel(self): + """Attempt to cancel the job.""" + if self._future is not None: + self._future.cancel() + + def error(self) -> Optional[JobError]: + """Return the error that occurred during the execution of the job.""" + if self._future is not None: + return self._future.exception() + + def add_done_callback(self, fn: Callable[[Future[Result]], object]) -> None: + """Attaches a callable that will be called when the job finishes.""" + if self._future is not None: + self._future.add_done_callback(fn) + + +class QsSimJob(QsJob): + """ + A Qiskit job that runs a quantum circuit on the Q# simulator. + + Submits the circuit for simulation and returns a ``qiskit.result.Result`` + containing shot-level measurement outcomes. 
+ """ + + def result(self, timeout: Optional[float] = None) -> Result: + return self._result(timeout=timeout) + + def submit(self): + """Submit the job to the backend for execution. + + :raises JobError: If trying to re-submit the job. + """ + shots = self._input_params.get("shots", -1) + telemetry_events.on_qiskit_run(shots, 1) + + super().submit() + + def _submit_duration(self, _future: Future): + end_time = monotonic() + # _submit_start_time is set in submit() before adding this callback + assert self._submit_start_time is not None + duration_in_sec = end_time - self._submit_start_time + duration_in_ms = duration_in_sec * 1000 + + shots = self._input_params.get("shots", -1) + telemetry_events.on_qiskit_run_end(shots, 1, duration_in_ms) + + +class ReJob(QsJob): + """ + A Qiskit job that runs the Q# Resource Estimator. + + Submits the circuit to the resource estimator and returns an + :class:`~qsharp.estimator.EstimatorResult` with the computed resource estimates. + """ + + def result(self, timeout: Optional[float] = None) -> EstimatorResult: + return self._result(timeout=timeout) + + def submit(self): + """Submit the job to the backend for execution. + + :raises JobError: If trying to re-submit the job. + """ + + telemetry_events.on_qiskit_run_re() + + super().submit() + + def _submit_duration(self, _future: Future): + end_time = monotonic() + # _submit_start_time is set in submit() before adding this callback + assert self._submit_start_time is not None + duration_in_sec = end_time - self._submit_start_time + duration_in_ms = duration_in_sec * 1000 + + telemetry_events.on_qiskit_run_re_end(duration_in_ms) diff --git a/source/qdk_package/qdk/qiskit/jobs/qsjobset.py b/source/qdk_package/qdk/qiskit/jobs/qsjobset.py new file mode 100644 index 0000000000..03956a1f5b --- /dev/null +++ b/source/qdk_package/qdk/qiskit/jobs/qsjobset.py @@ -0,0 +1,150 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+
+
+from concurrent.futures import Executor, Future
+import datetime
+from time import monotonic
+import logging
+from typing import Dict, List, Optional, Any
+from uuid import uuid4
+
+
+from qiskit.circuit import QuantumCircuit
+from qiskit.providers import JobV1 as Job
+from qiskit.providers import BackendV2, JobStatus, JobError
+from qiskit.result.result import Result, ExperimentResult
+
+
+from .qsjob import QsSimJob, RunInputCallable
+from ..execution import DetaultExecutor
+from ... import telemetry_events
+
+logger = logging.getLogger(__name__)
+
+
+class QsJobSet(Job):
+    """
+    A Qiskit job set that runs multiple quantum circuits concurrently.
+
+    Each circuit in ``run_input`` is submitted as an individual
+    :class:`~qdk.qiskit.jobs.qsjob.QsSimJob` and executed in a thread pool.
+    :meth:`result` blocks until all constituent
+    jobs are complete and aggregates their ``qiskit.result.Result`` objects into a single
+    combined result.
+    """
+
+    def __init__(
+        self,
+        backend: Optional[BackendV2],
+        job_id: str,
+        job_callable: RunInputCallable,
+        run_input: List[QuantumCircuit],
+        input_params: Dict[str, Any],
+        executor=None,
+        **kwargs,
+    ) -> None:
+        super().__init__(backend, job_id, **kwargs)
+
+        self._run_input: List[QuantumCircuit] = run_input
+        self._input_params: Dict[str, Any] = input_params
+        self._jobs: List[QsSimJob] = []
+        self._job_indexes: List[int] = []
+        self._executor: Executor = executor or DetaultExecutor()
+        self._job_callable = job_callable
+        self._start_time: Optional[float] = None
+        self._end_time: Optional[float] = None
+
+    def submit(self):
+        """Submit the job to the backend for execution.
+
+        :raises JobError: If trying to re-submit the job.
+ """ + if len(self._jobs) > 0: + raise JobError("Jobs have already been submitted.") + self._start_time = monotonic() + shots = self._input_params.get("shots", -1) + telemetry_events.on_qiskit_run(shots, len(self._run_input)) + job_index = 0 + for circuit in self._run_input: + job_id = str(uuid4()) + job = QsSimJob( + self._backend, + job_id, + self._job_callable, + [circuit], + self._input_params, + self._executor, + ) + self._job_indexes.append(job_index) + job.submit() + job.add_done_callback(self._job_done) + + self._jobs.append(job) + + def _job_done(self, _future: Future): + self._end_time = monotonic() + if all(job.in_final_state() for job in self._jobs): + # all jobs are done, so we can log the telemetry event + shots = self._input_params.get("shots", -1) + # _start_time is set in submit() before adding this callback + assert self._start_time is not None + duration_in_ms = (self._end_time - self._start_time) * 1000 + num_circuits = len(self._run_input) + telemetry_events.on_qiskit_run_end(shots, num_circuits, duration_in_ms) + + def cancel(self): + """Attempt to cancel the job.""" + for future in self._jobs: + future.cancel() + + def status(self) -> JobStatus: + """Return the status of the job, among the values of ``JobStatus``.""" + if all(job.in_final_state() for job in self._jobs): + if any(job.status() == JobStatus.ERROR for job in self._jobs): + return JobStatus.ERROR + elif any(job.status() == JobStatus.CANCELLED for job in self._jobs): + return JobStatus.CANCELLED + assert all(job.status() == JobStatus.DONE for job in self._jobs) + return JobStatus.DONE + else: + if any(job.status() == JobStatus.RUNNING for job in self._jobs): + return JobStatus.RUNNING + if any(job.status() == JobStatus.QUEUED for job in self._jobs): + return JobStatus.QUEUED + return JobStatus.INITIALIZING + + def result(self, timeout: Optional[float] = None) -> Result: + results: List[Result] = [] + for job in self._jobs: + results.append(job.result(timeout=timeout)) + + if 
len(results) == 1: + return results[0] + + output = results[0].to_dict() + + output["job_id"] = self.job_id() + output["date"] = str(datetime.datetime.now().isoformat()) + output["backend_name"] = self.backend().name + output["backend_version"] = self.backend().backend_version + + # Times are set in submit() and _job_done() which must be called before result() + assert self._start_time is not None + assert self._end_time is not None + duration = self._end_time - self._start_time + output["time_taken"] = str(duration) + output["header"] = { + "metadata": {}, + } + output["qobj_id"] = str(uuid4()) + output["success"] = all(result.success for result in results) + agg_result: List[ExperimentResult] = [] + for result in results: + # The results of an experiment should not be empty + assert result.results is not None + for experiment_result in result.results: + agg_result.append(experiment_result.to_dict()) + output["results"] = agg_result + output = Result.from_dict(output) + return output diff --git a/source/qdk_package/qdk/qiskit/passes/__init__.py b/source/qdk_package/qdk/qiskit/passes/__init__.py new file mode 100644 index 0000000000..bc095ca4ba --- /dev/null +++ b/source/qdk_package/qdk/qiskit/passes/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from .remove_delay import RemoveDelays diff --git a/source/qdk_package/qdk/qiskit/passes/remove_delay.py b/source/qdk_package/qdk/qiskit/passes/remove_delay.py new file mode 100644 index 0000000000..65e8902d38 --- /dev/null +++ b/source/qdk_package/qdk/qiskit/passes/remove_delay.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + + +from qiskit.dagcircuit import DAGCircuit +from qiskit.transpiler.basepasses import TransformationPass +from qiskit.transpiler.passes.utils import control_flow + + +class RemoveDelays(TransformationPass): + """Return a circuit with any delay removed. 
+ + This transformation is not semantics preserving. + """ + + @control_flow.trivial_recurse + def run(self, dag: DAGCircuit) -> DAGCircuit: + """Run the RemoveDelays pass on `dag`.""" + + dag.remove_all_ops_named("delay") + + return dag diff --git a/source/qdk_package/qdk/qre/__init__.py b/source/qdk_package/qdk/qre/__init__.py index 5d0695bb9f..0dbe8d4a9d 100644 --- a/source/qdk_package/qdk/qre/__init__.py +++ b/source/qdk_package/qdk/qre/__init__.py @@ -1,28 +1,86 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -# flake8: noqa F403 -# pyright: ignore[reportWildcardImportFromLibrary] +from ._application import Application +from ._architecture import Architecture, ISAContext +from ._estimation import estimate +from ._instruction import ( + LOGICAL, + PHYSICAL, + Encoding, + ISATransform, + constraint, + InstructionSource, +) +from ._isa_enumeration import ISAQuery, ISARefNode, ISA_ROOT +from ._qre import ( + ISA, + InstructionFrontier, + Constraint, + ConstraintBound, + EstimationResult, + FactoryResult, + ISARequirements, + Block, + Trace, + block_linear_function, + constant_function, + generic_function, + linear_function, + instruction_name, + property_name, + property_name_to_key, +) +from ._results import ( + EstimationTable, + EstimationTableColumn, + EstimationTableEntry, + plot_estimates, +) +from ._trace import LatticeSurgery, PSSPC, TraceQuery, TraceTransform -"""Quantum Resource Estimator (QRE) for the Q# ecosystem. +# Extend Rust Python types with additional Python-side functionality +from ._instruction import _isa_as_frame, _requirements_as_frame -This module re-exports all public symbols from [qsharp.qre](:mod:`qsharp.qre`), -making them available under the ``qdk.qre`` namespace. It provides tools for -estimating the resources required to run quantum applications on specific -hardware architectures. 
+ISA.as_frame = _isa_as_frame +ISARequirements.as_frame = _requirements_as_frame -Example: - - from qdk import qre - results = qre.estimate(app, arch, isa_query) - -Requires the ``qre`` extra: ``pip install qdk[qre]``. -""" - -try: - # Re-export the top-level qsharp.qre names. - from qsharp.qre import * -except Exception as ex: - raise ImportError( - "qdk.qre requires the qre extra. Install with 'pip install qdk[qre]'." - ) from ex +__all__ = [ + "block_linear_function", + "constant_function", + "constraint", + "estimate", + "linear_function", + "plot_estimates", + "Application", + "Architecture", + "Block", + "Constraint", + "ConstraintBound", + "Encoding", + "EstimationResult", + "EstimationTable", + "EstimationTableColumn", + "EstimationTableEntry", + "FactoryResult", + "generic_function", + "instruction_name", + "InstructionFrontier", + "InstructionSource", + "ISA", + "ISA_ROOT", + "ISAContext", + "ISAQuery", + "ISARefNode", + "ISARequirements", + "ISATransform", + "LatticeSurgery", + "PSSPC", + "property_name", + "property_name_to_key", + "Trace", + "TraceQuery", + "TraceTransform", + "LOGICAL", + "PHYSICAL", +] diff --git a/source/qdk_package/qdk/qre/_application.py b/source/qdk_package/qdk/qre/_application.py new file mode 100644 index 0000000000..6c20621b2b --- /dev/null +++ b/source/qdk_package/qdk/qre/_application.py @@ -0,0 +1,172 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from __future__ import annotations + +import types +from abc import ABC, abstractmethod +from concurrent.futures import ThreadPoolExecutor +from types import NoneType +from typing import ( + ClassVar, + Generic, + Protocol, + TypeVar, + Generator, + get_type_hints, + cast, +) + +from ._enumeration import _enumerate_instances +from ._qre import Trace, EstimationResult +from ._trace import TraceQuery + + +class DataclassProtocol(Protocol): + __dataclass_fields__: ClassVar[dict] + + +TraceParameters = TypeVar("TraceParameters", DataclassProtocol, types.NoneType) + + +class Application(ABC, Generic[TraceParameters]): + """ + An application defines a class of quantum computation problems along with a + method to generate traces for specific problem instances. + + We distinguish between application and trace parameters. The application + parameters define which particular instance of the application we want to + consider. The trace parameters define how to generate a trace. They change + the specific way in which we solve the problem, but not the problem itself. + + For example, in quantum cryptanalysis, the application parameters could + define the key size for an RSA prime product, while the trace parameters + define which algorithm to use to break the cryptography, as well as + parameters therein. + """ + + _parallel_traces: bool = True + + @abstractmethod + def get_trace(self, parameters: TraceParameters) -> Trace: + """Return the trace corresponding to this application and parameters. + + Args: + parameters (TraceParameters): The trace parameters. + + Returns: + Trace: The trace for this application instance and parameters. + """ + + @staticmethod + def q(**kwargs) -> TraceQuery: + """Create a trace query for this application. + + Args: + **kwargs: Domain overrides forwarded to trace parameter enumeration. + + Returns: + TraceQuery: A trace query for this application type. 
+ """ + return TraceQuery(NoneType, **kwargs) + + def context(self) -> _Context: + """Create a new enumeration context for this application.""" + return _Context(self) + + def post_process( + self, parameters: TraceParameters, estimation: EstimationResult + ) -> EstimationResult: + """Post-process an estimation result for a given set of trace parameters.""" + return estimation + + def enumerate_traces( + self, + **kwargs, + ) -> Generator[Trace, None, None]: + """Yield all traces of an application given its dataclass parameters. + + Args: + **kwargs: Domain overrides forwarded to ``_enumerate_instances``. + + Yields: + Trace: A trace for each enumerated set of trace parameters. + """ + + param_type = get_type_hints(self.__class__.get_trace).get("parameters") + if param_type is types.NoneType: + yield self.get_trace(None) # type: ignore + return + + if isinstance(param_type, TypeVar): + for c in param_type.__constraints__: + if c is not types.NoneType: + param_type = c + break + + if self._parallel_traces: + instances = list(_enumerate_instances(cast(type, param_type), **kwargs)) + with ThreadPoolExecutor() as executor: + for trace in executor.map(self.get_trace, instances): + yield trace + else: + for instances in _enumerate_instances(cast(type, param_type), **kwargs): + yield self.get_trace(instances) + + def enumerate_traces_with_parameters( + self, + **kwargs, + ) -> Generator[tuple[TraceParameters, Trace], None, None]: + """Yield (parameters, trace) pairs for an application. + + Like ``enumerate_traces``, but each yielded trace is accompanied by the + trace parameters that were used to generate it. + + Args: + **kwargs: Domain overrides forwarded to ``_enumerate_instances``. + + Yields: + tuple[TraceParameters, Trace]: A pair of trace parameters and + the corresponding trace. 
+ """ + + param_type = get_type_hints(self.__class__.get_trace).get("parameters") + if param_type is types.NoneType: + yield None, self.get_trace(None) # type: ignore + return + + if isinstance(param_type, TypeVar): + for c in param_type.__constraints__: + if c is not types.NoneType: + param_type = c + break + + if self._parallel_traces: + instances = list(_enumerate_instances(cast(type, param_type), **kwargs)) + with ThreadPoolExecutor() as executor: + for instance, trace in zip( + instances, executor.map(self.get_trace, instances) + ): + yield instance, trace + else: + for instance in _enumerate_instances(cast(type, param_type), **kwargs): + yield instance, self.get_trace(instance) + + def disable_parallel_traces(self): + """Disable parallel trace generation for this application.""" + self._parallel_traces = False + + +class _Context: + """Enumeration context wrapping an application instance.""" + + application: Application + + def __init__(self, application: Application, **kwargs): + """Initialize the context for the given application. + + Args: + application (Application): The application instance. + **kwargs: Additional keyword arguments (reserved for future use). + """ + self.application = application diff --git a/source/qdk_package/qdk/qre/_architecture.py b/source/qdk_package/qdk/qre/_architecture.py new file mode 100644 index 0000000000..cd8bb52e64 --- /dev/null +++ b/source/qdk_package/qdk/qre/_architecture.py @@ -0,0 +1,244 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from __future__ import annotations +import copy +from typing import cast, TYPE_CHECKING + +from abc import ABC, abstractmethod + +from ._qre import ( + ISA, + _ProvenanceGraph, + Instruction, + _IntFunction, + _FloatFunction, + constant_function, + property_name_to_key, +) + +if TYPE_CHECKING: + from typing import Optional + + from ._instruction import ISATransform, Encoding + + +class Architecture(ABC): + """Abstract base class for quantum hardware architectures.""" + + @abstractmethod + def provided_isa(self, ctx: ISAContext) -> ISA: + """ + Create the ISA provided by this architecture, adding instructions + directly to the context's provenance graph. + + Args: + ctx (ISAContext): The enumeration context whose provenance graph stores + the instructions. + + Returns: + ISA: The ISA backed by the context's provenance graph. + """ + ... + + def context(self) -> ISAContext: + """Create a new enumeration context for this architecture. + + Returns: + ISAContext: A new enumeration context. + """ + return ISAContext(self) + + +class ISAContext: + """ + Context passed through enumeration, holding shared state. + """ + + def __init__(self, arch: Architecture): + """Initialize the ISA context for the given architecture. + + Args: + arch (Architecture): The architecture providing the base ISA. + """ + self._provenance: _ProvenanceGraph = _ProvenanceGraph() + + # Let the architecture create instructions directly in the graph. 
+ self._isa = arch.provided_isa(self) + + self._bindings: dict[str, ISA] = {} + self._transforms: dict[int, Architecture | ISATransform] = {0: arch} + + def _with_binding(self, name: str, isa: ISA) -> ISAContext: + """Return a new context with an additional binding (internal use).""" + ctx = copy.copy(self) + ctx._bindings = {**self._bindings, name: isa} + return ctx + + @property + def isa(self) -> ISA: + """The ISA provided by the architecture for this context.""" + return self._isa + + def add_instruction( + self, + id_or_instruction: int | Instruction, + encoding: Encoding = 0, # type: ignore + *, + arity: Optional[int] = 1, + time: int | _IntFunction = 0, + space: Optional[int] | _IntFunction = None, + length: Optional[int | _IntFunction] = None, + error_rate: float | _FloatFunction = 0.0, + transform: ISATransform | None = None, + source: list[Instruction] | None = None, + **kwargs: int, + ) -> int: + """ + Create an instruction and add it to the provenance graph. + + Can be called in two ways: + + 1. With keyword args to create a new instruction:: + + ctx.add_instruction(T, encoding=LOGICAL, time=1000, + error_rate=1e-8) + + 2. With a pre-existing ``Instruction`` object (e.g. from + ``with_id()``):: + + ctx.add_instruction(existing_instruction) + + Provenance is recorded when *transform* and/or *source* are + supplied: + + - **transform** — the ``ISATransform`` that produced the + instruction. + - **source** — input instructions consumed by the transform. + + Args: + id_or_instruction: Either an instruction ID (int) for creating + a new instruction, or an existing ``Instruction`` object. + encoding: The instruction encoding (0 = Physical, 1 = Logical). + Ignored when passing an existing ``Instruction``. + arity: The instruction arity. ``None`` for variable arity. + Ignored when passing an existing ``Instruction``. + time: Instruction time in ns (or ``_IntFunction`` for variable + arity). Ignored when passing an existing ``Instruction``. 
+ space: Instruction space in physical qubits (or ``_IntFunction`` + for variable arity). Ignored when passing an existing + ``Instruction``. + length: Arity including ancilla qubits. Ignored when passing an + existing ``Instruction``. + error_rate: Instruction error rate (or ``_FloatFunction`` for + variable arity). Ignored when passing an existing + ``Instruction``. + transform: The ``ISATransform`` that produced the instruction. + source: List of source ``Instruction`` objects consumed by the + transform. + **kwargs: Additional properties (e.g. ``distance=9``). Ignored + when passing an existing ``Instruction``. + + Returns: + The node index in the provenance graph. + + Raises: + ValueError: If an unknown property name is provided in kwargs. + """ + if transform is None and source is None: + return self._provenance.add_instruction( + cast(int, id_or_instruction), + encoding, + arity=arity, + time=time, + space=space, + length=length, + error_rate=error_rate, + **kwargs, + ) + + if isinstance(id_or_instruction, Instruction): + instr = id_or_instruction + else: + instr = _make_instruction( + id_or_instruction, + int(encoding), + arity, + time, + space, + length, + error_rate, + kwargs, + ) + + transform_id = id(transform) if transform is not None else 0 + children = [inst.source for inst in source] if source else [] + + node_index = self._provenance.add_node(instr, transform_id, children) + + if transform is not None: + self._transforms[transform_id] = transform + + return node_index + + def make_isa(self, *node_indices: int) -> ISA: + """ + Create an ISA backed by this context's provenance graph from the + given node indices. + + Args: + *node_indices (int): Node indices in the provenance graph. + + Returns: + ISA: An ISA referencing the provenance graph. 
+ """ + return self._provenance.make_isa(list(node_indices)) + + +def _make_instruction( + id: int, + encoding: int, + arity: int | None, + time: int | _IntFunction, + space: int | _IntFunction | None, + length: int | _IntFunction | None, + error_rate: float | _FloatFunction, + properties: dict[str, int], +) -> Instruction: + """Build an ``Instruction`` from keyword arguments.""" + if arity is not None: + instr = Instruction.fixed_arity( + id, + encoding, + arity, + cast(int, time), + cast(int | None, space), + cast(int | None, length), + cast(float, error_rate), + ) + else: + if isinstance(time, int): + time = constant_function(time) + if isinstance(space, int): + space = constant_function(space) + if isinstance(length, int): + length = constant_function(length) + if isinstance(error_rate, (int, float)): + error_rate = constant_function(float(error_rate)) + + instr = Instruction.variable_arity( + id, + encoding, + time, + cast(_IntFunction, space), + error_rate, + length, + ) + + for key, value in properties.items(): + prop_key = property_name_to_key(key) + if prop_key is None: + raise ValueError(f"Unknown property '{key}'.") + instr.set_property(prop_key, value) + + return instr diff --git a/source/qdk_package/qdk/qre/_enumeration.py b/source/qdk_package/qdk/qre/_enumeration.py new file mode 100644 index 0000000000..b01d706944 --- /dev/null +++ b/source/qdk_package/qdk/qre/_enumeration.py @@ -0,0 +1,242 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +import types +from typing import ( + Generator, + Type, + TypeVar, + Literal, + Union, + cast, + get_args, + get_origin, + get_type_hints, +) +from dataclasses import MISSING +from itertools import product +from enum import Enum + + +T = TypeVar("T") + + +def _is_union_type(tp) -> bool: + """Check if a type is a Union or Python 3.10+ union (X | Y).""" + return get_origin(tp) is Union or isinstance(tp, types.UnionType) + + +def _is_type_filter(val, union_members: tuple) -> bool: + """ + Check if *val* is a union member type or a list of union member types, + i.e. a type filter for a union field (as opposed to a fixed value or + instance domain). + """ + member_set = set(union_members) + if isinstance(val, type) and val in member_set: + return True + if isinstance(val, list) and all( + isinstance(v, type) and v in member_set for v in val + ): + return True + return False + + +def _is_union_constraint_dict(val) -> bool: + """ + Check if *val* is a dict whose keys are all types, i.e. a per-member + constraint mapping for a union field. + + Example: ``{OptionA: {"number": [2, 3]}, OptionB: {}}`` + """ + return isinstance(val, dict) and all(isinstance(k, type) for k in val) + + +def _enumerate_union_members( + union_members: tuple, + val=None, +) -> list: + """ + Enumerate instances for a union-typed field. + + *val* controls which members are enumerated and how: + + - ``None`` - enumerate all members with their default domains. + - A single type (e.g. ``OptionB``) - enumerate only that member. + - A list of types (e.g. ``[OptionA, OptionB]``) - enumerate those members. + - A dict mapping types to constraint dicts + (e.g. ``{OptionA: {"number": [2, 3]}, OptionB: {}}``) - + enumerate only the listed members, forwarding the constraint dicts. 
+ """ + # No override - enumerate all members with defaults + if val is None: + domain: list = [] + for member_type in union_members: + domain.extend(_enumerate_instances(member_type)) + return domain + + # Single type + if isinstance(val, type): + return list(_enumerate_instances(val)) + + # List of types + if isinstance(val, list) and all(isinstance(v, type) for v in val): + domain = [] + for member_type in val: + domain.extend(_enumerate_instances(member_type)) + return domain + + # Dict of type → constraint dict + if _is_union_constraint_dict(val): + domain = [] + for member_type, member_kwargs in cast(dict, val).items(): + domain.extend(_enumerate_instances(member_type, **member_kwargs)) + return domain + + raise ValueError( + f"Invalid value for union field: {val!r}. " + "Expected a union member type, a list of types, or a dict mapping " + "types to constraint dicts." + ) + + +def _enumerate_instances(cls: Type[T], **kwargs) -> Generator[T, None, None]: + """ + Yield all instances of a dataclass given its class. + + The enumeration logic supports defining domains for fields using the + ``domain`` metadata key. Additionally, boolean fields are automatically + enumerated with ``[True, False]``, Enum fields with all their members, + and Literal types with their defined values. + + **Nested dataclass fields** can be constrained by passing a dict:: + + _enumerate_instances(Outer, inner={"option": True}) + + **Union-typed fields** support several override forms: + + - A single type to select one member:: + + _enumerate_instances(Config, option=OptionB) + + - A list of types to select a subset:: + + _enumerate_instances(Config, option=[OptionA, OptionB]) + + - A dict mapping types to constraint dicts:: + + _enumerate_instances(Config, option={OptionA: {"number": [2, 3]}, OptionB: {}}) + + Args: + cls (Type[T]): The dataclass type to enumerate. + **kwargs: Fixed values or domains for fields. 
If a value is a list + and the corresponding field is kw_only, it is treated as a domain + to enumerate over. For nested dataclass fields a ``dict`` value + is forwarded as keyword arguments. For union-typed fields a type, + list of types, or ``dict[type, dict]`` controls member selection + and constraints. + + Returns: + Generator[T, None, None]: A generator yielding instances of the + dataclass. + + Raises: + ValueError: If a field cannot be enumerated (no domain found). + """ + + names = [] + values = [] + fixed_kwargs = {} + + if (fields := getattr(cls, "__dataclass_fields__", None)) is None: + # There are no fields defined for this class, so just yield a single + # instance + yield cls(**kwargs) + return + + # Resolve type hints to handle stringified types from __future__.annotations + type_hints = get_type_hints(cls) + + for field in fields.values(): # type: ignore + name = field.name + # Get resolved type or fallback to field.type + current_type = type_hints.get(name, field.type) + + if name in kwargs: + val = kwargs[name] + + is_union = _is_union_type(current_type) + union_members = get_args(current_type) if is_union else () + + # Union field with a type filter or constraint dict + if is_union and ( + _is_type_filter(val, union_members) or _is_union_constraint_dict(val) + ): + names.append(name) + values.append(_enumerate_union_members(union_members, val)) + continue + + # Nested dataclass field with a dict of constraints + if ( + isinstance(val, dict) + and not is_union + and isinstance(current_type, type) + and hasattr(current_type, "__dataclass_fields__") + ): + names.append(name) + values.append(list(_enumerate_instances(current_type, **val))) + continue + + # If kw_only and list, it's a domain to enumerate + if field.kw_only and isinstance(val, list): + names.append(name) + values.append(val) + else: + # Otherwise, it's a fixed value + fixed_kwargs[name] = val + continue + + if not field.kw_only: + # We don't enumerate non-kw-only fields that aren't in 
kwargs + continue + + # Derived domain logic + names.append(name) + + domain = field.metadata.get("domain", None) + if domain is not None: + values.append(domain) + continue + + if current_type is bool: + values.append([True, False]) + continue + + if isinstance(current_type, type) and issubclass(current_type, Enum): + values.append(list(current_type)) + continue + + if get_origin(current_type) is Literal: + values.append(list(get_args(current_type))) + continue + + # Union types (e.g., OptionA | OptionB or Union[OptionA, OptionB]) + if _is_union_type(current_type): + values.append(_enumerate_union_members(get_args(current_type), None)) + continue + + # Nested dataclass types + if isinstance(current_type, type) and hasattr( + current_type, "__dataclass_fields__" + ): + values.append(list(_enumerate_instances(current_type))) + continue + + if field.default is not MISSING: + values.append([field.default]) + continue + + raise ValueError(f"Cannot enumerate field {name}.") + + for instance_values in product(*values): + yield cls(**fixed_kwargs, **dict(zip(names, instance_values))) diff --git a/source/qdk_package/qdk/qre/_estimation.py b/source/qdk_package/qdk/qre/_estimation.py new file mode 100644 index 0000000000..228e139ede --- /dev/null +++ b/source/qdk_package/qdk/qre/_estimation.py @@ -0,0 +1,218 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from __future__ import annotations + +from typing import cast, Optional, Any + +from .. 
import telemetry_events +from ._application import Application +from ._architecture import Architecture +from ._qre import ( + _estimate_parallel, + _estimate_with_graph, + _EstimationCollection, + Trace, +) +from ._trace import TraceQuery, PSSPC, LatticeSurgery +from ._isa_enumeration import ISAQuery +from ._results import EstimationTable, EstimationTableEntry + + +def estimate( + application: Application, + architecture: Architecture, + isa_query: ISAQuery, + trace_query: Optional[TraceQuery] = None, + *, + max_error: float = 1.0, + post_process: bool = False, + use_graph: bool = True, + name: Optional[str] = None, +) -> EstimationTable: + """ + Estimate the resource requirements for a given application instance and + architecture. + + The application instance might return multiple traces. Each of the traces + is transformed by the trace query, which applies several trace transforms in + sequence. Each transform may return multiple traces. Similarly, the + architecture's ISA is transformed by the ISA query, which applies several + ISA transforms in sequence, each of which may return multiple ISAs. The + estimation is performed for each combination of transformed trace and ISA. + The results are collected into an EstimationTable and returned. + + The collection only contains the results that are optimal with respect to + the total number of qubits and the total runtime. + + Note: + The pruning strategy used when ``use_graph`` is set to True (default) + filters ISA instructions by comparing their per-instruction space, time, + and error independently. However, the total qubit count of a result + depends on the interaction between factory space and runtime: + ``factory_qubits = copies × factory_space`` where copies are determined + by ``count.div_ceil(runtime / factory_time)``. 
Because of this, an ISA + instruction that is dominated on per-instruction metrics can still + contribute to a globally Pareto-optimal result (e.g., a factory with + higher time may need fewer copies, leading to fewer total qubits). As a + consequence, ``use_graph=True`` may miss some results that + ``use_graph=False`` would find. Use ``use_graph=False`` when completeness of + the Pareto frontier is required. + + Args: + application (Application): The quantum application to be estimated. + architecture (Architecture): The target quantum architecture. + isa_query (ISAQuery): The ISA query to enumerate ISAs from the architecture. + trace_query (TraceQuery): The trace query to enumerate traces from the + application. + max_error (float): The maximum allowed error for the estimation results. + post_process (bool): If True, use the Python-threaded estimation path + (intended for future post-processing logic). If False (default), + use the Rust parallel estimation path. + use_graph (bool): If True (default), use the Rust estimation path that + builds a graph of ISAs and prunes suboptimal ISAs during estimation. + If False, use the Rust estimation path that does not perform any + pruning and simply enumerates all ISAs for each trace. + name (Optional[str]): An optional name for the estimation. If given, this + will be added as a first column to the results table for all entries. + + Returns: + EstimationTable: A table containing the optimal estimation results. 
+ """ + + telemetry_events.on_qre_estimate(post_process=post_process, use_graph=use_graph) + + app_ctx = application.context() + arch_ctx = architecture.context() + + if trace_query is None: + trace_query = PSSPC.q() * LatticeSurgery.q() + + if post_process: + # Enumerate traces with their parameters so we can post-process later + params_and_traces = cast( + list[tuple[Any, Trace]], + list(trace_query.enumerate(app_ctx, track_parameters=True)), + ) + num_traces = len(params_and_traces) + + # Phase 1: Run all estimates in Rust (parallel, fast). + traces_only = [trace for _, trace in params_and_traces] + + if use_graph: + isa_query.populate(arch_ctx) + arch_ctx._provenance.build_pareto_index() + + num_isas = arch_ctx._provenance.total_isa_count() + + collection = _estimate_with_graph( + cast(list[Trace], traces_only), arch_ctx._provenance, max_error, True + ) + isas = collection.isas + else: + isas = list(isa_query.enumerate(arch_ctx)) + + num_isas = len(isas) + + collection = _estimate_parallel( + cast(list[Trace], traces_only), isas, max_error, True + ) + + total_jobs = collection.total_jobs + successful = collection.successful_estimates + summaries = collection.all_summaries # (trace_idx, isa_idx, qubits, runtime) + + # Phase 2: Learn per-trace runtime multiplier and qubit multiplier from + # one sample each: if post_process changes runtime or qubit count it + # will affect the Pareto optimality, but the changes depend only on the + # trace, not on the ISA. 
+ trace_multipliers: dict[int, tuple[float, float]] = {} + trace_sample_isa: dict[int, int] = {} + for t_idx, isa_idx, _q, r in summaries: + if t_idx not in trace_sample_isa: + trace_sample_isa[t_idx] = isa_idx + for t_idx, isa_idx in trace_sample_isa.items(): + params, trace = params_and_traces[t_idx] + sample = trace.estimate(isas[isa_idx], max_error) + if sample is not None: + pre_q = sample.qubits + pre_r = sample.runtime + pp = app_ctx.application.post_process(params, sample) + if pp is not None and pre_r > 0 and pre_q > 0: + trace_multipliers[t_idx] = (pp.qubits / pre_q, pp.runtime / pre_r) + + # Phase 3: Estimate post-pp values and filter to Pareto candidates. + estimated_pp: list[tuple[int, int, int, int]] = ( + [] + ) # (t_idx, isa_idx, est_q, est_r) + for t_idx, isa_idx, q, r in summaries: + mult_q, mult_r = trace_multipliers.get(t_idx, (0.0, 0.0)) + est_q = int(q * mult_q) if mult_q > 0 else q + est_r = int(r * mult_r) if mult_r > 0 else r + estimated_pp.append((t_idx, isa_idx, est_q, est_r)) + + # Build approximate post-pp Pareto frontier to identify candidates. + estimated_pp.sort(key=lambda x: (x[2], x[3])) # sort by qubits, then runtime + approx_pareto: list[tuple[int, int, int, int]] = [] + min_r = float("inf") + for item in estimated_pp: + if item[3] < min_r: + approx_pareto.append(item) + min_r = item[3] + + # Phase 4: Re-estimate and post-process only the Pareto candidates. 
+ pp_collection = _EstimationCollection() + for t_idx, isa_idx, _q, _r in approx_pareto: + params, trace = params_and_traces[t_idx] + result = trace.estimate(isas[isa_idx], max_error) + if result is not None: + pp_result = app_ctx.application.post_process(params, result) + if pp_result is not None: + pp_collection.insert(pp_result) + collection = pp_collection + else: + traces = list(trace_query.enumerate(app_ctx)) + num_traces = len(traces) + + if use_graph: + isa_query.populate(arch_ctx) + arch_ctx._provenance.build_pareto_index() + + num_isas = arch_ctx._provenance.total_isa_count() + + collection = _estimate_with_graph( + cast(list[Trace], traces), arch_ctx._provenance, max_error, False + ) + else: + isas = list(isa_query.enumerate(arch_ctx)) + + num_isas = len(isas) + + # Use the Rust parallel estimation path + collection = _estimate_parallel( + cast(list[Trace], traces), isas, max_error, False + ) + + total_jobs = collection.total_jobs + successful = collection.successful_estimates + + # Post-process the results and add them to a results table + table = EstimationTable() + + table.name = name + + if name is not None: + table.insert_column(0, "name", lambda entry: name) + + table.extend( + EstimationTableEntry.from_result(result, arch_ctx) for result in collection + ) + + # Fill in the stats for this estimation run + table.stats.num_traces = num_traces + table.stats.num_isas = num_isas + table.stats.total_jobs = total_jobs + table.stats.successful_estimates = successful + table.stats.pareto_results = len(collection) + + return table diff --git a/source/qdk_package/qdk/qre/_instruction.py b/source/qdk_package/qdk/qre/_instruction.py new file mode 100644 index 0000000000..4669a86d4c --- /dev/null +++ b/source/qdk_package/qdk/qre/_instruction.py @@ -0,0 +1,473 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from __future__ import annotations + +from abc import ABC, abstractmethod +from dataclasses import dataclass, field +from typing import Generator, Iterable, Optional +from enum import IntEnum + +import pandas as pd + +from ._architecture import ISAContext, Architecture +from ._enumeration import _enumerate_instances +from ._isa_enumeration import ( + ISA_ROOT, + _BindingNode, + _ComponentQuery, + ISAQuery, +) +from ._qre import ( + ISA, + Constraint, + ConstraintBound, + Instruction, + ISARequirements, + instruction_name, + property_name_to_key, +) + + +class Encoding(IntEnum): + PHYSICAL = 0 + LOGICAL = 1 + + +PHYSICAL = Encoding.PHYSICAL +LOGICAL = Encoding.LOGICAL + + +def constraint( + id: int, + encoding: Encoding = PHYSICAL, + *, + arity: Optional[int] = 1, + error_rate: Optional[ConstraintBound] = None, + **kwargs: bool, +) -> Constraint: + """ + Create an instruction constraint. + + Args: + id (int): The instruction ID. + encoding (Encoding): The instruction encoding. PHYSICAL (0) or LOGICAL (1). + arity (Optional[int]): The instruction arity. If None, instruction is + assumed to have variable arity. Default is 1. + error_rate (Optional[ConstraintBound]): The constraint on the error rate. + **kwargs (bool): Required properties that matching instructions must have. + Valid property names: distance. Set to True to require the property. + + Returns: + Constraint: The instruction constraint. + + Raises: + ValueError: If an unknown property name is provided in kwargs. + """ + c = Constraint(id, encoding, arity, error_rate) + + for key, value in kwargs.items(): + if value: + if (prop_key := property_name_to_key(key)) is None: + raise ValueError(f"Unknown property '{key}'") + + c.add_property(prop_key) + + return c + + +class ISATransform(ABC): + """ + Abstract base class for transformations between ISAs (e.g., QEC schemes). 
+ + An ISA transform defines a mapping from a required input ISA (e.g., + architecture constraints) to a provided output ISA (logical instructions). + It supports enumeration of configuration parameters. + """ + + @staticmethod + @abstractmethod + def required_isa() -> ISARequirements: + """ + Return the requirements that an implementation ISA must satisfy. + + Returns: + ISARequirements: The requirements for the underlying ISA. + """ + ... + + @abstractmethod + def provided_isa( + self, impl_isa: ISA, ctx: ISAContext + ) -> Generator[ISA, None, None]: + """ + Yields ISAs provided by this transform given an implementation ISA. + + Args: + impl_isa (ISA): The implementation ISA that satisfies requirements. + ctx (ISAContext): The enumeration context whose provenance graph + stores the instructions. + + Yields: + ISA: A provided logical ISA. + """ + ... + + @classmethod + def enumerate_isas( + cls, + impl_isa: ISA | Iterable[ISA], + ctx: ISAContext, + **kwargs, + ) -> Generator[ISA, None, None]: + """ + Enumerate all valid ISAs for this transform given implementation ISAs. + + This method iterates over all instances of the transform class (enumerating + hyperparameters) and filters implementation ISAs against requirements. + + Args: + impl_isa (ISA | Iterable[ISA]): One or more implementation ISAs. + ctx (ISAContext): The enumeration context. + **kwargs: Arguments passed to parameter enumeration. + + Yields: + ISA: Valid provided ISAs. + """ + isas = [impl_isa] if isinstance(impl_isa, ISA) else impl_isa + for isa in isas: + if not isa.satisfies(cls.required_isa()): + continue + + for component in _enumerate_instances(cls, **kwargs): + ctx._transforms[id(component)] = component + yield from component.provided_isa(isa, ctx) + + @classmethod + def q(cls, *, source: ISAQuery | None = None, **kwargs) -> ISAQuery: + """ + Create an ISAQuery node for this transform. + + Args: + source (Node | None): The source node providing implementation ISAs. + Defaults to ISA_ROOT. 
+ **kwargs: Fixed values or domains for dataclass fields. Keyword-only + fields with a ``metadata["domain"]`` are enumerated automatically; + passing a value for such a field overrides or restricts the + domain. Non-keyword-only fields passed here are used as fixed + values for all enumerated instances. + + For example, given a transform with a non-keyword-only field + ``threshold`` and a keyword-only field ``distance`` with a + domain, calling ``MyTransform.q(threshold=0.03)`` fixes + ``threshold`` to 0.03 while still enumerating over all values + in the ``distance`` domain. + + Returns: + ISAQuery: An enumeration node representing this transform. + """ + return _ComponentQuery( + cls, source=source if source is not None else ISA_ROOT, kwargs=kwargs + ) + + @classmethod + def bind(cls, name: str, node: ISAQuery) -> _BindingNode: + """ + Create a BindingNode for this transform. + + This is a convenience method equivalent to ``cls.q().bind(name, node)``. + + Args: + name (str): The name to bind the transform's output to. + node (Node): The child node that can reference this binding. + + Returns: + BindingNode: A binding node enclosing this transform. + """ + return cls.q().bind(name, node) + + +@dataclass(slots=True) +class InstructionSource: + nodes: list[_InstructionSourceNode] = field(default_factory=list, init=False) + roots: list[int] = field(default_factory=list, init=False) + + @classmethod + def from_isa(cls, ctx: ISAContext, isa: ISA) -> InstructionSource: + """ + Construct an InstructionSource graph from an ISA. + + The instruction source graph contains more information than the + provenance graph in the context, as it connects the instructions to the + transforms and architectures that generated them. + + Args: + ctx (ISAContext): The enumeration context containing the provenance graph. + isa (ISA): Instructions in the ISA will serve as root nodes in the source graph. 
+ + Returns: + InstructionSource: The instruction source graph for the estimation result. + """ + + def _make_node( + graph: InstructionSource, source_table: dict[int, int], source: int + ) -> int: + if source in source_table: + return source_table[source] + + children = [ + _make_node(graph, source_table, child) + for child in ctx._provenance.children(source) + if child != 0 + ] + + node = graph.add_node( + ctx._provenance.instruction(source), + ctx._transforms.get(ctx._provenance.transform_id(source)), + children, + ) + + source_table[source] = node + return node + + graph = cls() + source_table: dict[int, int] = {} + + for inst in isa: + node_idx = isa.node_index(inst.id) + if node_idx is not None and node_idx != 0: + node = _make_node(graph, source_table, node_idx) + graph.add_root(node) + + return graph + + def add_root(self, node_id: int) -> None: + """Add a root node to the instruction source graph. + + Args: + node_id (int): The index of the node to add as a root. + """ + self.roots.append(node_id) + + def add_node( + self, + instruction: Instruction, + transform: Optional[ISATransform | Architecture], + children: list[int], + ) -> int: + """Add a node to the instruction source graph. + + Args: + instruction (Instruction): The instruction for this node. + transform (Optional[ISATransform | Architecture]): The transform + that produced the instruction. + children (list[int]): Indices of child nodes. + + Returns: + int: The index of the newly added node. 
+ """ + node_id = len(self.nodes) + self.nodes.append(_InstructionSourceNode(instruction, transform, children)) + return node_id + + def __str__(self) -> str: + """Return a formatted string representation of the instruction source graph.""" + + def _format_node(node: _InstructionSourceNode, indent: int = 0) -> str: + result = " " * indent + f"{instruction_name(node.instruction.id) or '??'}" + if node.transform is not None: + result += f" @ {node.transform}" + for child_index in node.children: + result += "\n" + _format_node(self.nodes[child_index], indent + 2) + return result + + return "\n".join( + _format_node(self.nodes[root_index]) for root_index in self.roots + ) + + def __getitem__(self, id: int) -> _InstructionSourceNodeReference: + """ + Retrieve the first instruction source root node with the given + instruction ID. Raises KeyError if no such node exists. + + Args: + id (int): The instruction ID to search for. + + Returns: + _InstructionSourceNodeReference: The first instruction source node with the + given instruction ID. + """ + if (node := self.get(id)) is not None: + return node + + raise KeyError(f"Instruction ID {id} not found in instruction source graph.") + + def __contains__(self, id: int) -> bool: + """ + Check if there is an instruction source root node with the given + instruction ID. + + Args: + id (int): The instruction ID to search for. + + Returns: + bool: True if a node with the given instruction ID exists, False otherwise. + """ + for root in self.roots: + if self.nodes[root].instruction.id == id: + return True + + return False + + def get( + self, id: int, default: Optional[_InstructionSourceNodeReference] = None + ) -> Optional[_InstructionSourceNodeReference]: + """ + Retrieve the first instruction source root node with the given + instruction ID. Returns default if no such node exists. + + Args: + id (int): The instruction ID to search for. 
+ default (Optional[_InstructionSourceNodeReference]): The value to return if no + node with the given ID is found. Default is None. + + Returns: + Optional[_InstructionSourceNodeReference]: The first instruction source node with the + given instruction ID, or default if no such node exists. + """ + for root in self.roots: + if self.nodes[root].instruction.id == id: + return _InstructionSourceNodeReference(self, root) + + return default + + +@dataclass(frozen=True, slots=True) +class _InstructionSourceNode: + """A node in the instruction source graph.""" + + instruction: Instruction + transform: Optional[ISATransform | Architecture] + children: list[int] + + +class _InstructionSourceNodeReference: + """Reference to a node in an InstructionSource graph.""" + + def __init__(self, graph: InstructionSource, node_id: int): + """Initialize a reference to a node in the instruction source graph. + + Args: + graph (InstructionSource): The owning instruction source graph. + node_id (int): The index of the referenced node. + """ + self.graph = graph + self.node_id = node_id + + @property + def instruction(self) -> Instruction: + """The instruction at this node.""" + return self.graph.nodes[self.node_id].instruction + + @property + def transform(self) -> Optional[ISATransform | Architecture]: + """The transform that produced this node's instruction, if any.""" + return self.graph.nodes[self.node_id].transform + + def __str__(self) -> str: + """Return a string representation of the referenced node.""" + return str(self.graph.nodes[self.node_id]) + + def __getitem__(self, id: int) -> _InstructionSourceNodeReference: + """ + Retrieve the first child instruction source node with the given + instruction ID. Raises KeyError if no such node exists. + + Args: + id (int): The instruction ID to search for. + + Returns: + _InstructionSourceNodeReference: The first child instruction source node with the + given instruction ID. 
+ """ + if (node := self.get(id)) is not None: + return node + + raise KeyError( + f"Instruction ID {id} not found in children of instruction {instruction_name(self.instruction.id) or '??'}." + ) + + def get( + self, id: int, default: Optional[_InstructionSourceNodeReference] = None + ) -> Optional[_InstructionSourceNodeReference]: + """ + Retrieve the first child instruction source node with the given + instruction ID. Returns default if no such node exists. + + Args: + id (int): The instruction ID to search for. + default (Optional[_InstructionSourceNodeReference]): The value to return if no + node with the given ID is found. Default is None. + + Returns: + Optional[_InstructionSourceNodeReference]: The first child instruction source + node with the given instruction ID, or default if no such node + exists. + """ + + for child_id in self.graph.nodes[self.node_id].children: + if self.graph.nodes[child_id].instruction.id == id: + return _InstructionSourceNodeReference(self.graph, child_id) + + return default + + +def _isa_as_frame(self: ISA) -> pd.DataFrame: + """Convert an ISA to a pandas DataFrame. + + Args: + self (ISA): The ISA to convert. + + Returns: + pd.DataFrame: A DataFrame with columns for id, encoding, arity, + space, time, and error. + """ + data = { + "id": [instruction_name(inst.id) for inst in self], + "encoding": [Encoding(inst.encoding).name for inst in self], + "arity": [inst.arity for inst in self], + "space": [ + inst.expect_space() if inst.arity is not None else None for inst in self + ], + "time": [ + inst.expect_time() if inst.arity is not None else None for inst in self + ], + "error": [ + inst.expect_error_rate() if inst.arity is not None else None + for inst in self + ], + } + + df = pd.DataFrame(data) + df.set_index("id", inplace=True) + return df + + +def _requirements_as_frame(self: ISARequirements) -> pd.DataFrame: + """Convert ISA requirements to a pandas DataFrame. + + Args: + self (ISARequirements): The requirements to convert. 
+ + Returns: + pd.DataFrame: A DataFrame with columns for id, encoding, and arity. + """ + data = { + "id": [instruction_name(inst.id) for inst in self], + "encoding": [Encoding(inst.encoding).name for inst in self], + "arity": [inst.arity for inst in self], + } + + df = pd.DataFrame(data) + df.set_index("id", inplace=True) + return df diff --git a/source/qdk_package/qdk/qre/_isa_enumeration.py b/source/qdk_package/qdk/qre/_isa_enumeration.py new file mode 100644 index 0000000000..7543c071ed --- /dev/null +++ b/source/qdk_package/qdk/qre/_isa_enumeration.py @@ -0,0 +1,428 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from __future__ import annotations + +import functools +import itertools +from abc import ABC, abstractmethod +from dataclasses import dataclass, field +from typing import Generator + +from ._architecture import ISAContext +from ._enumeration import _enumerate_instances +from ._qre import ISA + + +class ISAQuery(ABC): + """ + Abstract base class for all nodes in the ISA enumeration tree. + + Enumeration nodes define the structure of the search space for ISAs starting + from architectures and modified by ISA transforms such as error correction + schemes. They can be composed using operators like ``+`` (sum) and ``*`` + (product) to build complex enumeration strategies. + """ + + @abstractmethod + def enumerate(self, ctx: ISAContext) -> Generator[ISA, None, None]: + """ + Yields all ISA instances represented by this enumeration node. + + Args: + ctx (ISAContext): The enumeration context containing shared state, + e.g., access to the underlying architecture. + + Yields: + ISA: A possible ISA that can be generated from this node. + """ + pass + + def populate(self, ctx: ISAContext) -> int: + """ + Populate the provenance graph with instructions from this node. + + Unlike ``enumerate``, this does not yield ISA objects. 
Each transform + queries the graph for Pareto-optimal instructions matching its + requirements, and adds produced instructions directly to the graph. + + Args: + ctx (ISAContext): The enumeration context whose provenance graph + will be populated. + + Returns: + int: The starting node index of the instructions contributed by + this subtree. Used by consumers to scope graph queries to only + see their source's nodes. + """ + # Default implementation: consume enumerate for its side effects + start = ctx._provenance.raw_node_count() + for _ in self.enumerate(ctx): + pass + return start + + def __add__(self, other: ISAQuery) -> _SumNode: + """ + Perform a union of two enumeration nodes. + + Enumerating the sum node yields all ISAs from this node, followed by all + ISAs from the other node. Duplicate ISAs may be produced if both nodes + yield the same ISA. + + Args: + other (Node): The other enumeration node. + + Returns: + SumNode: A node representing the union of both enumerations. + + Example: + + The following enumerates ISAs from both SurfaceCode and ColorCode: + + .. code-block:: python + for isa in SurfaceCode.q() + ColorCode.q(): + ... + """ + if isinstance(self, _SumNode) and isinstance(other, _SumNode): + sources = self.sources + other.sources + return _SumNode(sources) + elif isinstance(self, _SumNode): + sources = self.sources + [other] + return _SumNode(sources) + elif isinstance(other, _SumNode): + sources = [self] + other.sources + return _SumNode(sources) + else: + return _SumNode([self, other]) + + def __mul__(self, other: ISAQuery) -> _ProductNode: + """ + Perform the cross product of two enumeration nodes. + + Enumerating the product node yields ISAs resulting from the Cartesian + product of ISAs from both nodes. The ISAs are combined using + concatenation (logical union). This means that instructions in the + other enumeration node with the same ID as an instruction in this + enumeration node will overwrite the instruction from this node. 
+ + Args: + other (Node): The other enumeration node. + + Returns: + ProductNode: A node representing the product of both enumerations. + + Example: + + The following enumerates ISAs formed by combining ISAs from a + surface code and a factory: + + .. code-block:: python + + for isa in SurfaceCode.q() * Factory.q(): + ... + """ + if isinstance(self, _ProductNode) and isinstance(other, _ProductNode): + sources = self.sources + other.sources + return _ProductNode(sources) + elif isinstance(self, _ProductNode): + sources = self.sources + [other] + return _ProductNode(sources) + elif isinstance(other, _ProductNode): + sources = [self] + other.sources + return _ProductNode(sources) + else: + return _ProductNode([self, other]) + + def bind(self, name: str, node: ISAQuery) -> "_BindingNode": + """Create a BindingNode with this node as the component. + + Args: + name: The name to bind the component to. + node: The child enumeration node that may contain ISARefNodes. + + Returns: + A BindingNode with self as the component. + + Example: + + .. code-block:: python + ExampleErrorCorrection.q().bind("c", ISARefNode("c") * ISARefNode("c")) + """ + return _BindingNode(name=name, component=self, node=node) + + +@dataclass +class RootNode(ISAQuery): + """ + Represents the architecture's base ISA. + Reads from the context instead of holding a reference. + """ + + def enumerate(self, ctx: ISAContext) -> Generator[ISA, None, None]: + """ + Yields the architecture ISA from the context. + + Args: + ctx (Context): The enumeration context. + + Yields: + ISA: The architecture's provided ISA, called root. + """ + yield ctx._isa + + def populate(self, ctx: ISAContext) -> int: + """Architecture ISA is already in the graph from ``ISAContext.__init__``. + + Returns: + int: 1, since architecture nodes start at index 1. 
+ """ + return 1 + + +# Singleton instance for convenience +ISA_ROOT = RootNode() + + +@dataclass +class _ComponentQuery(ISAQuery): + """ + Query node that enumerates ISAs based on a component type and source. + + This node takes a component type (which must have an ``enumerate_isas`` class + method) and a source node. It enumerates the source node to get base ISAs, + and then calls ``enumerate_isas`` on the component type for each base ISA + to generate derived ISAs. + + Attributes: + component: The component type to query (e.g., a QEC code class). + source: The source node providing input ISAs (default: ISA_ROOT). + kwargs: Additional keyword arguments passed to ``enumerate_isas``. + """ + + component: type + source: ISAQuery = field(default_factory=lambda: ISA_ROOT) + kwargs: dict = field(default_factory=dict) + + def enumerate(self, ctx: ISAContext) -> Generator[ISA, None, None]: + """ + Yields ISAs generated by the component from source ISAs. + + Args: + ctx (Context): The enumeration context. + + Yields: + ISA: A generated ISA instance. + """ + for isa in self.source.enumerate(ctx): + yield from self.component.enumerate_isas(isa, ctx, **self.kwargs) + + def populate(self, ctx: ISAContext) -> int: + """ + Populate the graph by querying matching instructions. + + Runs the source first to ensure dependency instructions are in + the graph, then queries the graph for all instructions matching + this component's requirements within the source's node range. + For each matching ISA × each hyperparameter instance, calls + ``provided_isa`` to add new instructions to the graph. + + Returns: + int: The starting node index of this component's own additions. 
+ """ + source_start = self.source.populate(ctx) + impl_isas = ctx._provenance.query_satisfying( + self.component.required_isa(), min_node_idx=source_start + ) + own_start = ctx._provenance.raw_node_count() + for instance in _enumerate_instances(self.component, **self.kwargs): + ctx._transforms[id(instance)] = instance + for impl_isa in impl_isas: + for _ in instance.provided_isa(impl_isa, ctx): + pass + return own_start + + +@dataclass +class _ProductNode(ISAQuery): + """ + Node representing the Cartesian product of multiple source nodes. + + Attributes: + sources: A list of source nodes to combine. + """ + + sources: list[ISAQuery] + + def enumerate(self, ctx: ISAContext) -> Generator[ISA, None, None]: + """ + Yields ISAs formed by combining ISAs from all source nodes. + + Args: + ctx (Context): The enumeration context. + + Yields: + ISA: A combined ISA instance. + """ + source_generators = [source.enumerate(ctx) for source in self.sources] + yield from ( + functools.reduce(lambda a, b: a + b, isa_tuple) + for isa_tuple in itertools.product(*source_generators) + ) + + def populate(self, ctx: ISAContext) -> int: + """Populate the graph from each source sequentially (no cross product). + + Returns: + int: The starting node index before any source populated. + """ + first = ctx._provenance.raw_node_count() + for source in self.sources: + source.populate(ctx) + return first + + +@dataclass +class _SumNode(ISAQuery): + """ + Node representing the union of multiple source nodes. + + Attributes: + sources: A list of source nodes to enumerate sequentially. + """ + + sources: list[ISAQuery] + + def enumerate(self, ctx: ISAContext) -> Generator[ISA, None, None]: + """ + Yields ISAs from each source node in sequence. + + Args: + ctx (Context): The enumeration context. + + Yields: + ISA: An ISA instance from one of the sources. 
+ """ + for source in self.sources: + yield from source.enumerate(ctx) + + def populate(self, ctx: ISAContext) -> int: + """Populate the graph from each source sequentially. + + Returns: + int: The starting node index before any source populated. + """ + first = ctx._provenance.raw_node_count() + for source in self.sources: + source.populate(ctx) + return first + + +@dataclass +class ISARefNode(ISAQuery): + """ + A reference to a bound ISA in the enumeration context. + + This node looks up the binding from the context and yields the bound ISA. + + Args: + name: The name of the bound ISA to reference. + """ + + name: str + + def enumerate(self, ctx: ISAContext) -> Generator[ISA, None, None]: + """ + Yields the bound ISA from the context. + + Args: + ctx (Context): The enumeration context containing bindings. + + Yields: + ISA: The bound ISA. + + Raises: + ValueError: If the name is not bound in the context. + """ + if self.name not in ctx._bindings: + raise ValueError(f"Undefined component reference: '{self.name}'") + yield ctx._bindings[self.name] + + def populate(self, ctx: ISAContext) -> int: + """Instructions already in graph from the bound component. + + Returns: + int: 1, since bound component nodes start at index 1. + """ + return 1 + + +@dataclass +class _BindingNode(ISAQuery): + """ + Enumeration node that binds a component to a name. + + This node enables the as_/ref pattern where multiple positions in the + enumeration tree share the same component instance. The bound component + is enumerated once, and its value is shared across all ISARefNodes with + the same name via the context. + + For multiple bindings, nest BindingNode instances. + + Args: + name: The name to bind the component to. + component: An EnumerationNode (e.g., _ComponentQuery) that produces the bound ISAs. + node: The child enumeration node that may contain ISARefNodes. + + Example: + + .. 
code-block:: python + ctx = EnumerationContext(architecture=arch) + + # Bind a code and reference it multiple times + BindingNode( + name="c", + component=ExampleErrorCorrection.q(), + node=ISARefNode("c") * ISARefNode("c"), + ).enumerate(ctx) + + # Multiple bindings via nesting + BindingNode( + name="c", + component=ExampleErrorCorrection.q(), + node=BindingNode( + name="f", + component=ExampleFactory.q(source=ISARefNode("c")), + node=ISARefNode("c") * ISARefNode("f"), + ), + ).enumerate(ctx) + """ + + name: str + component: ISAQuery + node: ISAQuery + + def enumerate(self, ctx: ISAContext) -> Generator[ISA, None, None]: + """ + Enumerate child nodes with the bound component in context. + + Args: + ctx (Context): The enumeration context. + + Yields: + ISA: An ISA instance from the child node. + """ + # Enumerate all ISAs from the component node + for isa in self.component.enumerate(ctx): + # Add binding to context and enumerate child node + new_ctx = ctx._with_binding(self.name, isa) + yield from self.node.enumerate(new_ctx) + + def populate(self, ctx: ISAContext) -> int: + """Populate the graph from both the component and the child node. + + Returns: + int: The starting node index of the component's additions. + """ + comp_start = self.component.populate(ctx) + self.node.populate(ctx) + return comp_start diff --git a/source/qdk_package/qdk/qre/_qre.py b/source/qdk_package/qdk/qre/_qre.py new file mode 100644 index 0000000000..2d1aaa7aa5 --- /dev/null +++ b/source/qdk_package/qdk/qre/_qre.py @@ -0,0 +1,36 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +# flake8: noqa E402 +# pyright: reportAttributeAccessIssue=false + +from .._native import ( + _binom_ppf, + block_linear_function, + Block, + constant_function, + Constraint, + ConstraintBound, + _estimate_parallel, + _estimate_with_graph, + _EstimationCollection, + EstimationResult, + FactoryResult, + _FloatFunction, + generic_function, + instruction_name, + Instruction, + InstructionFrontier, + _IntFunction, + ISA, + ISARequirements, + _ProvenanceGraph, + linear_function, + LatticeSurgery, + PSSPC, + Trace, + property_name_to_key, + property_name, + _float_to_bits, + _float_from_bits, +) diff --git a/source/qdk_package/qdk/qre/_qre.pyi b/source/qdk_package/qdk/qre/_qre.pyi new file mode 100644 index 0000000000..370bd2c886 --- /dev/null +++ b/source/qdk_package/qdk/qre/_qre.pyi @@ -0,0 +1,1679 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from __future__ import annotations +from typing import Any, Callable, Iterator, Optional, overload + +import pandas as pd + +class ISA: + def __add__(self, other: ISA) -> ISA: + """ + Concatenate two ISAs (logical union). Instructions in the second + operand overwrite instructions in the first operand if they have the + same ID. + """ + ... + + def __contains__(self, id: int) -> bool: + """ + Check if the ISA contains an instruction with the given ID. + + Args: + id (int): The instruction ID. + + Returns: + bool: True if the ISA contains an instruction with the given ID, False otherwise. + """ + ... + + def satisfies(self, requirements: ISARequirements) -> bool: + """ + Check if the ISA satisfies the given ISA requirements. + """ + ... + + def __getitem__(self, id: int) -> Instruction: + """ + Get an instruction by its ID. + + Args: + id (int): The instruction ID. + + Returns: + Instruction: The instruction. + """ + ... + + def get( + self, id: int, default: Optional[Instruction] = None + ) -> Optional[Instruction]: + """ + Get an instruction by its ID, or return a default value if not found. 
+ + Args: + id (int): The instruction ID. + default (Optional[Instruction]): The default value to return if the + instruction is not found. + + Returns: + Optional[Instruction]: The instruction, or the default value if not found. + """ + ... + + def __len__(self) -> int: + """ + Return the number of instructions in the ISA. + + Returns: + int: The number of instructions. + """ + ... + + def node_index(self, id: int) -> Optional[int]: + """ + Return the provenance graph node index for the given instruction ID. + + Args: + id (int): The instruction ID. + + Returns: + Optional[int]: The node index, or None if not found. + """ + ... + + def add_node(self, instruction_id: int, node_index: int) -> None: + """ + Add a pre-existing provenance graph node to the ISA. + + Args: + instruction_id (int): The instruction ID. + node_index (int): The node index in the provenance graph. + """ + ... + + def as_frame(self) -> pd.DataFrame: + """ + Return a pandas DataFrame representation of the ISA. + + The DataFrame will have one row per instruction, with columns for + instruction properties such as time, space, and error rate. The exact + columns may vary based on the properties of the instructions in the ISA. + + Returns: + pd.DataFrame: A DataFrame representation of the ISA. + """ + ... + + def __iter__(self) -> Iterator[Instruction]: + """ + Return an iterator over the instructions. + + Note: + The order of instructions is not guaranteed. + + Returns: + Iterator[Instruction]: The instruction iterator. + """ + ... + + def __str__(self) -> str: + """ + Return a string representation of the ISA. + + Note: + The order of instructions in the output is not guaranteed. + + Returns: + str: A string representation of the ISA. + """ + ... + +class ISARequirements: + @overload + def __new__(cls, *constraints: Constraint) -> ISARequirements: ... + @overload + def __new__(cls, constraints: list[Constraint], /) -> ISARequirements: ... 
+ def __new__(cls, *constraints: Constraint | list[Constraint]) -> ISARequirements: + """ + Create an ISA requirements specification from a list of instructions + constraints. + + Args: + constraints (list[Constraint] | *Constraint): The list of instruction + constraints. + """ + ... + + def __len__(self) -> int: + """ + Return the number of constraints in the requirements specification. + + Returns: + int: The number of constraints. + """ + ... + + def __iter__(self) -> Iterator[Constraint]: + """ + Return an iterator over the constraints. + + Note: + The order of constraints is not guaranteed. + + Returns: + Iterator[Constraint]: The constraint iterator. + """ + ... + + def as_frame(self) -> pd.DataFrame: + """ + Return a pandas DataFrame representation of the ISA requirements. + + The DataFrame will have one row per instruction, with columns for + constraint properties such as encoding. + + Returns: + pd.DataFrame: A DataFrame representation of the ISA requirements. + """ + ... + +class Instruction: + @staticmethod + def fixed_arity( + id: int, + encoding: int, + arity: int, + time: int, + space: Optional[int], + length: Optional[int], + error_rate: float, + ) -> Instruction: + """ + Create an instruction with a fixed arity. + + Note: + This function is not intended to be called directly by the user, use qre.instruction instead. + + Args: + id (int): The instruction ID. + encoding (int): The instruction encoding. 0 = Physical, 1 = Logical. + arity (int): The instruction arity. + time (int): The instruction time in ns. + space (Optional[int]): The instruction space in number of physical + qubits. If None, length is used. + length (Optional[int]): The arity including ancilla qubits. If None, + arity is used. + error_rate (float): The instruction error rate. + + Returns: + Instruction: The instruction. + """ + ... 
+ + @staticmethod + def variable_arity( + id: int, + encoding: int, + time_fn: _IntFunction, + space_fn: _IntFunction, + error_rate_fn: _FloatFunction, + length_fn: Optional[_IntFunction], + ) -> Instruction: + """ + Create an instruction with variable arity. + + Note: + This function is not intended to be called directly by the user, use qre.instruction instead. + + Args: + id (int): The instruction ID. + encoding (int): The instruction encoding. 0 = Physical, 1 = Logical. + time_fn (_IntFunction): The time function. + space_fn (_IntFunction): The space function. + error_rate_fn (_FloatFunction): The error rate function. + length_fn (Optional[_IntFunction]): The length function. + If None, space_fn is used. + + Returns: + Instruction: The instruction. + """ + ... + + def with_id(self, id: int) -> Instruction: + """ + Return a copy of the instruction with the given ID. + + Note: + The created instruction will not inherit the source property of the + original instruction and must be set by the user if intended. + + Args: + id (int): The instruction ID. + + Returns: + Instruction: A copy of the instruction with the given ID. + """ + ... + + @property + def id(self) -> int: + """ + The instruction ID. + + Returns: + int: The instruction ID. + """ + ... + + @property + def encoding(self) -> int: + """ + The instruction encoding. 0 = Physical, 1 = Logical. + + Returns: + int: The instruction encoding. + """ + ... + + @property + def arity(self) -> Optional[int]: + """ + The instruction arity. + + Returns: + Optional[int]: The instruction arity. + """ + ... + + def space(self, arity: Optional[int] = None) -> Optional[int]: + """ + The instruction space in number of physical qubits. + + Args: + arity (Optional[int]): The specific arity to check. + + Returns: + Optional[int]: The instruction space in number of physical qubits. + """ + ... + + def time(self, arity: Optional[int] = None) -> Optional[int]: + """ + The instruction time in ns. 
+ + Args: + arity (Optional[int]): The specific arity to check. + + Returns: + Optional[int]: The instruction time in ns. + """ + ... + + def error_rate(self, arity: Optional[int] = None) -> Optional[float]: + """ + The instruction error rate. + + Args: + arity (Optional[int]): The specific arity to check. + + Returns: + Optional[float]: The instruction error rate. + """ + ... + + def expect_space(self, arity: Optional[int] = None) -> int: + """ + The instruction space in number of physical qubits. Raises an error if not found. + + Args: + arity (Optional[int]): The specific arity to check. + + Returns: + int: The instruction space in number of physical qubits. + """ + ... + + def expect_time(self, arity: Optional[int] = None) -> int: + """ + The instruction time in ns. Raises an error if not found. + + Args: + arity (Optional[int]): The specific arity to check. + + Returns: + int: The instruction time in ns. + """ + ... + + def expect_error_rate(self, arity: Optional[int] = None) -> float: + """ + The instruction error rate. Raises an error if not found. + + Args: + arity (Optional[int]): The specific arity to check. + + Returns: + float: The instruction error rate. + """ + ... + + def set_source(self, index: int) -> None: + """ + Set the source index for the instruction. + + Args: + index (int): The source index to set. + """ + ... + + @property + def source(self) -> int: + """ + Get the source index for the instruction. + + Returns: + int: The source index for the instruction. + """ + ... + + def set_property(self, key: int, value: int) -> None: + """ + Set a property on the instruction. + + Args: + key (int): The property key. + value (int): The property value. + """ + ... + + def get_property(self, key: int) -> Optional[int]: + """ + Get a property by its key. + + Args: + key (int): The property key. + + Returns: + Optional[int]: The property value, or None if not found. + """ + ... 
+ + def has_property(self, key: int) -> bool: + """ + Check if the instruction has a property with the given key. + + Args: + key (int): The property key. + + Returns: + bool: True if the instruction has the property, False otherwise. + """ + ... + + def get_property_or(self, key: int, default: int) -> int: + """ + Get a property by its key, or return a default value if not found. + + Args: + key (int): The property key. + default (int): The default value to return if the property is not found. + + Returns: + int: The property value, or the default value if not found. + """ + ... + + def __getitem__(self, key: int) -> int: + """ + Get a property by its key, or raise an error if not found. + + Args: + key (int): The property key. + + Returns: + int: The property value. + """ + ... + + def __str__(self) -> str: + """ + Return a string representation of the instruction. + + Returns: + str: A string representation of the instruction. + """ + ... + +class ConstraintBound: + """ + A bound for a constraint. + """ + + @staticmethod + def lt(value: float) -> ConstraintBound: + """ + Create a less than constraint bound. + + Args: + value (float): The value. + + Returns: + ConstraintBound: The constraint bound. + """ + ... + + @staticmethod + def le(value: float) -> ConstraintBound: + """ + Create a less equal constraint bound. + + Args: + value (float): The value. + + Returns: + ConstraintBound: The constraint bound. + """ + ... + + @staticmethod + def eq(value: float) -> ConstraintBound: + """ + Create an equal constraint bound. + + Args: + value (float): The value. + + Returns: + ConstraintBound: The constraint bound. + """ + ... + + @staticmethod + def gt(value: float) -> ConstraintBound: + """ + Create a greater than constraint bound. + + Args: + value (float): The value. + + Returns: + ConstraintBound: The constraint bound. + """ + ... + + @staticmethod + def ge(value: float) -> ConstraintBound: + """ + Create a greater equal constraint bound. 
# Continuation of ConstraintBound.ge (its docstring opens in the previous
# chunk); the dangling docstring tail is kept as a literal so this fragment
# stays syntactically inert.
"""
        Args:
            value (float): The value.

        Returns:
            ConstraintBound: The constraint bound.
"""
...

class Constraint:
    """
    An instruction constraint that can be used to describe ISA requirements
    for ISA transformations.
    """

    def __new__(
        cls,
        id: int,
        encoding: int,
        arity: Optional[int],
        error_rate: Optional[ConstraintBound],
    ) -> Constraint:
        """
        Note:
            This function is not intended to be called directly by the user, use qre.constraint instead.

        Args:
            id (int): The instruction ID.
            encoding (int): The instruction encoding. 0 = Physical, 1 = Logical.
            arity (Optional[int]): The instruction arity. If None, instruction is
                assumed to have variable arity.
            error_rate (Optional[ConstraintBound]): The constraint on the error rate.

        Returns:
            Constraint: The instruction constraint.
        """
        ...

    @property
    def id(self) -> int:
        """
        The instruction ID.

        Returns:
            int: The instruction ID.
        """
        ...

    @property
    def encoding(self) -> int:
        """
        The instruction encoding. 0 = Physical, 1 = Logical.

        Returns:
            int: The instruction encoding.
        """
        ...

    @property
    def arity(self) -> Optional[int]:
        """
        The instruction arity.

        Returns:
            Optional[int]: The instruction arity.
        """
        ...

    @property
    def error_rate(self) -> Optional[ConstraintBound]:
        """
        The constraint on the instruction error rate.

        Returns:
            Optional[ConstraintBound]: The constraint on the instruction error rate.
        """
        ...

    def add_property(self, property: int) -> None:
        """
        Add a property requirement to the constraint.

        Args:
            property (int): The property key that must be present in matching instructions.
        """
        ...

    def has_property(self, property: int) -> bool:
        """
        Check if the constraint requires a specific property.

        Args:
            property (int): The property key to check.

        Returns:
            bool: True if the constraint requires this property, False otherwise.
        """
        ...
+ +class _IntFunction: + def __call__(self, arity: int) -> int: ... + +class _FloatFunction: + def __call__(self, arity: int) -> float: ... + +@overload +def constant_function(value: int) -> _IntFunction: ... +@overload +def constant_function(value: float) -> _FloatFunction: ... +def constant_function( + value: int | float, +) -> _IntFunction | _FloatFunction: + """ + Create a constant function. + + Args: + value (int | float): The constant value. + + Returns: + _IntFunction | _FloatFunction: The constant function. + """ + ... + +@overload +def linear_function(slope: int) -> _IntFunction: ... +@overload +def linear_function(slope: float) -> _FloatFunction: ... +def linear_function( + slope: int | float, +) -> _IntFunction | _FloatFunction: + """ + Create a linear function. + + Args: + slope (int | float): The slope. + + Returns: + _IntFunction | _FloatFunction: The linear function. + """ + ... + +@overload +def block_linear_function( + block_size: int, slope: int, offset: Optional[int] = None +) -> _IntFunction: ... +@overload +def block_linear_function( + block_size: int, slope: float, offset: Optional[float] = None +) -> _FloatFunction: ... +def block_linear_function( + block_size: int, slope: int | float, offset: Optional[int | float] = None +) -> _IntFunction | _FloatFunction: + """ + Create a block linear function that takes an arity (number of qubits) as + input. Given an arity, it will compute the number of blocks `num_blocks` by + computing `ceil(arity / block_size)` and then return `slope * num_blocks + + offset`. + + Args: + block_size (int): The block size. + slope (int | float): The slope. + offset (Optional[int | float]): The offset. Default is `None`, which is + treated as 0 for int and 0.0 for float. + + Returns: + _IntFunction | _FloatFunction: The block linear function. + """ + ... + +@overload +def generic_function(func: Callable[[int], int]) -> _IntFunction: ... +@overload +def generic_function(func: Callable[[int], float]) -> _FloatFunction: ... 
+def generic_function( + func: Callable[[int], int | float], +) -> _IntFunction | _FloatFunction: + """ + Create a generic function from a Python callable. + + Note: + Only use this function if the other function constructors + (constant_function, linear_function, and block_linear_function) do not + meet your needs, as using a Python callable can have performance + implications. If using this function, keep the logic in the callable as + simple as possible to minimize overhead. + + Args: + func (Callable[[int], int | float]): The Python callable. + + Returns: + _IntFunction | _FloatFunction: The generic function. + """ + ... + +class _ProvenanceGraph: + """ + Represents the provenance graph of instructions in a trace. Each node in + the graph corresponds to an instruction and the transform from which it was + produced, and edges represent transformations applied to instructions during + enumeration. + """ + + def add_node( + self, instruction: Instruction, transform_id: int, children: list[int] + ) -> int: + """ + Add a node to the provenance graph. + + Args: + instruction (int): The instruction corresponding to the node. + transform_id (int): The transform ID corresponding to the node. + children (list[int]): The list of child node indices in the provenance graph. + + Returns: + int: The index of the added node in the provenance graph. + """ + ... + + def instruction(self, node_index: int) -> Instruction: + """ + Return the instruction for a given node index. + + Args: + node_index (int): The index of the node in the provenance graph. + + Returns: + int: The instruction corresponding to the node. + """ + ... + + def transform_id(self, node_index: int) -> int: + """ + Return the transform ID for a given node index. + + Args: + node_index (int): The index of the node in the provenance graph. + + Returns: + int: The transform ID corresponding to the node. + """ + ... 
+ + def children(self, node_index: int) -> list[int]: + """ + Return the list of child node indices for a given node index. + + Args: + node_index (int): The index of the node in the provenance graph. + + Returns: + list[int]: The list of child node indices. + """ + ... + + def num_nodes(self) -> int: + """ + Return the number of nodes in the provenance graph. + + Returns: + int: The number of nodes in the provenance graph. + """ + ... + + def num_edges(self) -> int: + """ + Return the number of edges in the provenance graph. + + Returns: + int: The number of edges in the provenance graph. + """ + ... + + @overload + def add_instruction( + self, + instruction: Instruction, + ) -> int: ... + @overload + def add_instruction( + self, + id: int, + encoding: int = 0, + *, + arity: Optional[int] = 1, + time: int | _IntFunction = ..., + space: Optional[int | _IntFunction] = None, + length: Optional[int | _IntFunction] = None, + error_rate: float | _FloatFunction = ..., + **kwargs: int, + ) -> int: ... + def add_instruction( + self, + id_or_instruction: int | Instruction, + encoding: int = 0, + *, + arity: Optional[int] = 1, + time: int | _IntFunction = ..., + space: Optional[int | _IntFunction] = None, + length: Optional[int | _IntFunction] = None, + error_rate: float | _FloatFunction = ..., + **kwargs: int, + ) -> int: + """ + Add an instruction to the provenance graph with no transform or + children. + + Can be called with a pre-existing ``Instruction`` or with keyword + args to create one inline. + + Args: + id_or_instruction: An instruction ID (int) or ``Instruction``. + encoding: 0 = Physical, 1 = Logical. Ignored for ``Instruction``. + arity: Instruction arity, ``None`` for variable. Ignored for + ``Instruction``. + time: Time in ns (or ``_IntFunction``). Ignored for ``Instruction``. + space: Space in physical qubits (or ``_IntFunction``). Ignored for + ``Instruction``. + length: Arity including ancillas. Ignored for ``Instruction``. 
+ error_rate: Error rate (or ``_FloatFunction``). Ignored for + ``Instruction``. + **kwargs: Additional properties (e.g. ``distance=9``). + + Returns: + int: The node index of the added instruction. + """ + ... + + def make_isa(self, node_indices: list[int]) -> ISA: + """ + Create an ISA backed by this provenance graph from the given node + indices. + + Args: + node_indices: A list of node indices in the provenance graph. + + Returns: + ISA: An ISA referencing this provenance graph. + """ + ... + + def build_pareto_index(self) -> None: + """ + Builds the per-instruction-ID Pareto index. + + For each instruction ID, retains only the Pareto-optimal nodes w.r.t. + (space, time, error_rate) evaluated at arity 1. Must be called after + all nodes have been added. + """ + ... + + def query_satisfying( + self, + requirements: ISARequirements, + min_node_idx: Optional[int] = None, + ) -> list[ISA]: + """ + Return ISAs formed from Pareto-optimal graph nodes satisfying the + given requirements. + + For each constraint in requirements, selects matching Pareto-optimal + nodes. Returns the Cartesian product of per-constraint matches, + augmented with one representative node per unconstrained instruction + ID. + + Must be called after ``build_pareto_index``. + + Args: + requirements: The ISA requirements to satisfy. + min_node_idx: If provided, only consider Pareto nodes at or above + this index for constrained groups. + + Returns: + list[ISA]: ISAs formed from matching Pareto-optimal nodes. + """ + ... + + def raw_node_count(self) -> int: + """ + Return the raw node count (including the sentinel at index 0). + + Returns: + int: The number of nodes in the graph. + """ + ... + + def total_isa_count(self) -> int: + """ + Return the total number of ISAs that can be formed from Pareto-optimal + nodes. + + Requires ``build_pareto_index`` to have been called. + + Returns: + int: The total number of ISAs that can be formed. + """ + ... 
+ +class EstimationResult: + """ + Represents the result of a resource estimation. + """ + + def __new__( + cls, *, qubits: int = 0, runtime: int = 0, error: float = 0.0 + ) -> EstimationResult: + """ + Create a new estimation result. + + Args: + qubits (int): The number of logical qubits. + runtime (int): The runtime in nanoseconds. + error (float): The error probability of the computation. + + Returns: + EstimationResult: The estimation result. + """ + ... + + @property + def qubits(self) -> int: + """ + The number of logical qubits. + + Returns: + int: The number of logical qubits. + """ + ... + + @qubits.setter + def qubits(self, qubits: int) -> None: + """ + Set the number of logical qubits. + + Args: + qubits (int): The number of logical qubits to set. + """ + ... + + @property + def runtime(self) -> int: + """ + The runtime in nanoseconds. + + Returns: + int: The runtime in nanoseconds. + """ + ... + + @runtime.setter + def runtime(self, runtime: int) -> None: + """ + Set the runtime. + + Args: + runtime (int): The runtime in nanoseconds to set. + """ + ... + + @property + def error(self) -> float: + """ + The error probability of the computation. + + Returns: + float: The error probability of the computation. + """ + ... + + @error.setter + def error(self, error: float) -> None: + """ + Set the error probability. + + Args: + error (float): The error probability to set. + """ + ... + + @property + def factories(self) -> dict[int, FactoryResult]: + """ + The factory results. + + Returns: + dict[int, FactoryResult]: A dictionary mapping factory IDs to their results. + """ + ... + + @property + def isa(self) -> ISA: + """ + The ISA used for the estimation. + + Returns: + ISA: The ISA used for the estimation. + """ + ... + + @property + def properties(self) -> dict[int, bool | int | float | str]: + """ + Custom properties from application generation and trace transform. 
# Continuation of EstimationResult (its class header and the opening of the
# `properties` docstring are in the previous chunk); the dangling docstring
# tail is kept as a literal, and the two trailing EstimationResult methods are
# reconstructed here so this fragment stays syntactically inert.
"""
        Returns:
            dict[int, bool | int | float | str]: A dictionary mapping property keys to their values.
"""
...

def set_property(self, key: int, value: bool | int | float | str) -> None:
    """
    Set a custom property.

    Args:
        key (int): The property key.
        value (bool | int | float | str): The property value. All values of type `int`, `float`, `bool`, and `str`
            are supported. Any other value is converted to a string using its `__str__` method.
    """
    ...

def __str__(self) -> str:
    """
    Return a string representation of the estimation result.

    Returns:
        str: A string representation of the estimation result.
    """
    ...

class _EstimationCollection:
    """
    Represents a collection of estimation results. Results are stored as a 2D
    Pareto frontier with physical qubits and runtime as objectives.
    """

    def __new__(cls) -> _EstimationCollection:
        """
        Create a new estimation collection.

        Returns:
            _EstimationCollection: The estimation collection.
        """
        ...

    def insert(self, result: EstimationResult) -> None:
        """
        Insert an estimation result into the collection.

        Args:
            result (EstimationResult): The estimation result to insert.
        """
        ...

    def __len__(self) -> int:
        """
        Return the number of estimation results in the collection.

        Returns:
            int: The number of estimation results.
        """
        ...

    def __iter__(self) -> Iterator[EstimationResult]:
        """
        Return an iterator over the estimation results.

        Returns:
            Iterator[EstimationResult]: The estimation result iterator.
        """
        ...

    @property
    def total_jobs(self) -> int:
        """
        Return the total number of (trace, ISA) estimation jobs.

        Returns:
            int: The total number of jobs.
        """
        ...

    @property
    def successful_estimates(self) -> int:
        """
        Return the number of estimation jobs that completed successfully
        (before Pareto filtering).

        Returns:
            int: The number of successful estimates.
        """
        ...
+ + @property + def all_summaries(self) -> list[tuple[int, int, int, int]]: + """ + Return lightweight summaries of ALL successful estimates as a list + of (trace_index, isa_index, qubits, runtime) tuples. + + Returns: + list[tuple[int, int, int, int]]: List of (trace_index, isa_index, + qubits, runtime) for every successful estimation. + """ + ... + + @property + def isas(self) -> list[ISA]: + """ + Return the list of ISAs for which estimates were performed. + + Returns: + list[ISA]: The list of ISAs. + """ + ... + +class FactoryResult: + """ + Represents the result of a factory used in resource estimation. + """ + + @property + def copies(self) -> int: + """ + The number of factory copies. + + Returns: + int: The number of factory copies. + """ + ... + + @property + def runs(self) -> int: + """ + The number of factory runs. + + Returns: + int: The number of factory runs. + """ + ... + + @property + def error_rate(self) -> float: + """ + The error rate of the factory. + + Returns: + float: The error rate of the factory. + """ + ... + + @property + def states(self) -> int: + """ + The number of states produced by the factory. + + Returns: + int: The number of states produced by the factory. + """ + ... + +class Trace: + """ + Represents a quantum program optimized for resource estimation. + + A trace originates from a quantum application and can be modified via trace + transformations. It consists of blocks of operations. + """ + + def __new__(cls, compute_qubits: int) -> Trace: + """ + Create a new trace. + + Returns: + Trace: The trace. + """ + ... + + def clone_empty(self, compute_qubits: Optional[int] = None) -> Trace: + """ + Create a new trace with the same metadata but empty block. + + Args: + compute_qubits (Optional[int]): The number of compute qubits. If None, + the number of compute qubits of the original trace is used. + + Returns: + Trace: The new trace. + """ + ... 
+ + @classmethod + def from_json(cls, json: str) -> Trace: + """ + Create a trace from a JSON string. + + Args: + json (str): The JSON string. + + Returns: + Trace: The trace. + """ + ... + + def to_json(self) -> str: + """ + Serializes the trace to a JSON string. + + Returns: + str: The JSON string representation of the trace. + """ + ... + + @property + def compute_qubits(self) -> int: + """ + The number of compute qubits. + + Returns: + int: The number of compute qubits. + """ + ... + + @compute_qubits.setter + def compute_qubits(self, qubits: int) -> None: + """ + Set the number of compute qubits. + + Args: + qubits (int): The number of compute qubits to set. + """ + ... + + @property + def base_error(self) -> float: + """ + The base error of the trace. + + Returns: + float: The base error of the trace. + """ + ... + + def increment_base_error(self, amount: float) -> None: + """ + Increments the base error. + + Args: + amount (float): The amount to increment. + """ + ... + + @property + def memory_qubits(self) -> Optional[int]: + """ + The number of memory qubits, if set. + + Returns: + Optional[int]: The number of memory qubits, or None if not set. + """ + ... + + def has_memory_qubits(self) -> bool: + """ + Check if the trace has memory qubits set. + + Returns: + bool: True if memory qubits are set, False otherwise. + """ + ... + + @memory_qubits.setter + def memory_qubits(self, qubits: int) -> None: + """ + Set the number of memory qubits. + + Args: + qubits (int): The number of memory qubits. + """ + ... + + def increment_memory_qubits(self, amount: int) -> None: + """ + Increments the number of memory qubits. If memory qubits have not been + set, initializes them to 0 before incrementing. + + Args: + amount (int): The amount to increment. + """ + ... + + def increment_resource_state(self, resource_id: int, amount: int) -> None: + """ + Increments a resource state count. + + Args: + resource_id (int): The resource state ID. 
+ amount (int): The amount to increment. + """ + ... + + def set_property(self, key: int, value: Any) -> None: + """ + Set a property. All values of type `int`, `float`, `bool`, and `str` + are supported. Any other value is converted to a string using its + `__str__` method. + + Args: + key (int): The property key. + value (Any): The property value. + """ + ... + + def get_property(self, key: int) -> Optional[int | float | bool | str]: + """ + Get a property. + + Args: + key (int): The property key. + + Returns: + Optional[int | float | bool | str]: The property value, or None if not found. + """ + ... + + def has_property(self, key: int) -> bool: + """ + Check if a property with the given key exists. + + Args: + key (int): The property key. + + Returns: + bool: True if the property exists, False otherwise. + """ + ... + + @property + def total_qubits(self) -> int: + """ + The total number of qubits (compute + memory). + + Returns: + int: The total number of qubits. + """ + ... + + @property + def depth(self) -> int: + """ + The trace depth. + + Returns: + int: The trace depth. + """ + ... + + @property + def num_gates(self) -> int: + """ + The total number of gates in the trace. + + Returns: + int: The total number of gates. + """ + ... + + def estimate( + self, isa: ISA, max_error: Optional[float] = None + ) -> Optional[EstimationResult]: + """ + Estimate resources for the trace given a logical ISA. + + Args: + isa (ISA): The logical ISA. + max_error (Optional[float]): The maximum allowed error. If None, + Pareto points are computed. + + Returns: + Optional[EstimationResult]: The estimation result if max_error is + provided, otherwise valid Pareto points. + """ + ... # The implementation in Rust returns Option, so it fits + + @property + def resource_states(self) -> dict[int, int]: + """ + The resource states used in the trace. + + Returns: + dict[int, int]: A dictionary mapping instruction IDs to their counts. + """ + ... 
+ + def add_operation( + self, id: int, qubits: list[int], params: list[float] = [] + ) -> None: + """ + Add an operation to the trace. + + Args: + id (int): The operation ID. + qubits (list[int]): The qubits involved in the operation. + params (list[float]): The operation parameters. + """ + ... + + def root_block(self) -> Block: + """ + Return the root block of the trace. + + Returns: + Block: The root block of the trace. + """ + ... + + def add_block(self, repetitions: int = 1) -> Block: + """ + Add a block to the trace. + + Args: + repetitions (int): The number of times the block is repeated. + + Returns: + Block: The block. + """ + ... + + @property + def required_isa(self) -> ISARequirements: + """ + The required ISA for the trace. + + Returns: + ISARequirements: The required ISA for the trace. + """ + ... + + def __str__(self) -> str: + """ + Return a string representation of the trace. + + Returns: + str: A string representation of the trace. + """ + ... + +class Block: + """ + Represents a block of operations in a trace. + + An operation in a block can either refer to an instruction applied to some + qubits or can be another block to create a hierarchical structure. Blocks + can be repeated. + """ + + def add_operation( + self, id: int, qubits: list[int], params: list[float] = [] + ) -> None: + """ + Add an operation to the block. + + Args: + id (int): The operation ID. + qubits (list[int]): The qubits involved in the operation. + params (list[float]): The operation parameters. + """ + ... + + def add_block(self, repetitions: int = 1) -> Block: + """ + Add a nested block to the block. + + Args: + repetitions (int): The number of times the block is repeated. + + Returns: + Block: The block. + """ + ... + + def __str__(self) -> str: + """ + Return a string representation of the block. + + Returns: + str: A string representation of the block. + """ + ... + +class PSSPC: + def __new__(cls, num_ts_per_rotation: int, ccx_magic_states: bool) -> PSSPC: ... 
+ def transform(self, trace: Trace) -> Optional[Trace]: ... + +class LatticeSurgery: + def __new__(cls, slow_down_factor: float) -> LatticeSurgery: ... + def transform(self, trace: Trace) -> Optional[Trace]: ... + +class InstructionFrontier: + """ + Represents a Pareto frontier of instructions with space, time, and error + rates as objectives. + """ + + def __new__(cls, *, with_error_objective: bool = True) -> InstructionFrontier: + """ + Create a new instruction frontier. + + Args: + with_error_objective (bool): If True (default), the frontier uses + three objectives (space, time, error rate). If False, it uses + two objectives (space, time). + """ + ... + + def insert(self, point: Instruction): + """ + Insert an instruction into the frontier. + + Args: + point (Instruction): The instruction to insert. + """ + ... + + def extend(self, points: list[Instruction]) -> None: + """ + Extend the frontier with a list of instructions. + + Args: + points (list[Instruction]): The instructions to insert. + """ + ... + + def __len__(self) -> int: + """ + Return the number of instructions in the frontier. + + Returns: + int: The number of instructions. + """ + ... + + def __iter__(self) -> Iterator[Instruction]: + """ + Return an iterator over the instructions in the frontier. + + Returns: + Iterator[Instruction]: The iterator. + """ + ... + + @staticmethod + def load( + filename: str, *, with_error_objective: bool = True + ) -> InstructionFrontier: + """ + Load an instruction frontier from a file. + + Args: + filename (str): The file name. + with_error_objective (bool): If True (default), the frontier uses + three objectives (space, time, error rate). If False, it uses + two objectives (space, time). + + Returns: + InstructionFrontier: The loaded instruction frontier. + """ + ... + + def dump(self, filename: str) -> None: + """ + Dump the instruction frontier to a file. + + Args: + filename (str): The file name. + """ + ... 
+ +def _estimate_parallel( + traces: list[Trace], + isas: list[ISA], + max_error: float = 1.0, + post_process: bool = False, +) -> _EstimationCollection: + """ + Estimate resources for multiple traces and ISAs in parallel. + + Args: + traces (list[Trace]): The list of traces. + isas (list[ISA]): The list of ISAs. + max_error (float): The maximum allowed error. The default is 1.0. + post_process (bool): If True, computes auxiliary data such as result + summaries needed for post-processing after estimation. + + Returns: + _EstimationCollection: The estimation collection. + """ + ... + +def _estimate_with_graph( + traces: list[Trace], + graph: _ProvenanceGraph, + max_error: float = 1.0, + post_process: bool = False, +) -> _EstimationCollection: + """ + Estimate resources using a Pareto-filtered provenance graph. + + Instead of forming the full Cartesian product of ISAs × traces, this + function enumerates per-trace instruction combinations from the + Pareto-optimal subsets in the frozen graph. + + Args: + traces (list[Trace]): The list of traces to estimate. + graph (_ProvenanceGraph): The provenance graph to use for estimation. + max_error (float): The maximum allowed error. The default is 1.0. + post_process (bool): If True, computes auxiliary data such as result + summaries and ISAs needed for post-processing after estimation. + + Returns: + _EstimationCollection: The estimation collection. + """ + ... + +def _binom_ppf(q: float, n: int, p: float) -> int: + """ + A replacement for SciPy's binom.ppf that is faster and does not require + SciPy as a dependency. + """ + ... + +def _float_to_bits(f: float) -> int: + """Convert a float to its bit representation as an integer.""" + ... + +def _float_from_bits(b: int) -> float: + """Convert a float from its bit representation as an integer.""" + ... + +def instruction_name(id: int) -> Optional[str]: + """ + Return the name of an instruction given its ID, if known. + + Args: + id (int): The instruction ID. 
+ + Returns: + Optional[str]: The name of the instruction, or None if the ID is not recognized. + """ + ... + +def property_name_to_key(name: str) -> Optional[int]: + """ + Convert a property name to its corresponding key, if known. + + Args: + name (str): The property name. + + Returns: + Optional[int]: The property key, or None if the name is not recognized. + """ + ... + +def property_name(id: int) -> Optional[str]: + """ + Convert a property key to its corresponding name, if known. + + Args: + id (int): The property key. + + Returns: + Optional[str]: The property name, or None if the key is not recognized. + """ + ... diff --git a/source/qdk_package/qdk/qre/_results.py b/source/qdk_package/qdk/qre/_results.py new file mode 100644 index 0000000000..2429f68787 --- /dev/null +++ b/source/qdk_package/qdk/qre/_results.py @@ -0,0 +1,418 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from __future__ import annotations + +from dataclasses import dataclass, field +from typing import Optional, Callable, Any, Iterable + +import pandas as pd + +from ._architecture import ISAContext +from ._qre import FactoryResult, instruction_name, EstimationResult, property_name +from ._instruction import InstructionSource +from .property_keys import ( + PHYSICAL_COMPUTE_QUBITS, + PHYSICAL_MEMORY_QUBITS, + PHYSICAL_FACTORY_QUBITS, +) + + +class EstimationTable(list["EstimationTableEntry"]): + """A table of quantum resource estimation results. + + Extends ``list[EstimationTableEntry]`` and provides configurable columns for + displaying estimation data. By default the table includes *qubits*, + *runtime* (displayed as a ``pandas.Timedelta``), and *error* columns. + Additional columns can be added or inserted with ``add_column`` and + ``insert_column``. 
+ """ + + def __init__(self): + """Initialize an empty estimation table with default columns.""" + super().__init__() + + self.name: Optional[str] = None + self.stats = EstimationTableStats() + + self._columns: list[tuple[str, EstimationTableColumn]] = [ + ("qubits", EstimationTableColumn(lambda entry: entry.qubits)), + ( + "runtime", + EstimationTableColumn( + lambda entry: entry.runtime, + formatter=lambda x: pd.Timedelta(x, unit="ns"), + ), + ), + ("error", EstimationTableColumn(lambda entry: entry.error)), + ] + + def add_column( + self, + name: str, + function: Callable[[EstimationTableEntry], Any], + formatter: Optional[Callable[[Any], Any]] = None, + ) -> None: + """Add a column to the estimation table. + + Args: + name (str): The name of the column. + function (Callable[[EstimationTableEntry], Any]): A function that + takes an EstimationTableEntry and returns the value for this + column. + formatter (Optional[Callable[[Any], Any]]): An optional function + that formats the output of ``function`` for display purposes. + """ + self._columns.append((name, EstimationTableColumn(function, formatter))) + + def insert_column( + self, + index: int, + name: str, + function: Callable[[EstimationTableEntry], Any], + formatter: Optional[Callable[[Any], Any]] = None, + ) -> None: + """Insert a column at the specified index in the estimation table. + + Args: + index (int): The index at which to insert the column. + name (str): The name of the column. + function (Callable[[EstimationTableEntry], Any]): A function that + takes an EstimationTableEntry and returns the value for this + column. + formatter (Optional[Callable[[Any], Any]]): An optional function + that formats the output of ``function`` for display purposes. 
+ """ + self._columns.insert(index, (name, EstimationTableColumn(function, formatter))) + + def add_qubit_partition_column(self) -> None: + """Add columns for the physical compute, factory, and memory qubit counts.""" + self.add_column( + "physical_compute_qubits", + lambda entry: entry.properties.get(PHYSICAL_COMPUTE_QUBITS, 0), + ) + self.add_column( + "physical_factory_qubits", + lambda entry: entry.properties.get(PHYSICAL_FACTORY_QUBITS, 0), + ) + self.add_column( + "physical_memory_qubits", + lambda entry: entry.properties.get(PHYSICAL_MEMORY_QUBITS, 0), + ) + + def add_factory_summary_column(self) -> None: + """Add a column to the estimation table that summarizes the factories used in the estimation.""" + + def summarize_factories(entry: EstimationTableEntry) -> str: + if not entry.factories: + return "None" + return ", ".join( + f"{factory_result.copies}×{instruction_name(id)}" + for id, factory_result in entry.factories.items() + ) + + self.add_column("factories", summarize_factories) + + def add_property_column( + self, + property_key: int, + column_name: Optional[str] = None, + default_value: Any = None, + ) -> None: + """Add a column for a specific property key from the estimation results. + + Args: + property_key (int): The property key to add as a column. + column_name (Optional[str]): An optional name for the column. If not provided, the column will be named "property_{property_key}". + default_value (Any): The default value to use if the property key is not present in an entry's properties. Defaults to None. 
+ """ + if column_name is None: + # property_name may return None for unknown and custom property keys + column_name = property_name(property_key) + + if column_name is None: + column_name = f"property_{property_key}" + else: + # Normalize the column name to lowercase + column_name = column_name.lower() + + self.add_column( + column_name, + lambda entry: entry.properties.get(property_key, default_value), + ) + + def as_frame(self): + """Convert the estimation table to a ``pandas.DataFrame``. + + Each row corresponds to an ``EstimationTableEntry`` and each + column is determined by the columns registered on this table. Column + formatters, when present, are applied to the values before they are + placed in the frame. + + Returns: + pandas.DataFrame: A DataFrame representation of the estimation + results. + """ + return pd.DataFrame( + [ + { + column_name: ( + column.formatter(column.function(entry)) + if column.formatter is not None + else column.function(entry) + ) + for column_name, column in self._columns + } + for entry in self + ] + ) + + def plot(self, **kwargs): + """Plot this table's results. + + Convenience wrapper around ``plot_estimates``. All keyword + arguments are forwarded. + + Returns: + matplotlib.figure.Figure: The figure containing the plot. + """ + return plot_estimates(self, **kwargs) + + +@dataclass(frozen=True, slots=True) +class EstimationTableColumn: + """Definition of a single column in an ``EstimationTable``. + + Attributes: + function: A callable that extracts the raw column value from an + ``EstimationTableEntry``. + formatter: An optional callable that transforms the raw value for + display purposes (e.g. converting nanoseconds to a + ``pandas.Timedelta``). + """ + + function: Callable[[EstimationTableEntry], Any] + formatter: Optional[Callable[[Any], Any]] = None + + +@dataclass(frozen=True, slots=True) +class EstimationTableEntry: + """A single row in an ``EstimationTable``. 
+ + Each entry represents one Pareto-optimal estimation result for a + particular combination of application trace and architecture ISA. + + Attributes: + qubits: Total number of physical qubits required. + runtime: Total runtime of the algorithm in nanoseconds. + error: Total estimated error probability. + source: The instruction source derived from the architecture ISA used + for this estimation. + factories: A mapping from instruction id to the + ``FactoryResult`` describing the magic-state factory used + and the number of copies required. + properties: Additional key-value properties attached to the + estimation result. + """ + + qubits: int + runtime: int + error: float + source: InstructionSource + factories: dict[int, FactoryResult] = field(default_factory=dict) + properties: dict[int, int | float | bool | str] = field(default_factory=dict) + + @classmethod + def from_result( + cls, result: EstimationResult, ctx: ISAContext + ) -> EstimationTableEntry: + """Create an entry from an estimation result and architecture context. + + Args: + result (EstimationResult): The raw estimation result. + ctx (ISAContext): The architecture context used for the estimation. + + Returns: + EstimationTableEntry: A new table entry populated from the result. + """ + return cls( + qubits=result.qubits, + runtime=result.runtime, + error=result.error, + source=InstructionSource.from_isa(ctx, result.isa), + factories=result.factories.copy(), + properties=result.properties.copy(), + ) + + +@dataclass(slots=True) +class EstimationTableStats: + """Statistics for a single estimation run. + + Attributes: + num_traces (int): Number of traces evaluated. + num_isas (int): Number of ISAs evaluated. + total_jobs (int): Total estimation jobs executed. + successful_estimates (int): Number of jobs that produced a result. + pareto_results (int): Number of Pareto-optimal results retained. 
+ """ + + num_traces: int = 0 + num_isas: int = 0 + total_jobs: int = 0 + successful_estimates: int = 0 + pareto_results: int = 0 + + +# Mapping from runtime unit name to its value in nanoseconds. +_TIME_UNITS: dict[str, float] = { + "ns": 1, + "µs": 1e3, + "us": 1e3, + "ms": 1e6, + "s": 1e9, + "min": 60e9, + "hours": 3600e9, + "days": 86_400e9, + "weeks": 604_800e9, + "months": 31 * 86_400e9, + "years": 365 * 86_400e9, + "decades": 10 * 365 * 86_400e9, + "centuries": 100 * 365 * 86_400e9, +} + +# Ordered subset of _TIME_UNITS used for default x-axis tick labels. +_TICK_UNITS: list[tuple[str, float]] = [ + ("1 ns", _TIME_UNITS["ns"]), + ("1 µs", _TIME_UNITS["µs"]), + ("1 ms", _TIME_UNITS["ms"]), + ("1 s", _TIME_UNITS["s"]), + ("1 min", _TIME_UNITS["min"]), + ("1 hour", _TIME_UNITS["hours"]), + ("1 day", _TIME_UNITS["days"]), + ("1 week", _TIME_UNITS["weeks"]), + ("1 month", _TIME_UNITS["months"]), + ("1 year", _TIME_UNITS["years"]), + ("1 decade", _TIME_UNITS["decades"]), + ("1 century", _TIME_UNITS["centuries"]), +] + + +def plot_estimates( + data: EstimationTable | Iterable[EstimationTable], + *, + runtime_unit: Optional[str] = None, + figsize: tuple[float, float] = (15, 8), + scatter_args: dict[str, Any] = {"marker": "x"}, +): + """Plot estimation results displaying qubits vs runtime. + + Creates a log-log scatter plot where the x-axis shows the total runtime and + the y-axis shows the total number of physical qubits. + + *data* may be a single ``EstimationTable`` or an iterable of tables. When + multiple tables are provided, each is plotted as a separate series. If a + table has a ``EstimationTable.name`` (set via the *name* parameter of + ``estimate``), it is used as the legend label for that series. + + When *runtime_unit* is ``None`` (the default), the x-axis uses + human-readable time-unit tick labels spanning nanoseconds to centuries. + When a unit string is given (e.g. 
``"hours"``), all runtimes are scaled to + that unit and the x-axis label includes the unit while the ticks are plain + numbers. + + Supported *runtime_unit* values: ``"ns"``, ``"µs"`` (or ``"us"``), ``"ms"``, + ``"s"``, ``"min"``, ``"hours"``, ``"days"``, ``"weeks"``, ``"months"``, + ``"years"``. + + Args: + data: A single EstimationTable or an iterable of + EstimationTable objects to plot. + runtime_unit: Optional time unit to scale the x-axis to. + figsize: Figure dimensions in inches as ``(width, height)``. + scatter_args: Additional keyword arguments to pass to + ``matplotlib.axes.Axes.scatter`` when plotting the points. + + Returns: + matplotlib.figure.Figure: The figure containing the plot. + + Raises: + ImportError: If matplotlib is not installed. + ValueError: If all tables are empty or *runtime_unit* is not + recognised. + """ + try: + import matplotlib.pyplot as plt + except ImportError: + raise ImportError( + "Missing optional 'matplotlib' dependency. To install run: " + "pip install matplotlib" + ) + + # Normalize to a list of tables + if isinstance(data, EstimationTable): + tables = [data] + else: + tables = list(data) + + if not tables or all(len(t) == 0 for t in tables): + raise ValueError("Cannot plot an empty EstimationTable.") + + if runtime_unit is not None and runtime_unit not in _TIME_UNITS: + raise ValueError( + f"Unknown runtime_unit {runtime_unit!r}. 
" + f"Supported units: {', '.join(_TIME_UNITS)}" + ) + + fig, ax = plt.subplots(figsize=figsize) + ax.set_ylabel("Physical qubits") + ax.set_xscale("log") + ax.set_yscale("log") + + all_xs: list[float] = [] + has_labels = False + + for table in tables: + if len(table) == 0: + continue + + ys = [entry.qubits for entry in table] + + if runtime_unit is not None: + scale = _TIME_UNITS[runtime_unit] + xs = [entry.runtime / scale for entry in table] + else: + xs = [float(entry.runtime) for entry in table] + + all_xs.extend(xs) + + label = table.name + if label is not None: + has_labels = True + + ax.scatter(x=xs, y=ys, label=label, **scatter_args) + + if runtime_unit is not None: + ax.set_xlabel(f"Runtime ({runtime_unit})") + else: + ax.set_xlabel("Runtime") + + time_labels, time_units = zip(*_TICK_UNITS) + + cutoff = ( + next( + (i for i, x in enumerate(time_units) if x > max(all_xs)), + len(time_units) - 1, + ) + + 1 + ) + + ax.set_xticks(time_units[:cutoff]) + ax.set_xticklabels(time_labels[:cutoff], rotation=90) + + if has_labels: + ax.legend() + + plt.close(fig) + + return fig diff --git a/source/qdk_package/qdk/qre/_trace.py b/source/qdk_package/qdk/qre/_trace.py new file mode 100644 index 0000000000..49974e80d9 --- /dev/null +++ b/source/qdk_package/qdk/qre/_trace.py @@ -0,0 +1,195 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from __future__ import annotations +from abc import ABC, abstractmethod +from dataclasses import dataclass, KW_ONLY, field +from itertools import product +from types import NoneType +from typing import Any, Optional, Generator, Type, TYPE_CHECKING + +if TYPE_CHECKING: + from ._application import _Context +from ._enumeration import _enumerate_instances +from ._qre import PSSPC as _PSSPC, LatticeSurgery as _LatticeSurgery, Trace + + +class TraceTransform(ABC): + """Abstract base class for trace transformations.""" + + @abstractmethod + def transform(self, trace: Trace) -> Optional[Trace]: + """Apply this transformation to a trace. + + Args: + trace (Trace): The input trace. + + Returns: + Optional[Trace]: The transformed trace, or None if the + transformation is not applicable. + """ + ... + + @classmethod + def q(cls, **kwargs) -> TraceQuery: + """Create a trace query for this transform type. + + Args: + **kwargs: Domain overrides for parameter enumeration. + + Returns: + TraceQuery: A trace query wrapping this transform type. + """ + return TraceQuery(cls, **kwargs) + + +@dataclass +class PSSPC(TraceTransform): + """Pauli-based computation trace transform (PSSPC). + + Converts rotation gates and optionally CCX gates into T-state-based + operations suitable for lattice surgery resource estimation. + + Attributes: + num_ts_per_rotation (int): Number of T states used per rotation + gate. Default is 20. + ccx_magic_states (bool): If True, CCX gates are treated as magic + states rather than being decomposed into T gates. Default is + False. + """ + + _: KW_ONLY + num_ts_per_rotation: int = field( + default=20, metadata={"domain": list(range(5, 21))} + ) + ccx_magic_states: bool = field(default=False) + + def __post_init__(self): + self._psspc = _PSSPC(self.num_ts_per_rotation, self.ccx_magic_states) + + def transform(self, trace: Trace) -> Optional[Trace]: + """Apply the PSSPC transformation to a trace. + + Args: + trace (Trace): The input trace. 
+
+        Returns:
+            Optional[Trace]: The transformed trace.
+        """
+        return self._psspc.transform(trace)
+
+
+@dataclass
+class LatticeSurgery(TraceTransform):
+    """Lattice surgery trace transform.
+
+    Converts a trace into a form suitable for lattice-surgery-based
+    resource estimation.
+
+    Attributes:
+        slow_down_factor (float): Multiplicative factor applied to the
+            trace depth. Default is 1.0.
+    """
+
+    _: KW_ONLY
+    slow_down_factor: float = field(default=1.0, metadata={"domain": [1.0]})
+
+    def __post_init__(self):
+        self._lattice_surgery = _LatticeSurgery(self.slow_down_factor)
+
+    def transform(self, trace: Trace) -> Optional[Trace]:
+        """Apply the lattice surgery transformation to a trace.
+
+        Args:
+            trace (Trace): The input trace.
+
+        Returns:
+            Optional[Trace]: The transformed trace.
+        """
+        return self._lattice_surgery.transform(trace)
+
+
+class _Node(ABC):
+    """Abstract base class for trace enumeration nodes."""
+
+    @abstractmethod
+    def enumerate(self, ctx: _Context) -> Generator[Trace, None, None]: ...
+
+
+class TraceQuery(_Node):
+    """A query that enumerates transformed traces from an application.
+
+    A trace query chains a sequence of trace transforms, each with optional
+    keyword arguments to override their default parameter domains.
+    """
+
+    # Sequence of trace transforms with kwargs that override their domains. The
+    # first element may be (NoneType, kwargs) to forward kwargs to the application.
+    sequence: list[tuple[Type, dict[str, Any]]]
+
+    def __init__(self, t: Type, **kwargs):
+        self.sequence = [(t, kwargs)]
+
+    def enumerate(
+        self, ctx: _Context, track_parameters: bool = False
+    ) -> Generator[Trace | tuple[Any, Trace], None, None]:
+        """Enumerate transformed traces from the application context.
+
+        Args:
+            ctx (_Context): The application enumeration context.
+            track_parameters (bool): If True, yield ``(parameters, trace)``
+                tuples instead of plain traces. Default is False.
+ + Yields: + Trace | tuple[Any, Trace]: A transformed trace, or a + ``(parameters, trace)`` tuple when *track_parameters* is True. + """ + sequence = self.sequence + kwargs = {} + if len(sequence) > 0 and sequence[0][0] is NoneType: + kwargs = sequence[0][1] + sequence = sequence[1:] + + if track_parameters: + source = ctx.application.enumerate_traces_with_parameters(**kwargs) + else: + source = ((None, t) for t in ctx.application.enumerate_traces(**kwargs)) + + for params, trace in source: + if not sequence: + yield (params, trace) if track_parameters else trace + continue + + transformer_instances = [] + + for t, transformer_kwargs in sequence: + instances = _enumerate_instances(t, **transformer_kwargs) + transformer_instances.append(instances) + + for combination in product(*transformer_instances): + transformed = trace + for transformer in combination: + transformed = transformer.transform(transformed) + yield (params, transformed) if track_parameters else transformed + + def __mul__(self, other: TraceQuery) -> TraceQuery: + """Chain another trace query onto this one. + + Args: + other (TraceQuery): The trace query to append. + + Returns: + TraceQuery: A new query with the combined transform sequence. + + Raises: + ValueError: If *other* begins with a None transform. + """ + new_query = TraceQuery.__new__(TraceQuery) + + if len(other.sequence) > 0 and other.sequence[0][0] is NoneType: + raise ValueError( + "Cannot multiply with a TraceQuery that has a None transform at the beginning of its sequence." + ) + + new_query.sequence = self.sequence + other.sequence + return new_query diff --git a/source/qdk_package/qdk/qre/application.py b/source/qdk_package/qdk/qre/application.py deleted file mode 100644 index b7c33c4ef2..0000000000 --- a/source/qdk_package/qdk/qre/application.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -# flake8: noqa F403 -# pyright: ignore[reportWildcardImportFromLibrary] - -"""QRE application definitions. - -This module re-exports all public symbols from [qsharp.qre.application](:mod:`qsharp.qre.application`), -making them available under the ``qdk.qre.application`` namespace. It provides -classes for defining quantum applications to be passed to the resource estimator. - -Requires the ``qre`` extra: ``pip install qdk[qre]``. - -Example: - - from qdk.qre.application import QSharpApplication -""" - -try: - # Re-export the top-level qsharp.qre.application names. - from qsharp.qre.application import * -except Exception as ex: - raise ImportError( - "qdk.qre.application requires the qre extras. Install with 'pip install \"qdk[qre]\"'." - ) from ex diff --git a/source/qdk_package/qdk/qre/application/__init__.py b/source/qdk_package/qdk/qre/application/__init__.py new file mode 100644 index 0000000000..f6ee4c9f08 --- /dev/null +++ b/source/qdk_package/qdk/qre/application/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from ._cirq import CirqApplication +from ._qir import QIRApplication +from ._qsharp import QSharpApplication +from ._openqasm import OpenQASMApplication + +__all__ = [ + "CirqApplication", + "QIRApplication", + "QSharpApplication", + "OpenQASMApplication", +] diff --git a/source/qdk_package/qdk/qre/application/_cirq.py b/source/qdk_package/qdk/qre/application/_cirq.py new file mode 100644 index 0000000000..a49c58e317 --- /dev/null +++ b/source/qdk_package/qdk/qre/application/_cirq.py @@ -0,0 +1,58 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + + +from __future__ import annotations + +from dataclasses import dataclass + +import cirq + +from ... 
import telemetry_events +from .._application import Application +from .._qre import Trace +from ..interop import trace_from_cirq + + +@dataclass +class CirqApplication(Application[None]): + """Application that produces a resource estimation trace from a Cirq circuit. + + Accepts either a Cirq ``Circuit`` object or an OpenQASM string. When a + QASM string is provided, it is parsed into a circuit using + ``cirq.contrib.qasm_import`` (requires the optional ``ply`` dependency). + + Args: + circuit_or_qasm: A Cirq Circuit or an OpenQASM string. + classical_control_probability: Probability that a classically + controlled operation is included in the trace. Defaults to 0.5. + """ + + circuit_or_qasm: str | cirq.CIRCUIT_LIKE + classical_control_probability: float = 0.5 + + def __post_init__(self): + telemetry_events.on_qre_application_created("CirqApplication") + if isinstance(self.circuit_or_qasm, str): + try: + from cirq.contrib.qasm_import import circuit_from_qasm + + self._circuit = circuit_from_qasm(self.circuit_or_qasm) + except ImportError: + raise ImportError( + "Missing optional 'ply' dependency. To install run: " + "pip install ply" + ) + else: + self._circuit = self.circuit_or_qasm + + def get_trace(self, parameters: None = None) -> Trace: + """Return the resource estimation trace for the Cirq circuit. + + Args: + parameters (None): Unused. Defaults to None. + + Returns: + Trace: The resource estimation trace. + """ + return trace_from_cirq(self._circuit) diff --git a/source/qdk_package/qdk/qre/application/_openqasm.py b/source/qdk_package/qdk/qre/application/_openqasm.py new file mode 100644 index 0000000000..ed75112c53 --- /dev/null +++ b/source/qdk_package/qdk/qre/application/_openqasm.py @@ -0,0 +1,68 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + + +from __future__ import annotations + + +import random +from dataclasses import dataclass +from typing import Callable + +from ... import code +from ... 
import telemetry_events +from ...estimator import LogicalCounts +from .._qre import Trace +from .._application import Application +from ..interop import trace_from_entry_expr + + +@dataclass +class OpenQASMApplication(Application[None]): + """Application that produces a resource estimation trace from OpenQASM code. + + Accepts an OpenQASM program string or a callable. + + Attributes: + program (str | Callable): The OpenQASM program as string or callable. + args (tuple): The arguments to pass to the callable, if one is + provided. Default is an empty tuple. + """ + + program: str | Callable | LogicalCounts + args: tuple = () + + def __post_init__(self): + """Log telemetry for OpenQASMApplication creation.""" + telemetry_events.on_qre_application_created("OpenQASMApplication") + + def get_trace(self, parameters: None = None) -> Trace: + """Return the resource estimation trace for the OpenQASM program. + + Args: + parameters (None): Unused. Defaults to None. + + Returns: + Trace: The resource estimation trace. + """ + if isinstance(self.program, str): + from ...openqasm import import_openqasm, ProgramType + + name_found = False + for _ in range(1_000): + name = f"openqasm{random.randint(0, 1_000_000)}" + if not hasattr(code, "qasm_import") or not hasattr( + code.qasm_import, name + ): + name_found = True + break + + if not name_found: + raise RuntimeError( + "Failed to find a unique name for the OpenQASM program." + ) + + import_openqasm(self.program, name=name, program_type=ProgramType.File) + self.program = getattr(code.qasm_import, name) + + return trace_from_entry_expr(self.program, *self.args) diff --git a/source/qdk_package/qdk/qre/application/_qir.py b/source/qdk_package/qdk/qre/application/_qir.py new file mode 100644 index 0000000000..3536468fe7 --- /dev/null +++ b/source/qdk_package/qdk/qre/application/_qir.py @@ -0,0 +1,42 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ + +from __future__ import annotations + +from dataclasses import dataclass + +from ... import telemetry_events +from .._qre import Trace +from .._application import Application +from ..interop import trace_from_qir + + +@dataclass +class QIRApplication(Application[None]): + """Application that produces a resource estimation trace from base profile QIR code. + + Accepts QIR input as LLVM IR text or bitcode. The QIR input must adhere to + the base profile. + + Attributes: + input (str | bytes): QIR input as LLVM IR text (str) or + bitcode (bytes). + """ + + input: str | bytes + + def __post_init__(self): + """Log telemetry for QIRApplication creation.""" + telemetry_events.on_qre_application_created("QIRApplication") + + def get_trace(self, parameters: None = None) -> Trace: + """Return the resource estimation trace for the QIR program. + + Args: + parameters (None): Unused. Defaults to None. + + Returns: + Trace: The resource estimation trace. + """ + return trace_from_qir(self.input) diff --git a/source/qdk_package/qdk/qre/application/_qsharp.py b/source/qdk_package/qdk/qre/application/_qsharp.py new file mode 100644 index 0000000000..bfc11d1a98 --- /dev/null +++ b/source/qdk_package/qdk/qre/application/_qsharp.py @@ -0,0 +1,60 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + + +from __future__ import annotations + +from pathlib import Path +from dataclasses import dataclass, field +from typing import Callable + +from ...estimator import LogicalCounts +from ... import telemetry_events +from .._qre import Trace +from .._application import Application +from ..interop import trace_from_entry_expr_cached + + +@dataclass +class QSharpApplication(Application[None]): + """Application that produces a resource estimation trace from Q# code. + + Accepts a Q# entry expression string, a callable, or pre-computed + ``LogicalCounts``. 
+ + Attributes: + entry_expr (str | Callable | LogicalCounts): The Q# entry + expression, a callable returning logical counts, or + pre-computed logical counts. + args (tuple): The arguments to pass to the callable, if one is + provided. Default is an empty tuple. + cache_dir (Path): Directory for caching compiled traces. + use_cache (bool): Whether to use the trace cache. Default is False. + """ + + entry_expr: str | Callable | LogicalCounts + args: tuple = () + cache_dir: Path = field( + default=Path.home() / ".cache" / "re3" / "qsharp", repr=False + ) + use_cache: bool = field(default=False, repr=False) + + def __post_init__(self): + """Log telemetry for QSharpApplication creation.""" + telemetry_events.on_qre_application_created("QSharpApplication") + + def get_trace(self, parameters: None = None) -> Trace: + """Return the resource estimation trace for the Q# program. + + Args: + parameters (None): Unused. Defaults to None. + + Returns: + Trace: The resource estimation trace. + """ + if self.use_cache and isinstance(self.entry_expr, str): + cache_path = self.cache_dir / f"{self.entry_expr}.json" + else: + cache_path = None + + return trace_from_entry_expr_cached(self.entry_expr, cache_path, *self.args) diff --git a/source/qdk_package/qdk/qre/instruction_ids.py b/source/qdk_package/qdk/qre/instruction_ids.py index 4b6f81c4b8..cec4a9c070 100644 --- a/source/qdk_package/qdk/qre/instruction_ids.py +++ b/source/qdk_package/qdk/qre/instruction_ids.py @@ -1,27 +1,10 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -# flake8: noqa F403 -# pyright: ignore[reportWildcardImportFromLibrary] +# pyright: reportAttributeAccessIssue=false -"""QRE instruction identifiers. -This module re-exports all public symbols from [qsharp.qre.instruction_ids](:mod:`qsharp.qre.instruction_ids`), -making them available under the ``qdk.qre.instruction_ids`` namespace. 
It provides -constants identifying the quantum instruction set operations used in resource -estimation traces. +from .._native import instruction_ids -Requires the ``qre`` extra: ``pip install qdk[qre]``. - -Example: - - from qdk.qre.instruction_ids import * -""" - -try: - # Re-export the top-level qsharp.qre.instruction_ids names. - from qsharp.qre.instruction_ids import * -except Exception as ex: - raise ImportError( - "qdk.qre.instruction_ids requires the qre extras. Install with 'pip install \"qdk[qre]\"'." - ) from ex +for name in instruction_ids.__all__: + globals()[name] = getattr(instruction_ids, name) diff --git a/source/qdk_package/qdk/qre/instruction_ids.pyi b/source/qdk_package/qdk/qre/instruction_ids.pyi new file mode 100644 index 0000000000..240146ec76 --- /dev/null +++ b/source/qdk_package/qdk/qre/instruction_ids.pyi @@ -0,0 +1,99 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +# Paulis +PAULI_I: int +PAULI_X: int +PAULI_Y: int +PAULI_Z: int + +# Clifford gates +H: int +H_XZ: int +H_XY: int +H_YZ: int +SQRT_X: int +SQRT_X_DAG: int +SQRT_Y: int +SQRT_Y_DAG: int +S: int +SQRT_Z: int +S_DAG: int +SQRT_Z_DAG: int +CNOT: int +CX: int +CY: int +CZ: int +SWAP: int + +# State preparation +PREP_X: int +PREP_Y: int +PREP_Z: int + +# Generic Cliffords +ONE_QUBIT_CLIFFORD: int +TWO_QUBIT_CLIFFORD: int +N_QUBIT_CLIFFORD: int + +# Measurements +MEAS_X: int +MEAS_Y: int +MEAS_Z: int +MEAS_RESET_X: int +MEAS_RESET_Y: int +MEAS_RESET_Z: int +MEAS_XX: int +MEAS_YY: int +MEAS_ZZ: int +MEAS_XZ: int +MEAS_XY: int +MEAS_YZ: int + +# Non-Clifford gates +SQRT_SQRT_X: int +SQRT_SQRT_X_DAG: int +SQRT_SQRT_Y: int +SQRT_SQRT_Y_DAG: int +SQRT_SQRT_Z: int +T: int +SQRT_SQRT_Z_DAG: int +T_DAG: int +CCX: int +CCY: int +CCZ: int +CSWAP: int +AND: int +AND_DAG: int +RX: int +RY: int +RZ: int +CRX: int +CRY: int +CRZ: int +RXX: int +RYY: int +RZZ: int + +# Generic unitary gates +ONE_QUBIT_UNITARY: int +TWO_QUBIT_UNITARY: int + +# Multi-qubit Pauli 
measurement +MULTI_PAULI_MEAS: int + +# Some generic logical instructions +LATTICE_SURGERY: int + +# Memory/compute operations (used in compute parts of memory-compute layouts) +READ_FROM_MEMORY: int +WRITE_TO_MEMORY: int +MEMORY: int + +# Some special hardware physical instructions +CYCLIC_SHIFT: int +PHYSICAL_MOVE: int +HAND_OFF: int + +# Generic operation (for unified RE) +GENERIC: int diff --git a/source/qdk_package/qdk/qre/interop.py b/source/qdk_package/qdk/qre/interop.py deleted file mode 100644 index 5d41c9fbb3..0000000000 --- a/source/qdk_package/qdk/qre/interop.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -# flake8: noqa F403 -# pyright: ignore[reportWildcardImportFromLibrary] - -"""QRE interoperability utilities. - -This module re-exports all public symbols from [qsharp.qre.interop](:mod:`qsharp.qre.interop`), -making them available under the ``qdk.qre.interop`` namespace. It provides -functions for generating resource estimation traces from Q#, Cirq, QIR, and -OpenQASM programs. - -Requires the ``qre`` extra: ``pip install qdk[qre]``. - -Example: - - from qdk.qre.interop import trace_from_qir -""" - -try: - # Re-export the top-level qsharp.qre.interop names. - from qsharp.qre.interop import * -except Exception as ex: - raise ImportError( - "qdk.qre.interop requires the qre extras. Install with 'pip install \"qdk[qre]\"'." - ) from ex diff --git a/source/qdk_package/qdk/qre/interop/__init__.py b/source/qdk_package/qdk/qre/interop/__init__.py new file mode 100644 index 0000000000..52917a3a42 --- /dev/null +++ b/source/qdk_package/qdk/qre/interop/__init__.py @@ -0,0 +1,35 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from ._cirq import ( + PeakUsageGreedyQubitManager, + PopBlock, + PushBlock, + QubitType, + ReadFromMemoryGate, + TypedQubit, + WriteToMemoryGate, + assert_qubits_type, + read_from_memory, + trace_from_cirq, + write_to_memory, +) +from ._qir import trace_from_qir +from ._qsharp import trace_from_entry_expr, trace_from_entry_expr_cached + +__all__ = [ + "trace_from_cirq", + "trace_from_entry_expr", + "trace_from_entry_expr_cached", + "trace_from_qir", + "PushBlock", + "PopBlock", + "QubitType", + "TypedQubit", + "PeakUsageGreedyQubitManager", + "ReadFromMemoryGate", + "WriteToMemoryGate", + "write_to_memory", + "read_from_memory", + "assert_qubits_type", +] diff --git a/source/qdk_package/qdk/qre/interop/_cirq.py b/source/qdk_package/qdk/qre/interop/_cirq.py new file mode 100644 index 0000000000..8665d8252a --- /dev/null +++ b/source/qdk_package/qdk/qre/interop/_cirq.py @@ -0,0 +1,822 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from __future__ import annotations + +import random +from dataclasses import dataclass +from enum import Enum +from math import pi +from typing import Iterable, Iterator, Sequence, cast + +import cirq +from cirq import ( + CCXPowGate, + CCZPowGate, + ClassicallyControlledOperation, + CXPowGate, + CZPowGate, + GateOperation, + HPowGate, + MeasurementGate, + PhaseGradientGate, + ResetChannel, + SwapPowGate, + XPowGate, + YPowGate, + ZPowGate, +) + +from qdk.qre import Block, Trace +from qdk.qre.instruction_ids import ( + CCX, + CCZ, + CX, + CZ, + MEAS_Z, + PAULI_X, + PAULI_Y, + PAULI_Z, + READ_FROM_MEMORY, + RX, + RY, + RZ, + S_DAG, + SQRT_SQRT_X, + SQRT_SQRT_X_DAG, + SQRT_SQRT_Y, + SQRT_SQRT_Y_DAG, + SQRT_X, + SQRT_X_DAG, + SQRT_Y, + SQRT_Y_DAG, + SWAP, + T_DAG, + WRITE_TO_MEMORY, + H, + S, + T, +) + +_TOLERANCE = 1e-8 + + +def _approx_eq(a: float, b: float) -> bool: + """Check whether two floats are approximately equal.""" + return abs(a - b) <= _TOLERANCE + + +def trace_from_cirq( + circuit: 
cirq.CIRCUIT_LIKE,
+    *,
+    classical_control_probability: float = 0.5,
+    rotation_threshold: float = 1e-6,
+    track_memory_qubits: bool = True,
+) -> Trace:
+    """Convert a Cirq circuit into a resource estimation Trace.
+
+    Iterates through all moments and operations in the circuit, converting
+    each gate into trace operations. Gates with a ``_to_trace`` method are
+    converted directly; others are recursively decomposed via Cirq's
+    ``_decompose_with_context_`` or ``_decompose_`` protocols.
+
+    Args:
+        circuit: The Cirq circuit to convert.
+        classical_control_probability: Probability that a classically
+            controlled operation is included in the trace. Defaults to 0.5.
+        rotation_threshold: Rotation exponents with absolute value below
+            this threshold are treated as identity and omitted from the
+            trace. This applies to single-qubit rotations (RX, RY, RZ) as
+            well as to the rotation components of controlled-Z
+            decompositions. Defaults to 1e-6.
+        track_memory_qubits (bool): When True, memory qubits are tracked
+            separately from compute qubits. When False, all qubits are treated
+            as compute qubits. Also, if True, read-from-memory and
+            write-to-memory instructions are preserved in the trace, otherwise,
+            they are decomposed into SWAP and RESET instructions. Defaults to
+            True.
+
+    Returns:
+        Trace: A Trace representing an execution profile of the circuit.
+    """
+
+    if isinstance(circuit, cirq.Circuit):
+        # circuit is already in the expected format, so we can process it directly.
+ pass + elif isinstance(circuit, cirq.Gate): + circuit = cirq.Circuit(circuit.on(*cirq.LineQid.for_gate(circuit))) + else: + # circuit is OP_TREE + circuit = cirq.Circuit(circuit) + + context = _CirqTraceBuilder( + circuit, classical_control_probability, rotation_threshold, track_memory_qubits + ) + + for moment in circuit: + for op in moment.operations: + context.handle_op(op) + + return context.trace + + +class _CirqTraceBuilder: + """Builds a resource estimation ``Trace`` from a Cirq circuit. + + This class walks the operations produced by ``trace_from_cirq`` and + translates each one into trace instructions. It maintains the state + needed during the conversion: + + * A ``Trace`` instance that accumulates the result. + * A stack of ``Block`` objects so that ``PushBlock`` / ``PopBlock`` + markers can create nested repeated sections. + * A qubit-id mapping (``_QidToTraceId``) that assigns each Cirq qubit + a sequential integer index. + * A Cirq ``DecompositionContext`` for gates that need recursive + decomposition. + + Args: + circuit: The Cirq circuit being converted. + classical_control_probability: Probability that a classically + controlled operation is included in the trace. + rotation_threshold: Rotation exponents with absolute value below + this threshold are treated as identity. 
+ """ + + def __init__( + self, + circuit: cirq.Circuit, + classical_control_probability: float, + rotation_threshold: float, + track_memory_qubits: bool = True, + ): + self._circuit = circuit + self._trace = Trace(0) + self._classical_control_probability = classical_control_probability + self._rotation_threshold = rotation_threshold + self._track_memory_qubits = track_memory_qubits + self._blocks = [self._trace.root_block()] + self._q_to_id = _QidToTraceId(circuit.all_qubits()) + self._decomp_context = cirq.DecompositionContext( + qubit_manager=PeakUsageGreedyQubitManager( + "trace_from_cirq", size=0, maximize_reuse=True + ) + ) + + def push_block(self, repetitions: int): + """Open a new repeated block with the given number of repetitions.""" + block = self.block.add_block(repetitions) + self._blocks.append(block) + + def pop_block(self): + """Close the current repeated block, returning to the parent.""" + self._blocks.pop() + + @property + def trace(self) -> Trace: + """Determine compute and memory qubits from the circuit's qubits as well + as from the qubit manager before returning the trace.""" + + qm = cast(PeakUsageGreedyQubitManager, self._decomp_context.qubit_manager) + num_memory_qubits, num_compute_qubits = 0, 0 + + for q in self._circuit.all_qubits(): + if ( + self._track_memory_qubits + and isinstance(q, TypedQubit) + and q.qubit_type == QubitType.MEMORY + ): + num_memory_qubits += 1 + else: + # Untyped qubits are considered COMPUTE by default. 
+ num_compute_qubits += 1 + + if self._track_memory_qubits: + num_memory_qubits += qm.memory_qubit_count() + else: + num_compute_qubits += qm.memory_qubit_count() + num_compute_qubits += qm.compute_qubit_count() + + self._trace.compute_qubits = num_compute_qubits + if self._track_memory_qubits and num_memory_qubits > 0: + self._trace.memory_qubits = num_memory_qubits + + return self._trace + + @property + def block(self) -> Block: + """The innermost open block in the trace.""" + return self._blocks[-1] + + @property + def q_to_id(self) -> _QidToTraceId: + """Mapping from Cirq ``Qid`` to integer trace qubit index.""" + return self._q_to_id + + @property + def classical_control_probability(self) -> float: + """Probability used to stochastically include classically controlled + operations.""" + return self._classical_control_probability + + @property + def rotation_threshold(self) -> float: + """Rotation exponents with absolute value below this threshold are + treated as identity.""" + return self._rotation_threshold + + @property + def decomp_context(self) -> cirq.DecompositionContext: + """Cirq decomposition context shared across all recursive + decompositions.""" + return self._decomp_context + + def handle_op( + self, + op: cirq.OP_TREE | TraceGate | PushBlock | PopBlock, + ) -> None: + """Recursively convert a single operation into trace instructions. + + Supported operation forms: + + - ``TraceGate``: A raw trace instruction, added directly to the + current block. + - ``PushBlock`` / ``PopBlock``: Control block nesting with + repetitions. + - ``GateOperation``: Dispatched via ``_to_trace`` if available on + the gate, otherwise decomposed via + ``_decompose_with_context_`` or ``_decompose_``. + - ``ClassicallyControlledOperation``: Included with the probability + given by ``classical_control_probability``. + - ``list`` / iterable: Each element is handled recursively. + - Any other ``cirq.Operation``: Decomposed via + ``_decompose_with_context_``. 
+ + Args: + op: The operation to convert. + """ + if isinstance(op, TraceGate): + qs = [ + self.q_to_id[q] + for q in ([op.qubits] if isinstance(op.qubits, cirq.Qid) else op.qubits) + ] + + if op.params is None: + self.block.add_operation(op.id, qs) + else: + self.block.add_operation( + op.id, qs, op.params if isinstance(op.params, list) else [op.params] + ) + elif isinstance(op, PushBlock): + self.push_block(op.repetitions) + elif isinstance(op, PopBlock): + self.pop_block() + elif isinstance(op, cirq.Operation): + if isinstance(op, GateOperation): + gate = op.gate + + if hasattr(gate, "_to_trace"): + for sub_op in gate._to_trace(self, op): # type: ignore + self.handle_op(sub_op) + elif hasattr(gate, "_decompose_with_context_"): + for sub_op in gate._decompose_with_context_(op.qubits, self.decomp_context): # type: ignore + self.handle_op(sub_op) + elif hasattr(gate, "_decompose_"): + # decompose the gate and handle the resulting operations recursively + for sub_op in gate._decompose_(op.qubits): # type: ignore + self.handle_op(sub_op) + else: + for sub_op in op._decompose_with_context_(self.decomp_context): # type: ignore + self.handle_op(sub_op) + elif isinstance(op, ClassicallyControlledOperation): + if random.random() < self.classical_control_probability: + self.handle_op(op.without_classical_controls()) + elif isinstance(op, cirq.CircuitOperation): + if isinstance(op.repetitions, int): + self.push_block(op.repetitions) + for sub_op in op.circuit: # type: ignore + self.handle_op(sub_op) + self.pop_block() + else: + raise ValueError( + "Only integer repetitions are supported for CircuitOperation." + ) + else: + for sub_op in op._decompose_with_context_(self.decomp_context): # type: ignore + self.handle_op(sub_op) + else: + # op is Iterable[OP_TREE] + for sub_op in op: + self.handle_op(sub_op) + + +@dataclass(frozen=True, slots=True) +class PushBlock: + """Signals the start of a repeated block in the trace. 
+ + Args: + repetitions: Number of times the block is repeated. + """ + + repetitions: int + + +@dataclass(frozen=True, slots=True) +class PopBlock: + """Signals the end of the current repeated block in the trace.""" + + ... + + +@dataclass(frozen=True, slots=True) +class TraceGate: + """A raw trace instruction emitted during Cirq circuit conversion. + + Attributes: + id (int): The instruction ID. + qubits (list[cirq.Qid] | cirq.Qid): The target qubits. + params (list[float] | float | None): Optional gate parameters. + """ + + id: int + qubits: list[cirq.Qid] | cirq.Qid + params: list[float] | float | None = None + + +class _QidToTraceId(dict): + """Mapping from Cirq qubits to integer trace qubit indices. + + Initialized with a set of known qubits. If an unknown qubit is looked + up, it is automatically assigned the next available index. + """ + + def __init__(self, init: Iterable[cirq.Qid]): + super().__init__({q: i for i, q in enumerate(init)}) + + def __getitem__(self, key: cirq.Qid) -> int: + """ + If the key is not present, add it to the mapping with the next available id. 
+ """ + + if key not in self: + self[key] = len(self) + return super().__getitem__(key) + + +def h_pow_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): + """Convert an HPowGate into trace instructions.""" + if _approx_eq(abs(self.exponent), 1): + yield TraceGate(H, [op.qubits[0]]) + else: + yield from op._decompose_with_context_(context.decomp_context) # type: ignore + + +def x_pow_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): + """Convert an XPowGate into trace instructions.""" + q = [op.qubits[0]] + exp = self.exponent + if _approx_eq(exp, 1) or _approx_eq(exp, -1): + yield TraceGate(PAULI_X, q) + elif _approx_eq(exp, 0.5): + yield TraceGate(SQRT_X, q) + elif _approx_eq(exp, -0.5): + yield TraceGate(SQRT_X_DAG, q) + elif _approx_eq(exp, 0.25): + yield TraceGate(SQRT_SQRT_X, q) + elif _approx_eq(exp, -0.25): + yield TraceGate(SQRT_SQRT_X_DAG, q) + else: + if abs(exp) >= context.rotation_threshold: + yield TraceGate(RX, q, exp * pi) + + +def y_pow_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): + """Convert a YPowGate into trace instructions.""" + q = [op.qubits[0]] + exp = self.exponent + if _approx_eq(exp, 1) or _approx_eq(exp, -1): + yield TraceGate(PAULI_Y, q) + elif _approx_eq(exp, 0.5): + yield TraceGate(SQRT_Y, q) + elif _approx_eq(exp, -0.5): + yield TraceGate(SQRT_Y_DAG, q) + elif _approx_eq(exp, 0.25): + yield TraceGate(SQRT_SQRT_Y, q) + elif _approx_eq(exp, -0.25): + yield TraceGate(SQRT_SQRT_Y_DAG, q) + else: + if abs(exp) >= context.rotation_threshold: + yield TraceGate(RY, q, exp * pi) + + +def z_pow_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): + """Convert a ZPowGate into trace instructions.""" + q = [op.qubits[0]] + exp = self.exponent + if _approx_eq(exp, 1) or _approx_eq(exp, -1): + yield TraceGate(PAULI_Z, q) + elif _approx_eq(exp, 0.5): + yield TraceGate(S, q) + elif _approx_eq(exp, -0.5): + yield TraceGate(S_DAG, q) + elif _approx_eq(exp, 0.25): + yield 
TraceGate(T, q) + elif _approx_eq(exp, -0.25): + yield TraceGate(T_DAG, q) + else: + if abs(exp) >= context.rotation_threshold: + yield TraceGate(RZ, q, exp * pi) + + +def cx_pow_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): + """Convert a CXPowGate into trace instructions.""" + if _approx_eq(abs(self.exponent), 1): + yield TraceGate(CX, [op.qubits[0], op.qubits[1]]) + else: + yield from op._decompose_with_context_(context.decomp_context) # type: ignore + + +def cz_pow_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): + """Convert a CZPowGate into trace instructions.""" + exp = self.exponent + c, t = op.qubits[0], op.qubits[1] + if _approx_eq(abs(exp), 1): + yield TraceGate(CZ, [c, t]) + elif _approx_eq(exp, 0.5): + # controlled S gate + yield TraceGate(T, [c]) + yield TraceGate(T, [t]) + yield TraceGate(CZ, [c, t]) + yield TraceGate(T_DAG, [t]) + yield TraceGate(CZ, [c, t]) + elif _approx_eq(exp, -0.5): + # controlled S† gate + yield TraceGate(T_DAG, [c]) + yield TraceGate(T_DAG, [t]) + yield TraceGate(CZ, [c, t]) + yield TraceGate(T, [t]) + yield TraceGate(CZ, [c, t]) + else: + half_exp = exp / 2 + if abs(half_exp) >= context.rotation_threshold: + rads = half_exp * pi + yield TraceGate(RZ, [c], [rads]) + yield TraceGate(RZ, [t], [rads]) + yield TraceGate(CZ, [c, t]) + yield TraceGate(RZ, [t], [-rads]) + yield TraceGate(CZ, [c, t]) + + +def swap_pow_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): + """Convert a SwapPowGate into trace instructions.""" + if _approx_eq(abs(self.exponent), 1): + yield TraceGate(SWAP, [op.qubits[0], op.qubits[1]]) + else: + yield from op._decompose_with_context_(context.decomp_context) # type: ignore + + +def ccx_pow_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): + """Convert a CCXPowGate into trace instructions.""" + if _approx_eq(abs(self.exponent), 1): + yield TraceGate(CCX, [op.qubits[0], op.qubits[1], op.qubits[2]]) + else: + yield from 
op._decompose_with_context_(context.decomp_context) # type: ignore + + +def ccz_pow_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): + """Convert a CCZPowGate into trace instructions.""" + if _approx_eq(abs(self.exponent), 1): + yield TraceGate(CCZ, [op.qubits[0], op.qubits[1], op.qubits[2]]) + else: + yield from op._decompose_with_context_(context.decomp_context) # type: ignore + + +def measurement_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): + """Convert a MeasurementGate into trace instructions.""" + for q in op.qubits: + yield TraceGate(MEAS_Z, [q]) + + +def reset_channel_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): + """Convert a ResetChannel into trace instructions (no-op).""" + yield from () + + +# Attach _to_trace methods to Cirq gate classes so that handle_op can +# convert them directly into trace instructions without decomposition. +HPowGate._to_trace = h_pow_gate_to_trace +XPowGate._to_trace = x_pow_gate_to_trace +YPowGate._to_trace = y_pow_gate_to_trace +ZPowGate._to_trace = z_pow_gate_to_trace +CXPowGate._to_trace = cx_pow_gate_to_trace +CZPowGate._to_trace = cz_pow_gate_to_trace +SwapPowGate._to_trace = swap_pow_gate_to_trace +CCXPowGate._to_trace = ccx_pow_gate_to_trace +CCZPowGate._to_trace = ccz_pow_gate_to_trace +MeasurementGate._to_trace = measurement_gate_to_trace +ResetChannel._to_trace = reset_channel_to_trace + +# Decomposition overrides + + +def phase_gradient_decompose(self, qubits): + """Override PhaseGradientGate._decompose_ to skip rotations with very small angles. + + The original implementation may lead to floating-point overflows for + large values of i. + """ + + for i, q in enumerate(qubits): + exp = self.exponent / 2**i + if abs(exp) < 1e-6: + break + yield cirq.Z(q) ** exp + + +PhaseGradientGate._decompose_ = phase_gradient_decompose + + +class QubitType(Enum): + """Qubit type. + + Each logical qubit can be either a compute or memory qubit. 
Compute qubits + can be used normally. + + Memory qubits have a restriction that gates cannot be applied to them. The + only allowed operations on memory qubits are reads/writes, where state is + moved from memory to compute gate or from compute to memory gate. + + We assume that when error correction is applied, memory qubits are encoded + with a more efficient error correction scheme requiring less resources, but + not allowing gate application (e.g. Yoked surface codes, + https://arxiv.org/abs/2312.04522). + """ + + COMPUTE = 1 + MEMORY = 2 + + +class TypedQubit(cirq.Qid): + """Qubit with type.""" + + def __init__( + self, + qubit: cirq.Qid, + qubit_type: QubitType, + ): + """Initializes typed qubit.""" + self._qubit = qubit + self.qubit_type = qubit_type + + def _comparison_key(self) -> object: + """Comparison key.""" + return self._qubit._comparison_key() + + @property + def dimension(self) -> int: + """Dimension.""" + return cast("int", self._qubit.dimension) + + def __repr__(self) -> str: + """String representation of the qubit.""" + return repr(self._qubit) + + +def _as_typed_qubit(q: cirq.Qid) -> TypedQubit: + """Converts qubit to TypedQubit.""" + assert isinstance(q, TypedQubit) + return q + + +def assert_qubits_type(qs: Sequence[cirq.Qid], qubit_type: QubitType) -> None: + """Asserts that qubits have specified type, but only if they are TypedQubits.""" + if len(qs) == 0 or not isinstance(qs[0], TypedQubit): + return + + for q in qs: + actual_type = _as_typed_qubit(q).qubit_type + assert ( + actual_type == qubit_type + ), f"{q} expected to be {qubit_type}, was {actual_type}." + + +class _TypedQubitManager(cirq.GreedyQubitManager): + """Qubit manager managing qubits of specified type. + + All allocated qubits will have specified type. + Tracks current and peak number of qubits. 
+ """ + + def __init__( + self, prefix: str, qubit_type: QubitType, *, size: int, maximize_reuse: bool + ): + """Initialize the manager.""" + prefix = prefix + "_" + qubit_type.name[0] + super().__init__(prefix, size=size, maximize_reuse=maximize_reuse) + self.qubit_type = qubit_type + self.current_in_use = 0 + self.peak_in_use = 0 + + def _allocate_qid(self, name: str, dim: int) -> cirq.Qid: + """Allocates single qubit.""" + return TypedQubit(super()._allocate_qid(name, dim), self.qubit_type) + + def qalloc(self, n: int, dim: int) -> list[cirq.Qid]: + """Allocate ``n`` qubits and update the usage counters.""" + qs = super().qalloc(n, dim) + self.current_in_use += len(qs) + self.peak_in_use = max(self.peak_in_use, self.current_in_use) + return cast("list[cirq.Qid]", qs) + + def qfree(self, qubits: Iterable[cirq.Qid]) -> None: + """Free the given qubits and update the usage counters.""" + super().qfree(qubits) + self.current_in_use -= len(set(qubits)) + + +class PeakUsageGreedyQubitManager(cirq.QubitManager): + """A qubit manager tracking compute and memory qubits separately. + + It consists of two independent qubit managers for each qubit type. Each manager + uses greedy allocation strategy from ``cirq.GreedyQubitManager``. + + Qubits of one type, after freed, cannot be reused as qubits of different type. + Therefore, peak qubit count is equal to sum of peak qubit counts for each type. + """ + + def __init__(self, prefix: str, *, size: int, maximize_reuse: bool): + """Initialize the PeakUsageGreedyQubitManager. + + Args: + prefix: Naming prefix for allocated qubits. + size: Initial pool size passed through to ``cirq.GreedyQubitManager``. + Example: 0. + maximize_reuse: Flag to control qubit reuse strategy. If ``False``, this + mode uses a FIFO (First in First out) strategy s.t. next allocated qubit + is one which was freed the earliest. If ``True``, this mode uses a LIFO + (Last in First out) strategy s.t. 
the next allocated qubit is one which + was freed the latest. + + """ + self.typed_managers = { + qubit_type: _TypedQubitManager( + prefix, qubit_type, size=size, maximize_reuse=maximize_reuse + ) + for qubit_type in QubitType + } + + def qalloc( + self, n: int, dim: int, qubit_type: QubitType = QubitType.COMPUTE + ) -> list[cirq.Qid]: + """Allocate ``n`` qubits and update the usage counters. + + Args: + n: Number of qubits to allocate. + dim: Dimension of each qubit. Example: 2 for qubits. + qubit_type: Type of qubits (COMPUTE or MEMORY). + + Returns: + List of allocated qubits. + + """ + return self.typed_managers[qubit_type].qalloc(n, dim) + + def qborrow(self, n: int, dim: int = 2) -> list[cirq.Qid]: + """Borrow qubits (not supported).""" + raise NotImplementedError("qborrow is not supported.") + + def qfree(self, qubits: Iterable[cirq.Qid]) -> None: + """Free the given qubits.""" + qubits_by_type: dict[QubitType, list[cirq.Qid]] = {t: [] for t in QubitType} + for q in qubits: + qubits_by_type[_as_typed_qubit(q).qubit_type].append(q) + for qubit_type, qs in qubits_by_type.items(): + if len(qs) > 0: + self.typed_managers[qubit_type].qfree(qs) + + def current_in_use(self) -> int: + """Number of qubits currently in use.""" + return sum(qm.current_in_use for qm in self.typed_managers.values()) + + def qubit_count(self) -> int: + """Returns the peak number of qubits of all types. + + It is equal to sum of peak counts for each type, because qubits of one type + cannot be reused as qubits of a different type. 
+ """ + return self.compute_qubit_count() + self.memory_qubit_count() + + def compute_qubit_count(self) -> int: + """Returns the peak number of simultaneously in-use COMPUTE qubits.""" + return self.typed_managers[QubitType.COMPUTE].peak_in_use + + def memory_qubit_count(self) -> int: + """Returns the peak number of simultaneously in-use MEMORY qubits.""" + return self.typed_managers[QubitType.MEMORY].peak_in_use + + +class ReadFromMemoryGate(cirq.Gate): + """Moves qubit states from MEMORY register to COMPUTE register. + + Assumes COMPUTE qubits are prepared in 0 state. Leaves MEMORY qubits in 0 state. + """ + + def __init__(self, n: int): + """Initializes ReadFromMemoryGate.""" + self.n = n + + def _num_qubits_(self) -> int: + """Number of qubits passed in to this gate.""" + return 2 * self.n + + def _decompose_(self, qubits: Sequence[cirq.Qid]) -> Iterator[cirq.Operation]: + """Decomposes this gate into equivalent SWAP gates.""" + comp_qs, mem_qs = self._get_qubits(qubits) + for i in range(self.n): + yield cirq.reset(comp_qs[i]) + yield cirq.SWAP(mem_qs[i], comp_qs[i]) + + def _to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation, **_kwargs): + """Convert this gate into trace instructions.""" + if context._track_memory_qubits: + comp_qs, mem_qs = self._get_qubits(op.qubits) + for i in range(self.n): + yield TraceGate(READ_FROM_MEMORY, [mem_qs[i], comp_qs[i]]) + else: + yield from self._decompose_(op.qubits) + + def _get_qubits( + self, qubits: Sequence[cirq.Qid] + ) -> tuple[Sequence[cirq.Qid], Sequence[cirq.Qid]]: + """Get qubits for this gate partitioned into compute and memory + qubits.""" + assert len(qubits) == 2 * self.n + mem_qs = qubits[0 : self.n] + comp_qs = qubits[self.n : 2 * self.n] + assert_qubits_type(mem_qs, QubitType.MEMORY) + assert_qubits_type(comp_qs, QubitType.COMPUTE) + return comp_qs, mem_qs + + +class WriteToMemoryGate(cirq.Gate): + """Moves qubit states from COMPUTE register to MEMORY register. 
+ + Assumes MEMORY qubits are prepared in 0 state. Leaves COMPUTE qubits in 0 state. + """ + + def __init__(self, n: int): + """Initializes WriteToMemoryGate.""" + self.n = n + + def _num_qubits_(self) -> int: + """Number of qubits passed in to this gate.""" + return 2 * self.n + + def _decompose_(self, qubits: Sequence[cirq.Qid]) -> Iterator[cirq.Operation]: + """Decomposes this gate into equivalent SWAP gates.""" + comp_qs, mem_qs = self._get_qubits(qubits) + for i in range(self.n): + yield cirq.reset(mem_qs[i]) + yield cirq.SWAP(mem_qs[i], comp_qs[i]) + + def _to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation, **_kwargs): + """Convert this gate into trace instructions.""" + if context._track_memory_qubits: + comp_qs, mem_qs = self._get_qubits(op.qubits) + for i in range(self.n): + yield TraceGate(WRITE_TO_MEMORY, [comp_qs[i], mem_qs[i]]) + else: + yield from self._decompose_(op.qubits) + + def _get_qubits( + self, qubits: Sequence[cirq.Qid] + ) -> tuple[Sequence[cirq.Qid], Sequence[cirq.Qid]]: + assert len(qubits) == 2 * self.n + mem_qs = qubits[0 : self.n] + comp_qs = qubits[self.n : 2 * self.n] + assert_qubits_type(mem_qs, QubitType.MEMORY) + assert_qubits_type(comp_qs, QubitType.COMPUTE) + + return comp_qs, mem_qs + + +def write_to_memory( + memory_qubits: Sequence[cirq.Qid], compute_qubits: Sequence[cirq.Qid] +) -> cirq.Operation: + """Operation to write qubits to memory.""" + assert_qubits_type(memory_qubits, QubitType.MEMORY) + assert_qubits_type(compute_qubits, QubitType.COMPUTE) + n = len(memory_qubits) + assert n == len(compute_qubits) + return WriteToMemoryGate(n).on(*memory_qubits, *compute_qubits) + + +def read_from_memory( + memory_qubits: Sequence[cirq.Qid], compute_qubits: Sequence[cirq.Qid] +) -> cirq.Operation: + """Operation to read qubits from memory.""" + assert_qubits_type(memory_qubits, QubitType.MEMORY) + assert_qubits_type(compute_qubits, QubitType.COMPUTE) + n = len(memory_qubits) + assert n == len(compute_qubits) + return 
ReadFromMemoryGate(n).on(*memory_qubits, *compute_qubits) diff --git a/source/qdk_package/qdk/qre/interop/_qir.py b/source/qdk_package/qdk/qre/interop/_qir.py new file mode 100644 index 0000000000..ebfb9559d1 --- /dev/null +++ b/source/qdk_package/qdk/qre/interop/_qir.py @@ -0,0 +1,136 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from __future__ import annotations + +import pyqir + +from ..._native import QirInstructionId +from ..._simulation import AggregateGatesPass +from .. import instruction_ids as ids +from .._qre import Trace + +# Maps QirInstructionId to (instruction_id, arity) where arity is: +# 1 = single-qubit gate: tuple is (op, qubit) +# 2 = two-qubit gate: tuple is (op, qubit1, qubit2) +# 3 = three-qubit gate: tuple is (op, qubit1, qubit2, qubit3) +# -1 = single-qubit rotation: tuple is (op, angle, qubit) +# -2 = two-qubit rotation: tuple is (op, angle, qubit1, qubit2) +_GATE_MAP: list[tuple[QirInstructionId, int, int]] = [ + # Single-qubit gates + (QirInstructionId.I, ids.PAULI_I, 1), + (QirInstructionId.H, ids.H, 1), + (QirInstructionId.X, ids.PAULI_X, 1), + (QirInstructionId.Y, ids.PAULI_Y, 1), + (QirInstructionId.Z, ids.PAULI_Z, 1), + (QirInstructionId.S, ids.S, 1), + (QirInstructionId.SAdj, ids.S_DAG, 1), + (QirInstructionId.SX, ids.SQRT_X, 1), + (QirInstructionId.SXAdj, ids.SQRT_X_DAG, 1), + (QirInstructionId.T, ids.T, 1), + (QirInstructionId.TAdj, ids.T_DAG, 1), + # Two-qubit gates + (QirInstructionId.CNOT, ids.CNOT, 2), + (QirInstructionId.CX, ids.CX, 2), + (QirInstructionId.CY, ids.CY, 2), + (QirInstructionId.CZ, ids.CZ, 2), + (QirInstructionId.SWAP, ids.SWAP, 2), + # Three-qubit gates + (QirInstructionId.CCX, ids.CCX, 3), + # Single-qubit rotations (op, angle, qubit) + (QirInstructionId.RX, ids.RX, -1), + (QirInstructionId.RY, ids.RY, -1), + (QirInstructionId.RZ, ids.RZ, -1), + # Two-qubit rotations (op, angle, qubit1, qubit2) + (QirInstructionId.RXX, ids.RXX, -2), + (QirInstructionId.RYY, ids.RYY, -2), + 
(QirInstructionId.RZZ, ids.RZZ, -2), +] + +_MEAS_MAP: list[tuple[QirInstructionId, int]] = [ + (QirInstructionId.M, ids.MEAS_Z), + (QirInstructionId.MZ, ids.MEAS_Z), + (QirInstructionId.MResetZ, ids.MEAS_RESET_Z), +] + +_SKIP = ( + # Resets qubit to |0⟩ without measuring; we do not currently account for + # that in resource estimation + QirInstructionId.RESET, + # Runtime qubit state transfer; an implementation detail, not a logical operation + QirInstructionId.Move, + # Reads a measurement result from classical memory; purely classical I/O + QirInstructionId.ReadResult, + # The following are classical output recording operations that do not represent + # quantum operations and have no impact on resource estimation. + QirInstructionId.ResultRecordOutput, + QirInstructionId.BoolRecordOutput, + QirInstructionId.IntRecordOutput, + QirInstructionId.DoubleRecordOutput, + QirInstructionId.TupleRecordOutput, + QirInstructionId.ArrayRecordOutput, +) + + +def trace_from_qir(input: str | bytes) -> Trace: + """Convert a QIR program into a resource-estimation Trace. + + Parses the QIR module, extracts quantum gates, and builds a Trace that + can be used for resource estimation. Conditional branches are resolved + by always following the false path (assuming measurement results are Zero). + + Args: + input: QIR input as LLVM IR text (str) or bitcode (bytes). + + Returns: + A Trace containing the quantum operations from the QIR program. 
+ """ + context = pyqir.Context() + + if isinstance(input, str): + mod = pyqir.Module.from_ir(context, input) + else: + mod = pyqir.Module.from_bitcode(context, input) + + gates, num_qubits, _ = AggregateGatesPass().run(mod) + + trace = Trace(compute_qubits=num_qubits) + + for gate in gates: + # NOTE: AggregateGatesPass does not return QirInstruction objects + assert isinstance(gate, tuple) + _add_gate(trace, gate) + + return trace + + +def _add_gate(trace: Trace, gate: tuple) -> None: + """Add a single QIR gate tuple to the trace.""" + op = gate[0] + + for qir_id, instr_id, arity in _GATE_MAP: + if op == qir_id: + if arity == 1: + trace.add_operation(instr_id, [gate[1]]) + elif arity == 2: + trace.add_operation(instr_id, [gate[1], gate[2]]) + elif arity == 3: + trace.add_operation(instr_id, [gate[1], gate[2], gate[3]]) + elif arity == -1: + trace.add_operation(instr_id, [gate[2]], [gate[1]]) + elif arity == -2: + trace.add_operation(instr_id, [gate[2], gate[3]], [gate[1]]) + return + + for qir_id, instr_id in _MEAS_MAP: + if op == qir_id: + trace.add_operation(instr_id, [gate[1]]) + return + + for skip_id in _SKIP: + if op == skip_id: + return + + # The only unhandled QirInstructionId is CorrelatedNoise + assert op == QirInstructionId.CorrelatedNoise, f"Unexpected QIR instruction: {op}" + raise NotImplementedError(f"Unsupported QIR instruction: {op}") diff --git a/source/qdk_package/qdk/qre/interop/_qsharp.py b/source/qdk_package/qdk/qre/interop/_qsharp.py new file mode 100644 index 0000000000..83c2cee60d --- /dev/null +++ b/source/qdk_package/qdk/qre/interop/_qsharp.py @@ -0,0 +1,155 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from __future__ import annotations + +from pathlib import Path +import time +from typing import Callable, Optional + +from ..._qsharp import logical_counts +from ...estimator import LogicalCounts +from .._qre import Trace +from ..instruction_ids import CCX, MEAS_Z, RZ, T, READ_FROM_MEMORY, WRITE_TO_MEMORY +from ..property_keys import ( + EVALUATION_TIME, + ALGORITHM_COMPUTE_QUBITS, + ALGORITHM_MEMORY_QUBITS, +) + + +def _bucketize_rotation_counts( + rotation_count: int, rotation_depth: int +) -> list[tuple[int, int]]: + """ + Return a list of (count, depth) pairs representing the rotation layers in + the trace. + + The following properties hold for the returned list ``result``: + - sum(depth for _, depth in result) == rotation_depth + - sum(count * depth for count, depth in result) == rotation_count + - count > 0 for each (count, _) in result + - count <= qubit_count for each (count, _) in result holds by definition + when rotation_count <= rotation_depth * qubit_count + + Args: + rotation_count: Total number of rotations. + rotation_depth: Total depth of the rotation layers. + + Returns: + A list of (count, depth) pairs, where 'count' is the number of + rotations in a layer and 'depth' is the depth of that layer. + """ + if rotation_depth == 0: + return [] + + base = rotation_count // rotation_depth + extra = rotation_count % rotation_depth + + result: list[tuple[int, int]] = [] + if extra > 0: + result.append((base + 1, extra)) + if rotation_depth - extra > 0: + result.append((base, rotation_depth - extra)) + return result + + +def trace_from_entry_expr(entry_expr: str | Callable | LogicalCounts, *args) -> Trace: + """Convert a Q# entry expression into a resource-estimation Trace. + + Evaluates the entry expression to obtain logical counts, then builds + a trace containing the corresponding quantum operations. + + Args: + entry_expr (str | Callable | LogicalCounts): A Q# entry expression + string, a callable, or pre-computed logical counts. 
+ *args: The arguments to pass to the callable, if one is provided. + + Returns: + Trace: A trace representing the resource profile of the program. + """ + + start = time.time_ns() + counts = ( + logical_counts(entry_expr, *args) + if not isinstance(entry_expr, LogicalCounts) + else entry_expr + ) + evaluation_time = time.time_ns() - start + + ccx_count = counts.get("cczCount", 0) + counts.get("ccixCount", 0) + + # Q# logical counts report total number of qubits (compute + memory) + num_qubits = counts.get("numQubits", 0) + # Compute qubits may be reported separately + compute_qubits = counts.get("numComputeQubits", num_qubits) + memory_qubits = num_qubits - compute_qubits + + trace = Trace(compute_qubits) + + rotation_count = counts.get("rotationCount", 0) + rotation_depth = counts.get("rotationDepth", rotation_count) + + if rotation_count != 0 and rotation_depth != 0: + for count, depth in _bucketize_rotation_counts(rotation_count, rotation_depth): + block = trace.add_block(repetitions=depth) + for i in range(count): + block.add_operation(RZ, [i]) + + if t_count := counts.get("tCount", 0): + block = trace.add_block(repetitions=t_count) + block.add_operation(T, [0]) + + if ccx_count: + block = trace.add_block(repetitions=ccx_count) + block.add_operation(CCX, [0, 1, 2]) + + if meas_count := counts.get("measurementCount", 0): + block = trace.add_block(repetitions=meas_count) + block.add_operation(MEAS_Z, [0]) + + if memory_qubits != 0: + trace.memory_qubits = memory_qubits + + if rfm_count := counts.get("readFromMemoryCount", 0): + block = trace.add_block(repetitions=rfm_count) + block.add_operation(READ_FROM_MEMORY, [0, compute_qubits]) + + if wtm_count := counts.get("writeToMemoryCount", 0): + block = trace.add_block(repetitions=wtm_count) + block.add_operation(WRITE_TO_MEMORY, [0, compute_qubits]) + + trace.set_property(EVALUATION_TIME, evaluation_time) + trace.set_property(ALGORITHM_COMPUTE_QUBITS, compute_qubits) + trace.set_property(ALGORITHM_MEMORY_QUBITS, 
memory_qubits) + return trace + + +def trace_from_entry_expr_cached( + entry_expr: str | Callable | LogicalCounts, cache_path: Optional[Path], *args +) -> Trace: + """Convert a Q# entry expression into a Trace, with optional caching. + + If *cache_path* is provided and exists, the trace is loaded from disk. + Otherwise, the trace is computed via ``trace_from_entry_expr`` and + optionally written to *cache_path*. + + Args: + entry_expr (str | Callable | LogicalCounts): A Q# entry expression + string, a callable, or pre-computed logical counts. + cache_path (Optional[Path]): Path for reading/writing the cached + trace. If None, caching is disabled. + + Returns: + Trace: A trace representing the resource profile of the program. + """ + if cache_path and cache_path.exists(): + return Trace.from_json(cache_path.read_text()) + + trace = trace_from_entry_expr(entry_expr, *args) + + if cache_path: + cache_path.parent.mkdir(parents=True, exist_ok=True) + cache_path.write_text(trace.to_json()) + + return trace diff --git a/source/qdk_package/qdk/qre/models.py b/source/qdk_package/qdk/qre/models.py deleted file mode 100644 index 2477b79f1a..0000000000 --- a/source/qdk_package/qdk/qre/models.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -# flake8: noqa F403 -# pyright: ignore[reportWildcardImportFromLibrary] - -"""QRE hardware and QEC models. - -This module re-exports all public symbols from [qsharp.qre.models](:mod:`qsharp.qre.models`), -making them available under the ``qdk.qre.models`` namespace. It provides -classes representing hardware architectures, qubit models, and quantum error -correction schemes used in resource estimation. - -Requires the ``qre`` extra: ``pip install qdk[qre]``. - -Example: - - from qdk.qre.models import SurfaceCode -""" - -try: - # Re-export the top-level qsharp.qre.models names. 
- from qsharp.qre.models import * -except Exception as ex: - raise ImportError( - "qdk.qre.models requires the qre extras. Install with 'pip install \"qdk[qre]\"'." - ) from ex diff --git a/source/qdk_package/qdk/qre/models/__init__.py b/source/qdk_package/qdk/qre/models/__init__.py new file mode 100644 index 0000000000..3da76797ac --- /dev/null +++ b/source/qdk_package/qdk/qre/models/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from .factories import Litinski19Factory, MagicUpToClifford, RoundBasedFactory +from .qec import ( + SurfaceCode, + ThreeAux, + OneDimensionalYokedSurfaceCode, + TwoDimensionalYokedSurfaceCode, +) +from .qubits import GateBased, Majorana + +__all__ = [ + "GateBased", + "Litinski19Factory", + "Majorana", + "MagicUpToClifford", + "RoundBasedFactory", + "SurfaceCode", + "ThreeAux", + "OneDimensionalYokedSurfaceCode", + "TwoDimensionalYokedSurfaceCode", +] diff --git a/source/qdk_package/qdk/qre/models/factories/__init__.py b/source/qdk_package/qdk/qre/models/factories/__init__.py new file mode 100644 index 0000000000..e652dfc983 --- /dev/null +++ b/source/qdk_package/qdk/qre/models/factories/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from ._litinski import Litinski19Factory +from ._round_based import RoundBasedFactory +from ._utils import MagicUpToClifford + +__all__ = ["Litinski19Factory", "MagicUpToClifford", "RoundBasedFactory"] diff --git a/source/qdk_package/qdk/qre/models/factories/_litinski.py b/source/qdk_package/qdk/qre/models/factories/_litinski.py new file mode 100644 index 0000000000..ffe4b2558d --- /dev/null +++ b/source/qdk_package/qdk/qre/models/factories/_litinski.py @@ -0,0 +1,395 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from __future__ import annotations + +from dataclasses import dataclass +from math import ceil +from typing import Generator + +from ..._architecture import ISAContext +from ..._qre import ISARequirements, ConstraintBound, ISA +from ..._instruction import ISATransform, constraint, LOGICAL +from ...instruction_ids import T, CNOT, H, MEAS_Z, CCZ + + +@dataclass +class Litinski19Factory(ISATransform): + """ + T and CCZ factories based on the paper + [arXiv:1905.06903](https://arxiv.org/abs/1905.06903). + + It contains two categories of estimates. If the input T error rate is + similar to the Clifford error, it produces magic state instructions based on + Table 1 in the paper. If the input T error rate is at most 10 times higher + than the Clifford error rate, it produces magic state instructions based on + Table 2 in the paper. + + It requires Clifford error rates of at most 0.1% for CNOT, H, and MEAS_Z + instructions. If these instructions have different error rates, the maximum + error rate is assumed. + + References: + + - Daniel Litinski: Magic state distillation: not as costly as you think, + [arXiv:1905.06903](https://arxiv.org/abs/1905.06903) + """ + + def __post_init__(self): + self._initialize_entries() + + @staticmethod + def required_isa() -> ISARequirements: + return ISARequirements( + # T error rate may be at least 10x higher than Clifford error rates + constraint(T, error_rate=ConstraintBound.le(1e-2)), + constraint(H, error_rate=ConstraintBound.le(1e-3)), + constraint(CNOT, arity=2, error_rate=ConstraintBound.le(1e-3)), + constraint(MEAS_Z, error_rate=ConstraintBound.le(1e-3)), + ) + + def provided_isa( + self, impl_isa: ISA, ctx: ISAContext + ) -> Generator[ISA, None, None]: + """Yield ISAs with T and CCZ factory instructions. + + Args: + impl_isa (ISA): The implementation ISA providing physical gates. + ctx (ISAContext): The enumeration context. + + Yields: + ISA: An ISA containing distilled T and/or CCZ instructions. 
+ """ + h = impl_isa[H] + cnot = impl_isa[CNOT] + meas_z = impl_isa[MEAS_Z] + t = impl_isa[T] + + clifford_error_rate = max( + h.expect_error_rate(), + cnot.expect_error_rate(), + meas_z.expect_error_rate(), + ) + + t_error_rate = t.expect_error_rate() + + entries_by_state = None + + if clifford_error_rate <= 1e-4: + if t_error_rate <= 1e-4: + entries_by_state = self._entries[1e-4][0] + elif t_error_rate <= 1e-3: + entries_by_state = self._entries[1e-4][1] + else: + # NOTE: This assertion is valid due to the constraint bound in the + # required_isa method + assert clifford_error_rate <= 1e-3 + if t_error_rate <= 1e-3: + entries_by_state = self._entries[1e-3][0] + elif t_error_rate <= 1e-2: + entries_by_state = self._entries[1e-3][1] + + if entries_by_state is None: + return + + t_entries = entries_by_state.get(T, []) + ccz_entries = entries_by_state.get(CCZ, []) + + syndrome_extraction_time = ( + 4 * impl_isa[CNOT].expect_time() + + impl_isa[H].expect_time() + + impl_isa[MEAS_Z].expect_time() + ) + + def make_node(entry: _Entry) -> int: + # Convert cycles (number of syndrome extraction cycles) to time + # based on fast surface code + time = ceil(syndrome_extraction_time * entry.cycles) + + # NOTE: If the protocol outputs multiple states, we assume that the + # space cost is divided by the number of output states. This is a + # simplification that allows us to fit all protocols in the ISA, but + # it may not be accurate for all protocols. 
+ return ctx.add_instruction( + entry.state, + arity=3 if entry.state == CCZ else 1, + encoding=LOGICAL, + space=ceil(entry.space / entry.output_states), + time=time, + error_rate=entry.error_rate, + transform=self, + source=[cnot, h, meas_z, t], + ) + + # Yield combinations of T and CCZ entries + if ccz_entries: + for t_entry in t_entries: + for ccz_entry in ccz_entries: + yield ctx.make_isa( + make_node(t_entry), + make_node(ccz_entry), + ) + else: + # Table 2 scenarios: only T gates available + for t_entry in t_entries: + yield ctx.make_isa(make_node(t_entry)) + + def _initialize_entries(self): + """Initialize the distillation protocol lookup tables.""" + self._entries = { + # Assuming a Clifford error rate of at most 1e-4: + 1e-4: ( + # Assuming a T error rate of at most 1e-4 (Table 1): + { + T: [ + _Entry(_Protocol(15, 1, 7, 3, 3), 4.4e-8, 810, 18.1), + _Entry(_Protocol(15, 1, 9, 3, 3), 9.3e-10, 1_150, 18.1), + _Entry(_Protocol(15, 1, 11, 5, 5), 1.9e-11, 2_070, 30.0), + _Entry( + [ + (_Protocol(15, 1, 9, 3, 3), 4), + (_Protocol(20, 4, 15, 7, 9), 1), + ], + 2.4e-15, + 16_400, + 90.3, + ), + _Entry( + [ + (_Protocol(15, 1, 9, 3, 3), 4), + (_Protocol(15, 1, 25, 9, 9), 1), + ], + 6.3e-25, + 18_600, + 67.8, + ), + _Entry(_Protocol(15, 1, 9, 3, 3), 1.5e-9, 762, 36.2), + ], + CCZ: [ + _Entry( + [ + (_Protocol(15, 1, 7, 3, 3), 4), + (_Protocol(8, 1, 15, 7, 9, CCZ), 1), + ], + 7.2e-14, + 12_400, + 36.1, + ), + ], + }, + # Assuming a T error rate of at most 1e-3 (10x higher than Clifford, Table 2): + { + T: [ + _Entry(_Protocol(15, 1, 9, 3, 3), 2.1e-8, 1_150, 18.2), + _Entry( + [ + (_Protocol(15, 1, 7, 3, 3), 6), + (_Protocol(20, 4, 13, 5, 7), 1), + ], + 1.4e-12, + 13_200, + 70, + ), + _Entry( + [ + (_Protocol(15, 1, 9, 3, 3), 4), + (_Protocol(20, 4, 15, 7, 9), 1), + ], + 6.6e-15, + 16_400, + 91.2, + ), + _Entry( + [ + (_Protocol(15, 1, 9, 3, 3), 4), + (_Protocol(15, 1, 25, 9, 9), 1), + ], + 4.2e-22, + 18_600, + 68.4, + ), + ], + CCZ: [], + }, + ), + # Assuming a 
Clifford error rate of at most 1e-3: + 1e-3: ( + # Assuming a T error rate of at most 1e-3 (Table 1): + { + T: [ + _Entry(_Protocol(15, 1, 17, 7, 7), 4.5e-8, 4_620, 42.6), + _Entry( + [ + (_Protocol(15, 1, 13, 5, 5), 6), + (_Protocol(20, 4, 23, 11, 13), 1), + ], + 1.4e-10, + 43_300, + 130, + ), + _Entry( + [ + (_Protocol(15, 1, 13, 5, 5), 4), + (_Protocol(20, 4, 27, 13, 15), 1), + ], + 2.6e-11, + 46_800, + 157, + ), + _Entry( + [ + (_Protocol(15, 1, 11, 5, 5), 6), + (_Protocol(15, 1, 25, 11, 11), 1), + ], + 2.7e-12, + 30_700, + 82.5, + ), + _Entry( + [ + (_Protocol(15, 1, 13, 5, 5), 6), + (_Protocol(15, 1, 29, 11, 13), 1), + ], + 3.3e-14, + 39_100, + 97.5, + ), + _Entry( + [ + (_Protocol(15, 1, 15, 7, 7), 6), + (_Protocol(15, 1, 41, 17, 17), 1), + ], + 4.5e-20, + 73_400, + 128, + ), + ], + CCZ: [ + _Entry( + [ + (_Protocol(15, 1, 13, 7, 7), 6), + (_Protocol(8, 1, 25, 15, 15, CCZ), 1), + ], + 5.2e-11, + 47_000, + 60, + ), + ], + }, + # Assuming a T error rate of at most 1e-2 (10x higher than Clifford, Table 2): + { + T: [ + _Entry( + [ + (_Protocol(15, 1, 13, 5, 5), 6), + (_Protocol(20, 4, 21, 11, 13), 1), + ], + 5.7e-9, + 40_700, + 130, + ), + _Entry( + [ + (_Protocol(15, 1, 11, 5, 5), 6), + (_Protocol(15, 1, 21, 9, 11), 1), + ], + 2.1e-10, + 27_400, + 85.7, + ), + _Entry( + [ + (_Protocol(15, 1, 11, 5, 5), 6), + (_Protocol(15, 1, 23, 11, 11), 1), + ], + 2.5e-11, + 29_500, + 85.7, + ), + _Entry( + [ + (_Protocol(15, 1, 11, 5, 5), 6), + (_Protocol(15, 1, 25, 11, 11), 1), + ], + 6.4e-12, + 30_700, + 85.7, + ), + _Entry( + [ + (_Protocol(15, 1, 13, 7, 7), 8), + (_Protocol(15, 1, 29, 13, 13), 1), + ], + 1.5e-13, + 52_400, + 97.5, + ), + ], + CCZ: [], + }, + ), + } + + +@dataclass(frozen=True, slots=True) +class _Entry: + """A single distillation protocol entry from the Litinski tables. + + Attributes: + protocol (list[tuple[_Protocol, int]] | _Protocol): The distillation + protocol or pipeline of protocols. + error_rate (float): Output error rate of the protocol. 
+ space (int): Space cost in physical qubits. + cycles (float): Number of syndrome extraction cycles. + """ + + protocol: list[tuple[_Protocol, int]] | _Protocol + error_rate: float + # Space estimation in number of physical qubits + space: int + # Number of code cycles to estimate time; a code cycle corresponds to + # measuring all surface-code check operators exactly once. + cycles: float + + @property + def output_states(self) -> int: + """Return the number of output magic states.""" + if isinstance(self.protocol, list): + return self.protocol[-1][0].output_states + else: + return self.protocol.output_states + + @property + def state(self) -> int: + """Return the magic state instruction ID (T or CCZ).""" + if isinstance(self.protocol, list): + return self.protocol[-1][0].state + else: + return self.protocol.state + + +@dataclass(frozen=True, slots=True) +class _Protocol: + """Parameters for a single distillation protocol. + + Attributes: + input_states (int): Number of input T states. + output_states (int): Number of output T states. + d_x (int): Spatial X distance. + d_z (int): Spatial Z distance. + d_m (int): Temporal distance. + state (int): Magic state instruction ID. Default is T. + """ + + # Number of input T states in protocol + input_states: int + # Number of output T states in protocol + output_states: int + # Spatial X distance (arXiv:1905.06903, Section 2) + d_x: int + # Spatial Z distance (arXiv:1905.06903, Section 2) + d_z: int + # Temporal distance (arXiv:1905.06903, Section 2) + d_m: int + # Magic state + state: int = T diff --git a/source/qdk_package/qdk/qre/models/factories/_round_based.py b/source/qdk_package/qdk/qre/models/factories/_round_based.py new file mode 100644 index 0000000000..982ce78cec --- /dev/null +++ b/source/qdk_package/qdk/qre/models/factories/_round_based.py @@ -0,0 +1,461 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from __future__ import annotations + +import copy +import hashlib +import logging +from dataclasses import dataclass, field +from itertools import combinations_with_replacement +from math import ceil +from pathlib import Path +from typing import Callable, Generator, Iterable, Optional, Sequence + +from ..._qre import ( + ISA, + InstructionFrontier, + ISARequirements, + Instruction, + _binom_ppf, + _ProvenanceGraph, +) +from ..._instruction import ( + LOGICAL, + PHYSICAL, + ISAQuery, + ISATransform, + constraint, +) +from ..._architecture import ISAContext +from ...instruction_ids import CNOT, LATTICE_SURGERY, T, MEAS_ZZ +from ..qec import SurfaceCode + + +logger = logging.getLogger(__name__) + + +@dataclass(frozen=True) +class RoundBasedFactory(ISATransform): + """ + A magic state factory that produces T gate instructions using round-based + distillation pipelines. + + This factory explores combinations of distillation units (such as "15-to-1 + RM prep" and "15-to-1 space efficient") to find optimal configurations that + minimize time and space while achieving target error rates. It supports + both physical-level distillation (when the input T gate is physically + encoded) and logical-level distillation (using lattice surgery via surface + codes). + + In order to account for the success probability of distillation rounds, the + factory models the pipeline using a failure probability requirement + (defaulting to 1%) that each round must meet. The number of distillation + units per round is adjusted to meet this requirement, which in turn affects + the overall space requirements. + + Space requirements are calculated using a user-provided function that + aggregates per-round space (e.g., sum or max). The ``sum`` function models + the case in which qubits are not reused across rounds, while the ``max`` + function models the case in which qubits are reused across rounds. 
+ + For the enumeration of logical-level distillation units, the factory relies + on a user-provided ``ISAQuery`` (defaulting to ``SurfaceCode.q()``) to explore + different surface code configurations and their corresponding lattice + surgery instructions. These need to be provided by the user and cannot + automatically be derived from the provided implementation ISA, as they can + only contain a subset of the required instructions. The user needs to + ensure that the provided query matches the architecture for which this + factory is being used. + + Results are cached to disk for efficiency. + + Attributes: + code_query: ISAQuery + Query to enumerate QEC codes for logical distillation units. + Defaults to SurfaceCode.q(). + physical_qubit_calculation: Callable[[Iterable], int] + Function to calculate total physical qubits from per-round space + requirements, e.g., sum or max. Defaults to sum. + cache_dir: Path + Directory for caching computed factory configurations. Defaults to + ~/.cache/re3/round_based. + use_cache: bool + Whether to use cached results. Defaults to True. + + References: + + - Sergei Bravyi, Alexei Kitaev: Universal Quantum Computation with ideal + Clifford gates and noisy ancillas, + [arXiv:quant-ph/0403025](https://arxiv.org/abs/quant-ph/0403025) + - Michael E. Beverland, Prakash Murali, Matthias Troyer, Krysta M. 
Svore, + Torsten Hoefler, Vadym Kliuchnikov, Guang Hao Low, Mathias Soeken, Aarthi + Sundaram, Alexander Vaschillo: Assessing requirements to scale to + practical quantum advantage, + [arXiv:2211.07629](https://arxiv.org/pdf/2211.07629) + """ + + code_query: ISAQuery = field(default_factory=lambda: SurfaceCode.q()) + physical_qubit_calculation: Callable[[Iterable], int] = field(default=sum) + # optional: make cache directory configurable + cache_dir: Path = field( + default=Path.home() / ".cache" / "re3" / "round_based", repr=False + ) + use_cache: bool = field(default=True, repr=False) + + @staticmethod + def required_isa() -> ISARequirements: + # NOTE: A T gate is required, but a CNOT is only required to explore + # physical units. + return ISARequirements( + constraint(T), + ) + + def provided_isa( + self, impl_isa: ISA, ctx: ISAContext + ) -> Generator[ISA, None, None]: + cache_path = self._cache_path(impl_isa) + + # 1) Try to load from cache + if self.use_cache and cache_path.exists(): + cached_states = InstructionFrontier.load(str(cache_path)) + for state in cached_states: + yield ctx.make_isa( + ctx.add_instruction(state, transform=self, source=[impl_isa[T]]) + ) + return + + # 2) Compute as before + t_gate_error = impl_isa[T].expect_error_rate() + + units: list[_DistillationUnit] = [] + initial_unit = [] + + # Physical units? 
+ if impl_isa[T].encoding == PHYSICAL: + clifford_gate = impl_isa.get(CNOT) or impl_isa.get(MEAS_ZZ) + if clifford_gate is None: + raise ValueError( + "CNOT or MEAS_ZZ instruction is required for physical units" + ) + + gate_time = clifford_gate.expect_time() + clifford_error = clifford_gate.expect_error_rate() + units.extend(self._physical_units(gate_time, clifford_error)) + else: + initial_unit.append( + _DistillationUnit( + 1, + impl_isa[T].expect_time(), + impl_isa[T].expect_space(), + [1, 0], + [0], + ) + ) + + # create a fresh inner context of the given one + inner_ctx = copy.copy(ctx) + inner_ctx._provenance = _ProvenanceGraph() + for code_isa in self.code_query.enumerate(inner_ctx): + units.extend(self._logical_units(code_isa[LATTICE_SURGERY])) + + optimal_states = InstructionFrontier() + + for r in range(1, 4 - len(initial_unit)): + for k in combinations_with_replacement(units, r): + pipeline = _Pipeline.try_create( + initial_unit + list(k), + t_gate_error, + physical_qubit_calculation=self.physical_qubit_calculation, + ) + if pipeline is not None: + state = self._state_from_pipeline(pipeline) + optimal_states.insert(state) + logger.debug(f"Optimal states after {r} rounds: {len(optimal_states)}") + + # 3) Save to cache, then yield + if self.use_cache: + optimal_states.dump(str(cache_path)) + + for state in optimal_states: + yield ctx.make_isa( + ctx.add_instruction(state, transform=self, source=[impl_isa[T]]) + ) + + def _physical_units(self, gate_time, clifford_error) -> list[_DistillationUnit]: + """Return physical distillation units for the given gate parameters.""" + return [ + _DistillationUnit( + num_input_states=15, + time=24 * gate_time, + space=31, + error_rate_coeffs=[35, 0.0, 0.0, 7.1 * clifford_error], + failure_probability_coeffs=[15, 356 * clifford_error], + name="15-to-1 RM prep", + ), + _DistillationUnit( + num_input_states=15, + time=45 * gate_time, + space=12, + error_rate_coeffs=[35, 0.0, 0.0, 7.1 * clifford_error], + 
failure_probability_coeffs=[15, 356 * clifford_error], + name="15-to-1 space efficient", + ), + ] + + def _logical_units( + self, lattice_surgery_instruction: Instruction + ) -> list[_DistillationUnit]: + """Return logical distillation units derived from a lattice surgery instruction.""" + logical_cycle_time = lattice_surgery_instruction.expect_time(1) + logical_error = lattice_surgery_instruction.expect_error_rate(1) + + return [ + _DistillationUnit( + num_input_states=15, + time=11 * logical_cycle_time, + space=lattice_surgery_instruction.expect_space(31), + error_rate_coeffs=[35, 0.0, 0.0, 7.1 * logical_error], + failure_probability_coeffs=[15, 356 * logical_error], + name="15-to-1 RM prep", + ), + _DistillationUnit( + num_input_states=15, + time=13 * logical_cycle_time, + space=lattice_surgery_instruction.expect_space(20), + error_rate_coeffs=[35, 0.0, 0.0, 7.1 * logical_error], + failure_probability_coeffs=[15, 356 * logical_error], + name="15-to-1 space efficient", + ), + ] + + def _state_from_pipeline(self, pipeline: _Pipeline) -> Instruction: + """Create a T-gate instruction from a distillation pipeline.""" + return Instruction.fixed_arity( + T, + int(LOGICAL), + 1, + pipeline.time, + pipeline.space, + None, + pipeline.error_rate, + ) + + def _cache_key(self, impl_isa: ISA) -> str: + """Build a deterministic key from factory configuration and impl_isa.""" + parts = [ + f"factory={type(self).__qualname__}", + f"code_query={repr(self.code_query)}", + f"physical_qubit_calculation={self.physical_qubit_calculation.__name__}", + ] + + # Include full instruction details, sorted by id for determinism + for instr in sorted(impl_isa, key=lambda i: i.id): + parts.append( + f"id={instr.id}|encoding={instr.encoding}|arity={instr.arity}" + f"|time={instr.time()}|space={instr.space()}" + f"|error_rate={instr.error_rate()}" + ) + + data = "\n".join(parts).encode("utf-8") + return hashlib.sha256(data).hexdigest() + + def _cache_path(self, impl_isa: ISA) -> Path: + """Return 
the cache file path for the given implementation ISA.""" + self.cache_dir.mkdir(parents=True, exist_ok=True) + return self.cache_dir / f"{self._cache_key(impl_isa)}.json" + + +class _Pipeline: + """A multi-round distillation pipeline.""" + + def __init__( + self, + units: Sequence[_DistillationUnit], + initial_input_error_rate: float, + *, + failure_probability_requirement: float = 0.01, + physical_qubit_calculation: Callable[[Iterable], int] = sum, + ): + self.failure_probability_requirement = failure_probability_requirement + self.rounds: list["_DistillationRound"] = [] + self.output_error_rate: float = initial_input_error_rate + self.physical_qubit_calculation = physical_qubit_calculation + + self._add_rounds(units) + + @classmethod + def try_create( + cls, + units: Sequence[_DistillationUnit], + initial_input_error_rate: float, + *, + failure_probability_requirement: float = 0.01, + physical_qubit_calculation: Callable[[Iterable], int] = sum, + ) -> Optional[_Pipeline]: + """Create a pipeline if the configuration is feasible. + + Returns: + Optional[_Pipeline]: The pipeline, or None if the required + number of units per round is infeasible. 
+ """ + pipeline = cls( + units, + initial_input_error_rate, + failure_probability_requirement=failure_probability_requirement, + physical_qubit_calculation=physical_qubit_calculation, + ) + if not pipeline._compute_units_per_round(): + return None + return pipeline + + def _compute_units_per_round(self) -> bool: + """Adjust the number of units per round to meet output requirements.""" + if len(self.rounds) > 0: + states_needed_next = self.rounds[-1].unit.num_output_states + + for dist_round in reversed(self.rounds): + if not dist_round.adjust_num_units_to(states_needed_next): + return False + states_needed_next = dist_round.num_input_states + + return True + + def _add_rounds(self, units: Sequence[_DistillationUnit]): + """Append distillation rounds from the given units.""" + per_round_failure_prob_req = self.failure_probability_requirement / len(units) + + for unit in units: + self.rounds.append( + _DistillationRound( + unit, + per_round_failure_prob_req, + self.output_error_rate, + ) + ) + self.output_error_rate = unit.error_rate(self.output_error_rate) + + @property + def space(self) -> int: + """Total physical-qubit space of the pipeline.""" + return self.physical_qubit_calculation(round.space for round in self.rounds) + + @property + def time(self) -> int: + """Total time of the pipeline in nanoseconds.""" + return sum(round.unit.time for round in self.rounds) + + @property + def error_rate(self) -> float: + """Output error rate of the pipeline.""" + return self.output_error_rate + + @property + def num_output_states(self) -> int: + """Number of output magic states produced by the pipeline.""" + return self.rounds[-1].compute_num_output_states() + + +@dataclass(slots=True) +class _DistillationUnit: + """A single distillation unit with fixed input/output characteristics.""" + + num_input_states: int + time: int + space: int + error_rate_coeffs: Sequence[float] + failure_probability_coeffs: Sequence[float] + name: Optional[str] = None + num_output_states: int = 
1 + + def error_rate(self, input_error_rate: float) -> float: + """Compute the output error rate for a given input error rate.""" + result = 0.0 + for c in self.error_rate_coeffs: + result = result * input_error_rate + c + return result + + def failure_probability(self, input_error_rate: float) -> float: + """Compute the failure probability for a given input error rate.""" + result = 0.0 + for c in self.failure_probability_coeffs: + result = result * input_error_rate + c + return result + + +@dataclass(slots=True) +class _DistillationRound: + """A single round in a distillation pipeline.""" + + unit: _DistillationUnit + failure_probability_requirement: float + input_error_rate: float + num_units: int = 1 + failure_probability: float = field(init=False) + + def __post_init__(self): + self.failure_probability = self.unit.failure_probability(self.input_error_rate) + + def adjust_num_units_to(self, output_states_needed_next: int) -> bool: + """Adjust the number of units to produce at least the required output states.""" + if self.failure_probability == 0.0: + self.num_units = output_states_needed_next + return True + + # Binary search to find the minimal number of units needed + self.num_units = ceil(output_states_needed_next / self.max_num_output_states) + + while True: + num_output_states = self.compute_num_output_states() + if num_output_states < output_states_needed_next: + self.num_units *= 2 + + # Distillation round requires unreasonably high number of units + if self.num_units >= 1_000_000_000_000_000: + return False + else: + break + + upper = self.num_units + lower = self.num_units // 2 + while lower < upper: + self.num_units = (lower + upper) // 2 + num_output_states = self.compute_num_output_states() + if num_output_states >= output_states_needed_next: + upper = self.num_units + else: + lower = self.num_units + 1 + self.num_units = upper + + return True + + @property + def space(self) -> int: + """Total physical-qubit space for this round.""" + return 
self.num_units * self.unit.space + + @property + def num_input_states(self) -> int: + """Total number of input states consumed by this round.""" + return self.num_units * self.unit.num_input_states + + @property + def max_num_output_states(self) -> int: + """Maximum number of output states this round can produce.""" + return self.num_units * self.unit.num_output_states + + def compute_num_output_states(self) -> int: + """Compute the expected number of output states accounting for failure probability.""" + failure_prob = self.failure_probability + + if failure_prob <= 1e-8: + return self.num_units * self.unit.num_output_states + + # A replacement for SciPy's binom.ppf that is faster + k = _binom_ppf( + self.failure_probability_requirement, + self.num_units, + 1.0 - failure_prob, + ) + + return int(k) * self.unit.num_output_states diff --git a/source/qdk_package/qdk/qre/models/factories/_utils.py b/source/qdk_package/qdk/qre/models/factories/_utils.py new file mode 100644 index 0000000000..0fbec26ed7 --- /dev/null +++ b/source/qdk_package/qdk/qre/models/factories/_utils.py @@ -0,0 +1,90 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from typing import Generator + +from ..._architecture import ISAContext +from ..._qre import ISARequirements, ISA +from ..._instruction import ISATransform +from ...instruction_ids import ( + SQRT_SQRT_X, + SQRT_SQRT_X_DAG, + SQRT_SQRT_Y, + SQRT_SQRT_Y_DAG, + SQRT_SQRT_Z, + SQRT_SQRT_Z_DAG, + CCX, + CCY, + CCZ, +) + + +class MagicUpToClifford(ISATransform): + """ + An ISA transform that adds Clifford equivalent representations of magic + states. For example, if the input ISA contains a T gate, the provided ISA + will also contain ``SQRT_SQRT_X``, ``SQRT_SQRT_X_DAG``, ``SQRT_SQRT_Y``, + ``SQRT_SQRT_Y_DAG``, and ``T_DAG``. The same is applied for ``CCZ`` gates and + their Clifford equivalents. + + Example: + + .. 
code-block:: python + app = SomeApplication() + arch = SomeArchitecture() + + # This will contain CCX states + trace_query = PSSPC.q(ccx_magic_states=True) * LatticeSurgery.q() + + # This will contain CCZ states + isa_query = SurfaceCode.q() * Litinski19Factory.q() + + # There will be no results from the estimation because there is no + # instruction to support CCX magic states in the query + results = estimate(app, arch, isa_query, trace_query) + assert len(results) == 0 + + # We solve this by wrapping the Litinski19Factory with the + # MagicUpToClifford transform, which transforms the CCZ states in the + # provided ISA into CCX states. + isa_query = SurfaceCode.q() * MagicUpToClifford.q(source=Litinski19Factory.q()) + + # Now we will get results + results = estimate(app, arch, isa_query, trace_query) + assert len(results) != 0 + """ + + @staticmethod + def required_isa() -> ISARequirements: + return ISARequirements() + + def provided_isa(self, impl_isa, ctx: ISAContext) -> Generator[ISA, None, None]: + # Families of equivalent gates under Clifford conjugation. + families = [ + [ + SQRT_SQRT_X, + SQRT_SQRT_X_DAG, + SQRT_SQRT_Y, + SQRT_SQRT_Y_DAG, + SQRT_SQRT_Z, + SQRT_SQRT_Z_DAG, + ], + [CCX, CCY, CCZ], + ] + + # For each family, if any member of the family is present in the input ISA, add all members of the family to the provided ISA. + for family in families: + for id in family: + if id in impl_isa: + instr = impl_isa[id] + for equivalent_id in family: + if equivalent_id != id: + node_idx = ctx.add_instruction( + instr.with_id(equivalent_id), + transform=self, + source=[instr], + ) + impl_isa.add_node(equivalent_id, node_idx) + break # Check next family + + yield impl_isa diff --git a/source/qdk_package/qdk/qre/models/qec/__init__.py b/source/qdk_package/qdk/qre/models/qec/__init__.py new file mode 100644 index 0000000000..4e4cf816f7 --- /dev/null +++ b/source/qdk_package/qdk/qre/models/qec/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) Microsoft Corporation. 
+# Licensed under the MIT License. + +from ._surface_code import SurfaceCode +from ._three_aux import ThreeAux +from ._yoked import OneDimensionalYokedSurfaceCode, TwoDimensionalYokedSurfaceCode + +__all__ = [ + "SurfaceCode", + "ThreeAux", + "OneDimensionalYokedSurfaceCode", + "TwoDimensionalYokedSurfaceCode", +] diff --git a/source/qdk_package/qdk/qre/models/qec/_surface_code.py b/source/qdk_package/qdk/qre/models/qec/_surface_code.py new file mode 100644 index 0000000000..079187635a --- /dev/null +++ b/source/qdk_package/qdk/qre/models/qec/_surface_code.py @@ -0,0 +1,154 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from __future__ import annotations +from dataclasses import KW_ONLY, dataclass, field +from typing import Generator, Optional +from ..._instruction import ( + ISA, + ISARequirements, + ISATransform, + constraint, + ConstraintBound, + LOGICAL, +) +from ..._isa_enumeration import ISAContext +from ..._qre import linear_function +from ...instruction_ids import CNOT, H, LATTICE_SURGERY, MEAS_Z +from ...property_keys import ( + SURFACE_CODE_ONE_QUBIT_TIME_FACTOR, + SURFACE_CODE_TWO_QUBIT_TIME_FACTOR, +) + + +@dataclass +class SurfaceCode(ISATransform): + """ + This class models the gate-based rotated surface code. + + Attributes: + crossing_prefactor: float + The prefactor for logical error rate due to error correction + crossings. (Default is 0.03, see Eq. (11) in + [arXiv:1208.0928](https://arxiv.org/abs/1208.0928)) + error_correction_threshold: float + The error correction threshold for the surface code. (Default is + 0.01 (1%), see [arXiv:1009.3686](https://arxiv.org/abs/1009.3686)) + one_qubit_gate_depth: int + The depth of one-qubit gates in each syndrome extraction cycle. + (Default is 1, see Fig. 2 in [arXiv:1009.3686](https://arxiv.org/abs/1009.3686)) + two_qubit_gate_depth: int + The depth of two-qubit gates in each syndrome extraction cycle. + (Default is 4, see Fig. 
2 in [arXiv:1009.3686](https://arxiv.org/abs/1009.3686)) + code_cycle_override: Optional[int] + If provided, this value will be used as the time for each syndrome + extraction cycle instead of the default calculation based on gate + times and depths. (Default is None) + code_cycle_offset: int + An additional time offset to add to the syndrome extraction cycle + time. (Default is 0) + + Hyper parameters: + distance: int + The code distance of the surface code. + + References: + + - Dominic Horsman, Austin G. Fowler, Simon Devitt, Rodney Van Meter: Surface + code quantum computing by lattice surgery, + [arXiv:1111.4022](https://arxiv.org/abs/1111.4022) + - Austin G. Fowler, Matteo Mariantoni, John M. Martinis, Andrew N. Cleland: + Surface codes: Towards practical large-scale quantum computation, + [arXiv:1208.0928](https://arxiv.org/abs/1208.0928) + - David S. Wang, Austin G. Fowler, Lloyd C. L. Hollenberg: Quantum computing + with nearest neighbor interactions and error rates over 1%, + [arXiv:1009.3686](https://arxiv.org/abs/1009.3686) + """ + + crossing_prefactor: float = 0.03 + error_correction_threshold: float = 0.01 + one_qubit_gate_depth: int = 1 + two_qubit_gate_depth: int = 4 + code_cycle_override: Optional[int] = None + code_cycle_offset: int = 0 + _: KW_ONLY + distance: int = field(default=3, metadata={"domain": range(3, 26, 2)}) + + @staticmethod + def required_isa() -> ISARequirements: + return ISARequirements( + constraint(H, error_rate=ConstraintBound.lt(0.01)), + constraint(CNOT, arity=2, error_rate=ConstraintBound.lt(0.01)), + constraint(MEAS_Z, error_rate=ConstraintBound.lt(0.01)), + ) + + def provided_isa( + self, impl_isa: ISA, ctx: ISAContext + ) -> Generator[ISA, None, None]: + cnot = impl_isa[CNOT] + h = impl_isa[H] + meas_z = impl_isa[MEAS_Z] + + cnot_time = cnot.expect_time() + h_time = h.expect_time() + meas_time = meas_z.expect_time() + + physical_error_rate = max( + cnot.expect_error_rate(), + h.expect_error_rate(), + 
meas_z.expect_error_rate(), + ) + + # There are d^2 data qubits and (d^2 - 1) ancilla qubits in the rotated + # surface code. (See Section 7.1 in arXiv:1111.4022) + space_formula = linear_function(2 * self.distance**2 - 1) + + # Each syndrome extraction cycle consists of ancilla preparation, 4 + # rounds of CNOTs, and measurement. (See Fig. 2 in arXiv:1009.3686); + # these may be modified by the one_qubit_gate_depth and + # two_qubit_gate_depth parameters, or scaled by the time factors + # provided in the instruction properties. The syndrome extraction cycle + # is repeated d times for a distance-d code. + one_qubit_gate_depth = self.one_qubit_gate_depth * h.get_property_or( + SURFACE_CODE_ONE_QUBIT_TIME_FACTOR, 1 + ) + two_qubit_gate_depth = self.two_qubit_gate_depth * cnot.get_property_or( + SURFACE_CODE_TWO_QUBIT_TIME_FACTOR, 1 + ) + + if self.code_cycle_override is not None: + code_cycle_time = self.code_cycle_override + else: + code_cycle_time = ( + one_qubit_gate_depth * h_time + + two_qubit_gate_depth * cnot_time + + meas_time + ) + code_cycle_time += self.code_cycle_offset + time_value = code_cycle_time * self.distance + + # See Eqs. 
(10) and (11) in arXiv:1208.0928 + error_formula = linear_function( + self.crossing_prefactor + * ( + (physical_error_rate / self.error_correction_threshold) + ** ((self.distance + 1) // 2) + ) + ) + + # We provide a generic lattice surgery instruction (See Section 3 in + # arXiv:1111.4022) + yield ctx.make_isa( + ctx.add_instruction( + LATTICE_SURGERY, + encoding=LOGICAL, + arity=None, + space=space_formula, + time=time_value, + error_rate=error_formula, + transform=self, + source=[cnot, h, meas_z], + distance=self.distance, + code_cycle_time=code_cycle_time, + ), + ) diff --git a/source/qdk_package/qdk/qre/models/qec/_three_aux.py b/source/qdk_package/qdk/qre/models/qec/_three_aux.py new file mode 100644 index 0000000000..80136e4126 --- /dev/null +++ b/source/qdk_package/qdk/qre/models/qec/_three_aux.py @@ -0,0 +1,120 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from __future__ import annotations + +from dataclasses import KW_ONLY, dataclass, field +from typing import Generator + +from ..._architecture import ISAContext +from ..._instruction import ( + LOGICAL, + ISATransform, + constraint, +) +from ..._qre import ( + ISA, + ISARequirements, + linear_function, +) +from ...instruction_ids import ( + LATTICE_SURGERY, + MEAS_X, + MEAS_XX, + MEAS_Z, + MEAS_ZZ, +) + + +@dataclass +class ThreeAux(ISATransform): + """ + This class models the pairwise measurement-based surface code with three + auxiliary qubits per stabilizer measurement. + + Hyper parameters: + distance: int + The code distance of the surface code. + single_rail: bool + Whether to use single-rail encoding. + + References: + + - Linnea Grans-Samuelsson, Ryan V. Mishmash, David Aasen, Christina Knapp, + Bela Bauer, Brad Lackey, Marcus P. 
da Silva, Parsa Bonderson: Improved + Pairwise Measurement-Based Surface Code, + [arXiv:2310.12981](https://arxiv.org/abs/2310.12981) + """ + + _: KW_ONLY + distance: int = field(default=3, metadata={"domain": range(3, 26, 2)}) + single_rail: bool = field(default=False) + + @staticmethod + def required_isa() -> ISARequirements: + return ISARequirements( + constraint(MEAS_X), + constraint(MEAS_Z), + constraint(MEAS_XX, arity=2), + constraint(MEAS_ZZ, arity=2), + ) + + def provided_isa( + self, impl_isa: ISA, ctx: ISAContext + ) -> Generator[ISA, None, None]: + meas_x = impl_isa[MEAS_X] + meas_z = impl_isa[MEAS_Z] + meas_xx = impl_isa[MEAS_XX] + meas_zz = impl_isa[MEAS_ZZ] + + gate_time = max(meas_xx.expect_time(), meas_zz.expect_time()) + + physical_error_rate = max( + meas_x.expect_error_rate(), + meas_z.expect_error_rate(), + meas_xx.expect_error_rate(), + meas_zz.expect_error_rate(), + ) + + # See arXiv:2310.12981, Table 1 and Figs. 2, 3, 4, 6, and 7 + depth = 5 if self.single_rail else 4 + + # See arXiv:2310.12981, Table 1 + error_correction_threshold = 0.0051 if self.single_rail else 0.0066 + + # See arXiv:2310.12981, Fig. 23 + crossing_prefactor = 0.05 + + # d^2 data qubits and 3 qubits for each of the d^2 - 1 stabilizer + # measurements + space_formula = linear_function(4 * self.distance**2 - 3) + + # The measurement circuits do not overlap perfectly, so there is an + # additional 4 steps that need to be accounted for independent of the + # distance (see Section 2 between Eqs. 
(2) and (3) in arXiv:2310.12981) + time_value = gate_time * (depth * self.distance + 4) + + # Typical fitting curve for surface code logical error (see + # arXiv:1208.0928) + error_formula = linear_function( + crossing_prefactor + * ( + (physical_error_rate / error_correction_threshold) + ** ((self.distance + 1) // 2) + ) + ) + + yield ctx.make_isa( + ctx.add_instruction( + LATTICE_SURGERY, + encoding=LOGICAL, + arity=None, + space=space_formula, + time=time_value, + error_rate=error_formula, + transform=self, + source=[meas_x, meas_z, meas_xx, meas_zz], + distance=self.distance, + code_cycle_time=gate_time * depth * self.distance, + ) + ) diff --git a/source/qdk_package/qdk/qre/models/qec/_yoked.py b/source/qdk_package/qdk/qre/models/qec/_yoked.py new file mode 100644 index 0000000000..9cb1b26527 --- /dev/null +++ b/source/qdk_package/qdk/qre/models/qec/_yoked.py @@ -0,0 +1,243 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from dataclasses import dataclass +from math import ceil +from typing import Generator + +from ..._instruction import ISATransform, constraint, LOGICAL +from ..._qre import ISA, ISARequirements, generic_function +from ..._architecture import ISAContext +from ...instruction_ids import LATTICE_SURGERY, MEMORY +from ...property_keys import DISTANCE + + +@dataclass +class OneDimensionalYokedSurfaceCode(ISATransform): + """ + This class models the Yoked surface code to provide a generic memory + instruction based on lattice surgery instructions from a surface code like + error correction code. + + Attributes: + crossing_prefactor: float + The prefactor for logical error rate (Default is 0.016) + error_correction_threshold: float + The error correction threshold for the surface code (Default is + 0.064) + + Hyper parameters: + shape_heuristic: ShapeHeuristic + The heuristic to determine the shape of the surface code patch for a + given number of logical qubits. 
(Default is ShapeHeuristic.MIN_AREA) + + References: + + - Craig Gidney, Michael Newman, Peter Brooks, Cody Jones: Yoked surface + codes, [arXiv:2312.04522](https://arxiv.org/abs/2312.04522) + """ + + # NOTE: The crossing_prefactor is relative to that of the underlying surface + # code. That is if the surface code model is p(SC) = + # A*(p(phy)/th(SC))^((d+1)/2), then multiplier for its yoked extension is + # crossing_prefactor*A + crossing_prefactor: float = 8 / 15 + + # NOTE: The threshold is relative to that of the underlying surface code. + # Namely, as the yoking doubles the distance, one would expect the yoked + # surface code to have a threshold of sqrt(th(SC)). However modeling shows + # it falls short of this. + error_correction_threshold: float = 64 / 10 + + @staticmethod + def required_isa() -> ISARequirements: + # We require a lattice surgery instruction that also provides the code + # distance as a property. This is necessary to compute the time + # and error rate formulas for the provided memory instruction. 
+ return ISARequirements( + constraint(LATTICE_SURGERY, LOGICAL, arity=None, distance=True), + ) + + def provided_isa( + self, impl_isa: ISA, ctx: ISAContext + ) -> Generator[ISA, None, None]: + lattice_surgery = impl_isa[LATTICE_SURGERY] + distance = lattice_surgery.get_property(DISTANCE) + assert distance is not None + + def space(arity: int) -> int: + a, b = self._min_area_shape(arity) + return lattice_surgery.expect_space(a * b) + + space_fn = generic_function(space) + + def time(arity: int) -> int: + a, b = self._min_area_shape(arity) + s = lattice_surgery.expect_time(a * b) + return s * (8 * distance * (a - 1) + 2 * distance) + + time_fn = generic_function(time) + + def error_rate(arity: int) -> float: + a, b = self._min_area_shape(arity) + rounds = 2 * (a - 2) + # logical error rate on a single surface code patch + p = lattice_surgery.expect_error_rate(1) + return ( + rounds**2 + * (a * b) ** 2 + * self.crossing_prefactor + * p + * (1 / self.error_correction_threshold) ** ((distance + 1) // 2) + ) + + error_rate_fn = generic_function(error_rate) + + yield ctx.make_isa( + ctx.add_instruction( + MEMORY, + arity=None, + encoding=LOGICAL, + space=space_fn, + time=time_fn, + error_rate=error_rate_fn, + transform=self, + source=[lattice_surgery], + distance=distance, + ) + ) + + @staticmethod + def _min_area_shape(num_qubits: int) -> tuple[int, int]: + """ + Given a number of qubits num_qubits, returns numbers (a + 1) and (b + 2) + such that a * b >= num_qubits and a * b is as small as possible. 
+ """ + + best_a = None + best_b = None + best_qubits = num_qubits**2 + + for a in range(1, num_qubits): + # Compute required number of columns to reach the required number + # of logical qubits + b = ceil(num_qubits / a) + + qubits = (a + 1) * (b + 2) + if qubits < best_qubits: + best_qubits = qubits + best_a = a + best_b = b + + assert best_a is not None + assert best_b is not None + return best_a + 1, best_b + 2 + + +@dataclass +class TwoDimensionalYokedSurfaceCode(ISATransform): + """ + This class models the Yoked surface code to provide a generic memory + instruction based on lattice surgery instructions from a surface code like + error correction code. + + Attributes: + crossing_prefactor: float + The prefactor for logical error rate (Default is 0.016) + error_correction_threshold: float + The error correction threshold for the surface code (Default is + 0.064) + + Hyper parameters: + shape_heuristic: ShapeHeuristic + The heuristic to determine the shape of the surface code patch for a + given number of logical qubits. (Default is ShapeHeuristic.MIN_AREA) + + References: + + - Craig Gidney, Michael Newman, Peter Brooks, Cody Jones: Yoked surface + codes, [arXiv:2312.04522](https://arxiv.org/abs/2312.04522) + """ + + # NOTE: The crossing_prefactor is relative to that of the underlying surface + # code. That is if the surface code model is p(SC) = + # A*(p(phy)/th(SC))^((d+1)/2), then multiplier for its yoked extension is + # crossing_prefactor*A + crossing_prefactor: float = 5 / 600 + + # NOTE: The threshold is relative to that of the underlying surface code. + # Namely, as the yoking doubles the distance, one would expect the yoked + # surface code to have a threshold of sqrt(th(SC)). However modeling shows + # it falls short of this. + error_correction_threshold: float = 2500 / 10 + + @staticmethod + def required_isa() -> ISARequirements: + # We require a lattice surgery instruction that also provides the code + # distance as a property. 
This is necessary to compute the time + # and error rate formulas for the provided memory instruction. + return ISARequirements( + constraint(LATTICE_SURGERY, LOGICAL, arity=None, distance=True), + ) + + def provided_isa( + self, impl_isa: ISA, ctx: ISAContext + ) -> Generator[ISA, None, None]: + lattice_surgery = impl_isa[LATTICE_SURGERY] + distance = lattice_surgery.get_property(DISTANCE) + assert distance is not None + + def space(arity: int) -> int: + a, b = self._square_shape(arity) + return lattice_surgery.expect_space(a * b) + + space_fn = generic_function(space) + + def time(arity: int) -> int: + a, b = self._square_shape(arity) + s = lattice_surgery.expect_time(a * b) + return s * (8 * distance * max(a - 2, b - 2) + 2 * distance) + + time_fn = generic_function(time) + + def error_rate(arity: int) -> float: + a, b = self._square_shape(arity) + rounds = 2 * max(a - 3, b - 3) + # logical error rate on a single surface code patch + p = lattice_surgery.expect_error_rate(1) + return ( + rounds**4 + * (a * b) ** 2 + * self.crossing_prefactor + * p + * (1 / self.error_correction_threshold) ** ((distance + 1) // 2) + ) + + error_rate_fn = generic_function(error_rate) + + yield ctx.make_isa( + ctx.add_instruction( + MEMORY, + arity=None, + encoding=LOGICAL, + space=space_fn, + time=time_fn, + error_rate=error_rate_fn, + transform=self, + source=[lattice_surgery], + distance=distance, + ) + ) + + @staticmethod + def _square_shape(num_qubits: int) -> tuple[int, int]: + """ + Given a number of qubits num_qubits, returns numbers (a + 2) and (b + 2) + such that a * b >= num_qubits and a and b are as close as possible. 
+ """ + + a = int(num_qubits**0.5) + while num_qubits % a != 0: + a -= 1 + b = num_qubits // a + return a + 2, b + 2 diff --git a/source/qdk_package/qdk/qre/models/qubits/__init__.py b/source/qdk_package/qdk/qre/models/qubits/__init__.py new file mode 100644 index 0000000000..ab7887faf3 --- /dev/null +++ b/source/qdk_package/qdk/qre/models/qubits/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from ._gate_based import GateBased +from ._msft import Majorana + +__all__ = ["GateBased", "Majorana"] diff --git a/source/qdk_package/qdk/qre/models/qubits/_gate_based.py b/source/qdk_package/qdk/qre/models/qubits/_gate_based.py new file mode 100644 index 0000000000..d9ee589485 --- /dev/null +++ b/source/qdk_package/qdk/qre/models/qubits/_gate_based.py @@ -0,0 +1,139 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from dataclasses import KW_ONLY, dataclass, field +from typing import Optional + +from ..._architecture import Architecture, ISAContext +from ..._instruction import ISA, Encoding +from ...instruction_ids import ( + CNOT, + CZ, + MEAS_X, + MEAS_Y, + MEAS_Z, + PAULI_I, + PAULI_X, + PAULI_Y, + PAULI_Z, + RX, + RY, + RZ, + S_DAG, + SQRT_X, + SQRT_X_DAG, + SQRT_Y, + SQRT_Y_DAG, + SQRT_SQRT_X, + SQRT_SQRT_X_DAG, + SQRT_SQRT_Y, + SQRT_SQRT_Y_DAG, + T_DAG, + H, + S, + T, +) + + +@dataclass +class GateBased(Architecture): + """ + A generic gate-based architecture. The error rate can be set arbitrarily + and is either 1e-3 or 1e-4 in the reference. + + Args: + error_rate: The error rate for all gates. Defaults to 1e-4. + gate_time: The time (in ns) for single-qubit gates. + measurement_time: The time (in ns) for measurement operations. + two_qubit_gate_time: The time (in ns) for two-qubit gates (CNOT, CZ). + If not provided, defaults to the value of ``gate_time``. + + References: + + - Michael E. Beverland, Prakash Murali, Matthias Troyer, Krysta M. 
Svore, + Torsten Hoefler, Vadym Kliuchnikov, Guang Hao Low, Mathias Soeken, Aarthi + Sundaram, Alexander Vaschillo: Assessing requirements to scale to + practical quantum advantage, + [arXiv:2211.07629](https://arxiv.org/abs/2211.07629) + - Jens Koch, Terri M. Yu, Jay Gambetta, A. A. Houck, D. I. Schuster, J. + Majer, Alexandre Blais, M. H. Devoret, S. M. Girvin, R. J. Schoelkopf: + Charge insensitive qubit design derived from the Cooper pair box, + [arXiv:cond-mat/0703002](https://arxiv.org/abs/cond-mat/0703002) + """ + + _: KW_ONLY + error_rate: float = field(default=1e-4) + gate_time: int + measurement_time: int + two_qubit_gate_time: Optional[int] = field(default=None) + + def __post_init__(self): + if self.two_qubit_gate_time is None: + self.two_qubit_gate_time = self.gate_time + + def provided_isa(self, ctx: ISAContext) -> ISA: + # Value is initialized in __post_init__ + assert self.two_qubit_gate_time is not None + + # NOTE: This can be improved with instruction coercion once implemented. 
+ instructions = [] + + # Single-qubit gates + single = [ + PAULI_I, + PAULI_X, + PAULI_Y, + PAULI_Z, + H, + SQRT_X, + SQRT_X_DAG, + SQRT_Y, + SQRT_Y_DAG, + S, + S_DAG, + SQRT_SQRT_X, + SQRT_SQRT_X_DAG, + SQRT_SQRT_Y, + SQRT_SQRT_Y_DAG, + T, + T_DAG, + RX, + RY, + RZ, + ] + + for instr in single: + instructions.append( + ctx.add_instruction( + instr, + encoding=Encoding.PHYSICAL, + arity=1, + time=self.gate_time, + error_rate=self.error_rate, + ) + ) + + for instr in [MEAS_X, MEAS_Y, MEAS_Z]: + instructions.append( + ctx.add_instruction( + instr, + encoding=Encoding.PHYSICAL, + arity=1, + time=self.measurement_time, + error_rate=self.error_rate, + ) + ) + + # Two-qubit gates + for instr in [CNOT, CZ]: + instructions.append( + ctx.add_instruction( + instr, + encoding=Encoding.PHYSICAL, + arity=2, + time=self.two_qubit_gate_time, + error_rate=self.error_rate, + ) + ) + + return ctx.make_isa(*instructions) diff --git a/source/qdk_package/qdk/qre/models/qubits/_msft.py b/source/qdk_package/qdk/qre/models/qubits/_msft.py new file mode 100644 index 0000000000..1d74300e3e --- /dev/null +++ b/source/qdk_package/qdk/qre/models/qubits/_msft.py @@ -0,0 +1,70 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from dataclasses import KW_ONLY, dataclass, field + +from ..._architecture import Architecture, ISAContext +from ...instruction_ids import ( + T, + PREP_X, + PREP_Z, + MEAS_XX, + MEAS_ZZ, + MEAS_X, + MEAS_Z, +) +from ..._instruction import ISA + + +@dataclass +class Majorana(Architecture): + """ + This class models physical instructions that may be relevant for future + Majorana qubits. For these qubits, we assume that measurements + and the physical T gate each take 1 µs. Owing to topological protection in + the hardware, we assume single and two-qubit measurement error rates + (Clifford error rates) in $10^{-4}$, $10^{-5}$, and $10^{-6}$ as a range + between realistic and optimistic targets. 
Non-Clifford operations in this + architecture do not have topological protection, so we assume a 5%, 1.5%, + and 1% error rate for non-Clifford physical T gates for the three cases, + respectively. + + References: + + - Torsten Karzig, Christina Knapp, Roman M. Lutchyn, Parsa Bonderson, + Matthew B. Hastings, Chetan Nayak, Jason Alicea, Karsten Flensberg, + Stephan Plugge, Yuval Oreg, Charles M. Marcus, Michael H. Freedman: + Scalable Designs for Quasiparticle-Poisoning-Protected Topological Quantum + Computation with Majorana Zero Modes, + [arXiv:1610.05289](https://arxiv.org/abs/1610.05289) + - Alexei Kitaev: Unpaired Majorana fermions in quantum wires, + [arXiv:cond-mat/0010440](https://arxiv.org/abs/cond-mat/0010440) + - Sankar Das Sarma, Michael Freedman, Chetan Nayak: Majorana Zero Modes and + Topological Quantum Computation, + [arXiv:1501.02813](https://arxiv.org/abs/1501.02813) + """ + + _: KW_ONLY + error_rate: float = field(default=1e-5, metadata={"domain": [1e-4, 1e-5, 1e-6]}) + + def provided_isa(self, ctx: ISAContext) -> ISA: + if abs(self.error_rate - 1e-4) <= 1e-8: + t_error_rate = 0.05 + elif abs(self.error_rate - 1e-5) <= 1e-8: + t_error_rate = 0.015 + elif abs(self.error_rate - 1e-6) <= 1e-8: + t_error_rate = 0.01 + + return ctx.make_isa( + ctx.add_instruction(PREP_X, time=1000, error_rate=self.error_rate), + ctx.add_instruction(PREP_Z, time=1000, error_rate=self.error_rate), + ctx.add_instruction( + MEAS_XX, arity=2, time=1000, error_rate=self.error_rate + ), + ctx.add_instruction( + MEAS_ZZ, arity=2, time=1000, error_rate=self.error_rate + ), + ctx.add_instruction(MEAS_X, time=1000, error_rate=self.error_rate), + ctx.add_instruction(MEAS_Z, time=1000, error_rate=self.error_rate), + ctx.add_instruction(T, time=1000, error_rate=t_error_rate), + ) diff --git a/source/qdk_package/qdk/qre/property_keys.py b/source/qdk_package/qdk/qre/property_keys.py index 3288809e9b..a4ac1bdbef 100644 --- a/source/qdk_package/qdk/qre/property_keys.py +++ 
b/source/qdk_package/qdk/qre/property_keys.py @@ -1,30 +1,13 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -# flake8: noqa F403 -# pyright: ignore[reportWildcardImportFromLibrary] +# pyright: reportAttributeAccessIssue=false -"""QRE property key constants. -This module re-exports all public symbols from [qsharp.qre.property_keys](:mod:`qsharp.qre.property_keys`), -making them available under the ``qdk.qre.property_keys`` namespace. It also -provides helpers for defining custom property keys that don't conflict with -built-in ones. +from .._native import property_keys -Requires the ``qre`` extra: ``pip install qdk[qre]``. - -Example: - - from qdk.qre.property_keys import * -""" - -try: - # Re-export the top-level qsharp.qre.property_keys names. - from qsharp.qre.property_keys import * -except Exception as ex: - raise ImportError( - "qdk.qre.property_keys requires the qre extras. Install with 'pip install \"qdk[qre]\"'." - ) from ex +for name in property_keys.__all__: + globals()[name] = getattr(property_keys, name) # Some starting index for custom properties, to avoid conflicts with the # built-in ones. We do not expect to have more than 1 million built-in diff --git a/source/qdk_package/qdk/qre/property_keys.pyi b/source/qdk_package/qdk/qre/property_keys.pyi new file mode 100644 index 0000000000..f4a097f3f7 --- /dev/null +++ b/source/qdk_package/qdk/qre/property_keys.pyi @@ -0,0 +1,23 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +DISTANCE: int +SURFACE_CODE_ONE_QUBIT_TIME_FACTOR: int +SURFACE_CODE_TWO_QUBIT_TIME_FACTOR: int +ACCELERATION: int +NUM_TS_PER_ROTATION: int +EXPECTED_SHOTS: int +RUNTIME_SINGLE_SHOT: int +EVALUATION_TIME: int +PHYSICAL_COMPUTE_QUBITS: int +PHYSICAL_FACTORY_QUBITS: int +PHYSICAL_MEMORY_QUBITS: int +MOLECULE: int +LOGICAL_COMPUTE_QUBITS: int +LOGICAL_MEMORY_QUBITS: int +ALGORITHM_COMPUTE_QUBITS: int +ALGORITHM_MEMORY_QUBITS: int +NAME: int +LOSS: int +LOGICAL_CYCLE_TIME: int +CODE_CYCLE_TIME: int diff --git a/source/qdk_package/qdk/qsharp.py b/source/qdk_package/qdk/qsharp.py index 5953233775..9d54036c6f 100644 --- a/source/qdk_package/qdk/qsharp.py +++ b/source/qdk_package/qdk/qsharp.py @@ -17,5 +17,5 @@ For full API documentation see [qsharp](:mod:`qsharp`). """ -from qsharp import * # pyright: ignore[reportWildcardImportFromLibrary] -from qsharp.utils import dump_operation # pyright: ignore[reportUnusedImport] +from ._qsharp import * # pyright: ignore[reportWildcardImportFromLibrary] +from .utils import dump_operation # pyright: ignore[reportUnusedImport] diff --git a/source/qdk_package/qdk/simulation.py b/source/qdk_package/qdk/simulation.py index 76c846b941..a54d535321 100644 --- a/source/qdk_package/qdk/simulation.py +++ b/source/qdk_package/qdk/simulation.py @@ -16,5 +16,5 @@ or correlated noise channels. """ -from qsharp._device._atom import NeutralAtomDevice -from qsharp._simulation import NoiseConfig +from ._device._atom import NeutralAtomDevice +from ._simulation import NoiseConfig diff --git a/source/qdk_package/qdk/telemetry.py b/source/qdk_package/qdk/telemetry.py new file mode 100644 index 0000000000..4114c69878 --- /dev/null +++ b/source/qdk_package/qdk/telemetry.py @@ -0,0 +1,310 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +""" +This module sends telemetry directly to Azure Monitor using a similar mechanism and +format to the Azure Monitor OpenTelemetry Python SDK. 
It only supports custom metrics of +type "counter" and "histogram" for now. Its goal is to be minimal in size and dependencies, +and easy to read to understand exactly what data is being sent. + +To use this API, simply call `log_telemetry` with the metric name, value, and any other +optional properties. The telemetry will be batched and sent at regular intervals (60 sec), +and when the process is about to exit. + +Disable qsharp Python telemetry by setting the environment variable `QSHARP_PYTHON_TELEMETRY=none`. +""" + +import atexit +import json +import locale +import logging +import os +import platform +import time +import urllib.request +import warnings + +from datetime import datetime, timezone +from queue import SimpleQueue, Empty +from threading import Thread +from typing import Any, Dict, Literal, List, TypedDict, Union + +logger = logging.getLogger(__name__) + +QSHARP_VERSION = "0.0.0.dev0" + +AIKEY = os.environ.get("QSHARP_PYTHON_AI_KEY") or "95d25b22-8b6d-448e-9677-78ad4047a95a" +AIURL = ( + os.environ.get("QSHARP_PYTHON_AI_URL") + or "https://westus2-2.in.applicationinsights.azure.com//v2.1/track" +) + +# If explicitly disabled via either environment variable, disable telemetry. This takes precedence. +# If explicitly enabled via either environment variable, enable telemetry. +# Otherwise, enable telemetry only in release builds. +_disable_values = {"0", "false", "disabled", "none"} +_enable_values = {"1", "true", "enabled"} +_env_values = { + (os.environ.get("QSHARP_PYTHON_TELEMETRY") or "").lower(), + (os.environ.get("QDK_PYTHON_TELEMETRY") or "").lower(), +} + +# `&` here is set intersection: it yields the common values between sets. +# `not _env_values & _disable_values` is True iff no disable value is present. +# `bool(_env_values & _enable_values)` is True iff any enable value is present.
+TELEMETRY_ENABLED = not _env_values & _disable_values and ( + bool(_env_values & _enable_values) or "dev" not in QSHARP_VERSION +) + +BATCH_INTERVAL_SEC = int(os.environ.get("QSHARP_PYTHON_TELEMETRY_INTERVAL") or 60) + + +# The below is taken from the Azure Monitor Python SDK +def _getlocale() -> str: + try: + with warnings.catch_warnings(): + # Workaround for https://github.com/python/cpython/issues/82986 by continuing to use getdefaultlocale() even though it has been deprecated. + # Ignore the deprecation warnings to reduce noise + warnings.simplefilter("ignore", category=DeprecationWarning) + return locale.getdefaultlocale()[0] or "" + except AttributeError: + # Use this as a fallback if locale.getdefaultlocale() doesn't exist (>Py3.13) + return locale.getlocale()[0] or "" + + +# Minimal device information to include with telemetry +AI_DEVICE_LOCALE = _getlocale() +AI_DEVICE_OS_VERSION = platform.version() + + +class Metric(TypedDict): + """Used internally for objects in the telemetry queue""" + + name: str + value: float + count: int + properties: Dict[str, Any] + type: str + + +class PendingMetric(Metric): + """Used internally to aggregate metrics before sending""" + + min: float + max: float + + +# Maintain a collection of custom metrics to log, stored by metric name with a list entry +# for each unique set of properties per metric name +pending_metrics: Dict[str, List[PendingMetric]] = {} + +# The telemetry queue is used to send telemetry from the main thread to the telemetry thread +# This simplifies any thread-safety concerns, and avoids the need for locks, etc. +telemetry_queue: Any = SimpleQueue() # type 'Any' until we get off Python 3.8 builds + + +def log_telemetry( + name: str, + value: float, + count: int = 1, + properties: Dict[str, Any] = {}, + type: Literal["counter", "histogram"] = "counter", +) -> None: + """ + Logs a custom metric with the name provided. 
Properties are optional and can be used to + capture additional context about the metric (but should be a relatively static set of values, as + each unique set of properties will be sent as a separate metric and creates a separate 'dimension' + in the backend telemetry store). + + The type can be either 'counter' or 'histogram'. A 'counter' is a simple value that is summed + over time, such as how many times an event occurs, while a 'histogram' is used to track 'quantitative' + values, such as the distribution of values over time, e.g., the duration of an operation. + + Example usage for a counter: + + log_telemetry("qir_generated", 1, properties={"profile": "base", "qsharp.version": "1.9.0"}) + + Example usage for a histogram: + + log_telemetry("simulation_duration", 123.45, type="histogram") + + """ + if not TELEMETRY_ENABLED: + return + + obj: Metric = { + "name": name, + "value": value, + "count": count, + "properties": {**properties, "qsharp.version": QSHARP_VERSION}, + "type": type, + } + + logger.debug("Queuing telemetry: %s", obj) + telemetry_queue.put(obj) + + +def _add_to_pending(metric: Metric): + """Used by the telemetry thread to aggregate metrics before sending""" + + if metric["type"] not in ["counter", "histogram"]: + raise Exception("Metric must be of type counter or histogram") + + # Get or create the entry list for this name + name_entries = pending_metrics.setdefault(metric["name"], []) + + # Try to find the entry with matching properties + # This relies on the fact dicts with matching keys/values compare equal in Python + prop_entry = next( + ( + entry + for entry in name_entries + if entry["properties"] == metric["properties"] + ), + None, + ) + if prop_entry is None: + new_entry: PendingMetric = { + **metric, + "min": metric["value"], + "max": metric["value"], + } + name_entries.append(new_entry) + else: + if prop_entry["type"] != metric["type"]: + raise Exception("Cannot mix counter and histogram for the same metric name") + 
prop_entry["value"] += metric["value"] + prop_entry["count"] += metric["count"] + prop_entry["min"] = min(prop_entry["min"], metric["value"]) + prop_entry["max"] = max(prop_entry["max"], metric["value"]) + + +def _pending_to_payload() -> List[Dict[str, Any]]: + """Converts the pending metrics to the JSON payload for Azure Monitor""" + + result_array: List[Dict[str, Any]] = [] + formatted_time = ( + datetime.now(timezone.utc) + .isoformat(timespec="microseconds") + .replace("+00:00", "Z") + ) + for name in pending_metrics: + for unique_props in pending_metrics[name]: + # The below matches the entry format for Azure Monitor REST API + entry: Dict[str, Any] = { + "ver": 1, + "name": "Microsoft.ApplicationInsights.Metric", + "time": formatted_time, + "sampleRate": 100.0, + "iKey": AIKEY, + "tags": { + "ai.device.locale": AI_DEVICE_LOCALE, + "ai.device.osVersion": AI_DEVICE_OS_VERSION, + }, + "data": { + "baseType": "MetricData", + "baseData": { + "ver": 2, + "metrics": [ + { + "name": unique_props["name"], + "value": unique_props["value"], + "count": unique_props["count"], + } + ], + "properties": unique_props["properties"], + }, + }, + } + # Histogram values differ only in that they have min/max values also + if unique_props["type"] == "histogram": + entry["data"]["baseData"]["metrics"][0]["min"] = unique_props["min"] + entry["data"]["baseData"]["metrics"][0]["max"] = unique_props["max"] + + result_array.append(entry) + + return result_array + + +def _post_telemetry() -> bool: + """Posts the pending telemetry to Azure Monitor""" + + if len(pending_metrics) == 0: + return True + + payload = json.dumps(_pending_to_payload()).encode("utf-8") + logger.debug("Sending telemetry request: %s", payload) + try: + request = urllib.request.Request(AIURL, data=payload, method="POST") + request.add_header("Content-Type", "application/json") + with urllib.request.urlopen(request, timeout=10) as response: + logger.debug("Telemetry response: %s", response.status) + # On a successful 
post, clear the pending list. (Else they will be included on the next retry) + pending_metrics.clear() + return True + + except Exception: + logger.debug( + "Failed to post telemetry. Pending metrics will be retried at the next interval." + ) + return False + + +# This is the thread that aggregates and posts telemetry at a regular interval. +# The main thread will signal the thread loop to exit when the process is about to exit. +def _telemetry_thread_start(): + next_post_sec: Union[float, None] = None + + def on_metric(msg: Metric): + nonlocal next_post_sec + + # Add to the pending batch to send next + _add_to_pending(msg) + + # Schedule the next post if we don't have one scheduled + if next_post_sec == None: + next_post_sec = time.monotonic() + BATCH_INTERVAL_SEC + + while True: + try: + # Block if no timeout, else wait a maximum of time until the next post is due + timeout: Union[float, None] = None + if next_post_sec: + timeout = max(next_post_sec - time.monotonic(), 0) + msg = telemetry_queue.get(timeout=timeout) + + if msg == "exit": + logger.debug("Exiting telemetry thread") + if not _post_telemetry(): + logger.debug("Failed to post telemetry on exit") + return + else: + on_metric(msg) + # Loop until the queue has been drained. This will cause the 'Empty' exception + # below once the queue is empty and it's time to post + continue + except Empty: + # No more telemetry within timeout, so write what we have pending + _ = _post_telemetry() + + # If we get here, it's after a post attempt. Pending will still have items if the attempt + # failed, so update the time for the next attempt in that case. 
+ if len(pending_metrics) == 0: + next_post_sec = None + else: + next_post_sec = time.monotonic() + BATCH_INTERVAL_SEC + + +# When the process is about to exit, notify the telemetry thread to flush, and wait max 3 sec before exiting anyway +def _on_exit(): + logger.debug("In on_exit handler") + telemetry_queue.put("exit") + # Wait at most 3 seconds for the telemetry thread to flush and exit + telemetry_thread.join(timeout=3) + + +# Mark the telemetry thread as a daemon thread, else it will keep the process alive when the main thread exits +if TELEMETRY_ENABLED: + telemetry_thread = Thread(target=_telemetry_thread_start, daemon=True) + telemetry_thread.start() + atexit.register(_on_exit) diff --git a/source/qdk_package/qdk/telemetry_events.py b/source/qdk_package/qdk/telemetry_events.py new file mode 100644 index 0000000000..edffb17585 --- /dev/null +++ b/source/qdk_package/qdk/telemetry_events.py @@ -0,0 +1,357 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from .telemetry import log_telemetry +import math +from typing import Union + +# For metrics such as duration, we want to capture things like how many shots or qubits in +# the additional properties. However properties shouldn't be 'continuous' values, as they +# create new 'dimensions' on the backend, which is limited, thus we want to bucket these properties. + +# See some of the notes at: https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-custom-overview#design-limitations-and-considerations + + +def get_next_power_of_ten_bucket(value: int) -> int: + if value <= 1: + return 1 + elif value >= 1000000: + # Limit the buckets upper bound + return 1000000 + else: + # Bucket into nearest (rounded up) power of 10, e.g. 75 -> 100, 450 -> 1000, etc. 
+ return 10 ** math.ceil(math.log10(value)) + + +# gets the order of magnitude for the number of qubits +def get_qubits_bucket(qubits: Union[str, int]) -> str: + if qubits == "unknown": + return "unknown" + qubits = int(qubits) + if qubits <= 1: + return "1" + elif qubits >= 50: + return "50" + else: + # integer divide by 5 to get nearest 5 + return str(qubits // 5 * 5) + + +def on_import() -> None: + log_telemetry("qsharp.import", 1) + + +def on_qdk_import() -> None: + log_telemetry("qdk.import", 1) + + +def on_run(shots: int, noise: bool, qubit_loss: bool) -> None: + log_telemetry( + "qsharp.run", + 1, + properties={ + "shots": get_next_power_of_ten_bucket(shots), + "noise": noise, + "qubit_loss": qubit_loss, + }, + ) + + +def on_run_end(durationMs: float, shots: int) -> None: + log_telemetry( + "qsharp.run.durationMs", + durationMs, + properties={"shots": get_next_power_of_ten_bucket(shots)}, + type="histogram", + ) + + +def on_run_qasm(shots: int, noise: bool, qubit_loss: bool) -> None: + log_telemetry( + "qsharp.run_qasm", + 1, + properties={ + "shots": get_next_power_of_ten_bucket(shots), + "noise": noise, + "qubit_loss": qubit_loss, + }, + ) + + +def on_run_qasm_end(durationMs: float, shots: int) -> None: + log_telemetry( + "qsharp.run_qasm.durationMs", + durationMs, + properties={"shots": get_next_power_of_ten_bucket(shots)}, + type="histogram", + ) + + +def on_eval() -> None: + log_telemetry( + "qsharp.eval", + 1, + ) + + +def on_eval_end(durationMs: float) -> None: + log_telemetry( + "qsharp.eval.durationMs", + durationMs, + type="histogram", + ) + + +def on_import_qasm() -> None: + log_telemetry( + "qsharp.import_qasm", + 1, + ) + + +def on_import_qasm_end(durationMs: float) -> None: + log_telemetry( + "qsharp.import_qasm.durationMs", + durationMs, + type="histogram", + ) + + +def on_run_cell() -> None: + log_telemetry( + "qsharp.run.cell", + 1, + ) + + +def on_run_cell_end(durationMs: float) -> None: + log_telemetry( + "qsharp.run.cell.durationMs", + 
durationMs, + type="histogram", + ) + + +def on_compile(profile: str) -> None: + log_telemetry("qsharp.compile", 1, properties={"profile": profile}) + + +def on_compile_end(durationMs: float, profile: str) -> None: + log_telemetry( + "qsharp.compile.durationMs", + durationMs, + properties={"profile": profile}, + type="histogram", + ) + + +def on_compile_qasm(profile: str) -> None: + log_telemetry("qsharp.compile_qasm", 1, properties={"profile": profile}) + + +def on_compile_qasm_end(durationMs: float, profile: str) -> None: + log_telemetry( + "qsharp.compile_qasm.durationMs", + durationMs, + properties={"profile": profile}, + type="histogram", + ) + + +def on_estimate() -> None: + log_telemetry( + "qsharp.estimate", + 1, + ) + + +def on_estimate_end(durationMs: float, qubits: Union[str, int]) -> None: + log_telemetry( + "qsharp.estimate.durationMs", + durationMs, + properties={"qubits": get_qubits_bucket(qubits)}, + type="histogram", + ) + + +def on_estimate_qasm() -> None: + log_telemetry( + "qsharp.estimate_qasm", + 1, + ) + + +def on_estimate_qasm_end(durationMs: float, qubits: Union[str, int]) -> None: + log_telemetry( + "qsharp.estimate_qasm.durationMs", + durationMs, + properties={"qubits": get_qubits_bucket(qubits)}, + type="histogram", + ) + + +def on_circuit() -> None: + log_telemetry( + "qsharp.circuit", + 1, + ) + + +def on_circuit_end(durationMs: float) -> None: + log_telemetry( + "qsharp.circuit.durationMs", + durationMs, + type="histogram", + ) + + +def on_circuit_qasm() -> None: + log_telemetry( + "qsharp.circuit_qasm", + 1, + ) + + +def on_circuit_qasm_end(durationMs: float) -> None: + log_telemetry( + "qsharp.circuit_qasm.durationMs", + durationMs, + type="histogram", + ) + + +# Qiskit telemetry events + + +def on_qiskit_run(shots: int, num_circuits: int) -> None: + log_telemetry( + "qiskit.run", + 1, + properties={ + "shots": get_next_power_of_ten_bucket(shots), + "circuits": get_next_power_of_ten_bucket(num_circuits), + }, + ) + + +def 
on_qiskit_run_end(shots: int, num_circuits: int, duration_ms: float) -> None: + log_telemetry( + "qiskit.run.durationMs", + duration_ms, + properties={ + "shots": get_next_power_of_ten_bucket(shots), + "circuits": get_next_power_of_ten_bucket(num_circuits), + }, + type="histogram", + ) + + +def on_qiskit_run_re() -> None: + log_telemetry( + "qiskit.run.re", + 1, + ) + + +def on_qiskit_run_re_end(duration_ms: float) -> None: + log_telemetry( + "qiskit.run.re.durationMs", + duration_ms, + type="histogram", + ) + + +def on_neutral_atom_init(default_layout: bool) -> None: + log_telemetry( + "neutral_atom.device.init", + 1, + properties={"default_layout": default_layout}, + ) + + +def on_neutral_atom_compile() -> None: + log_telemetry( + "neutral_atom.device.compile", + 1, + ) + + +def on_neutral_atom_compile_end(duration_ms: float) -> None: + log_telemetry( + "neutral_atom.device.compile.durationMs", + duration_ms, + type="histogram", + ) + + +def on_neutral_atom_trace() -> None: + log_telemetry( + "neutral_atom.device.trace", + 1, + ) + + +def on_neutral_atom_trace_end(duration_ms: float) -> None: + log_telemetry( + "neutral_atom.device.trace.durationMs", + duration_ms, + type="histogram", + ) + + +def on_neutral_atom_cpu_fallback() -> None: + log_telemetry( + "neutral_atom.device.cpu_fallback", + 1, + ) + + +def on_neutral_atom_simulate(shots: int, noise: bool, type: str) -> None: + log_telemetry( + "neutral_atom.device.simulate", + 1, + properties={ + "shots": get_next_power_of_ten_bucket(shots), + "noise": noise, + "type": type, + }, + ) + + +def on_neutral_atom_simulate_end( + duration_ms: float, shots: int, noise: bool, type: str +) -> None: + log_telemetry( + "neutral_atom.device.simulate.durationMs", + duration_ms, + properties={ + "shots": get_next_power_of_ten_bucket(shots), + "noise": noise, + "type": type, + }, + type="histogram", + ) + + +# QRE telemetry events + + +def on_qre_estimate(post_process: bool, use_graph: bool) -> None: + log_telemetry( + 
"qsharp.qre.estimate", + 1, + properties={ + "post_process": post_process, + "use_graph": use_graph, + }, + ) + + +def on_qre_application_created(application_type: str) -> None: + log_telemetry( + "qsharp.qre.application.created", + 1, + properties={ + "application_type": application_type, + }, + ) diff --git a/source/qdk_package/qdk/utils/__init__.py b/source/qdk_package/qdk/utils/__init__.py new file mode 100644 index 0000000000..03d71482e7 --- /dev/null +++ b/source/qdk_package/qdk/utils/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from ._utils import dump_operation + +__all__ = [ + "dump_operation", +] diff --git a/source/qdk_package/qdk/utils/_utils.py b/source/qdk_package/qdk/utils/_utils.py new file mode 100644 index 0000000000..26984dc4db --- /dev/null +++ b/source/qdk_package/qdk/utils/_utils.py @@ -0,0 +1,50 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +from .._qsharp import run +from typing import List +import math + + +def dump_operation(operation: str, num_qubits: int) -> List[List[complex]]: + """ + Returns a square matrix of complex numbers representing the operation performed. + + :param operation: The operation to be performed, which must operate on a list of qubits. + :param num_qubits: The number of qubits to be used. + + :return: The matrix representing the operation. 
+ :rtype: List[List[complex]] + """ + code = f"""{{ + let op = {operation}; + use (targets, extra) = (Qubit[{num_qubits}], Qubit[{num_qubits}]); + for i in 0..{num_qubits}-1 {{ + H(targets[i]); + CNOT(targets[i], extra[i]); + }} + operation ApplyOp (op : (Qubit[] => Unit), targets : Qubit[]) : Unit {{ op(targets); }} + ApplyOp(op, targets); + Microsoft.Quantum.Diagnostics.DumpMachine(); + ResetAll(targets + extra); + }}""" + result = run(code, shots=1, save_events=True)[0] + state = result["events"][-1].state_dump().get_dict() + num_entries = pow(2, num_qubits) + factor = math.sqrt(num_entries) + ndigits = 6 + matrix = [] + for i in range(num_entries): + matrix += [[]] + for j in range(num_entries): + entry = state.get(i * num_entries + j) + if entry is None: + matrix[i] += [complex(0, 0)] + else: + matrix[i] += [ + complex( + round(factor * entry.real, ndigits), + round(factor * entry.imag, ndigits), + ) + ] + return matrix From bed4e6a563d23ab28533ecd831ec7143a505c47a Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Wed, 29 Apr 2026 11:53:03 -0700 Subject: [PATCH 03/25] remove python definitions from `qsharp` --- source/pip/pyproject.toml | 10 +- source/pip/qsharp/.data/qsharp_codemirror.js | 95 - source/pip/qsharp/__init__.py | 31 +- source/pip/qsharp/_adaptive_bytecode.py | 132 -- source/pip/qsharp/_adaptive_pass.py | 983 ---------- source/pip/qsharp/_device/__init__.py | 7 +- source/pip/qsharp/_device/_atom/__init__.py | 298 +-- source/pip/qsharp/_device/_atom/_decomp.py | 510 ----- source/pip/qsharp/_device/_atom/_optimize.py | 315 ---- source/pip/qsharp/_device/_atom/_reorder.py | 114 -- source/pip/qsharp/_device/_atom/_scheduler.py | 938 --------- source/pip/qsharp/_device/_atom/_trace.py | 76 - source/pip/qsharp/_device/_atom/_utils.py | 92 - source/pip/qsharp/_device/_atom/_validate.py | 45 - source/pip/qsharp/_device/_device.py | 139 -- source/pip/qsharp/_fs.py | 89 +- source/pip/qsharp/_http.py | 29 +- source/pip/qsharp/_ipython.py | 87 +- 
source/pip/qsharp/_native.pyi | 1140 ----------- source/pip/qsharp/_qsharp.py | 1180 +----------- source/pip/qsharp/_simulation.py | 726 +------ source/pip/qsharp/applications/__init__.py | 3 + .../qsharp/applications/magnets/__init__.py | 14 - .../applications/magnets/geometry/__init__.py | 22 - .../applications/magnets/geometry/complete.py | 150 -- .../magnets/geometry/lattice1d.py | 123 -- .../magnets/geometry/lattice2d.py | 187 -- .../applications/magnets/models/__init__.py | 12 - .../applications/magnets/models/model.py | 230 --- .../applications/magnets/trotter/__init__.py | 22 - .../applications/magnets/trotter/trotter.py | 372 ---- .../magnets/utilities/__init__.py | 26 - .../magnets/utilities/hypergraph.py | 317 ---- .../applications/magnets/utilities/pauli.py | 270 --- source/pip/qsharp/code/__init__.py | 5 +- source/pip/qsharp/code/__init__.pyi | 4 - source/pip/qsharp/estimator/__init__.py | 35 +- source/pip/qsharp/estimator/_estimator.py | 1180 ------------ source/pip/qsharp/interop/__init__.py | 2 +- source/pip/qsharp/interop/cirq/__init__.py | 32 +- .../pip/qsharp/interop/cirq/_neutral_atom.py | 172 -- source/pip/qsharp/interop/cirq/_result.py | 310 --- source/pip/qsharp/interop/qiskit/__init__.py | 107 +- .../interop/qiskit/backends/__init__.py | 10 - .../interop/qiskit/backends/backend_base.py | 614 ------ .../interop/qiskit/backends/compilation.py | 36 - .../qsharp/interop/qiskit/backends/errors.py | 29 - .../qiskit/backends/neutral_atom_backend.py | 288 --- .../qiskit/backends/neutral_atom_target.py | 44 - .../interop/qiskit/backends/qirtarget.py | 191 -- .../interop/qiskit/backends/qsharp_backend.py | 233 --- .../interop/qiskit/backends/re_backend.py | 194 -- .../interop/qiskit/execution/__init__.py | 4 - .../interop/qiskit/execution/default.py | 10 - .../qsharp/interop/qiskit/jobs/__init__.py | 5 - .../pip/qsharp/interop/qiskit/jobs/qsjob.py | 194 -- .../qsharp/interop/qiskit/jobs/qsjobset.py | 150 -- .../qsharp/interop/qiskit/passes/__init__.py 
| 4 - .../interop/qiskit/passes/remove_delay.py | 22 - source/pip/qsharp/noisy_simulator/__init__.py | 17 +- .../noisy_simulator/_noisy_simulator.py | 10 - .../noisy_simulator/_noisy_simulator.pyi | 242 --- source/pip/qsharp/openqasm/__init__.py | 19 +- source/pip/qsharp/openqasm/_circuit.py | 114 -- source/pip/qsharp/openqasm/_compile.py | 100 - source/pip/qsharp/openqasm/_estimate.py | 107 -- source/pip/qsharp/openqasm/_import.py | 72 - source/pip/qsharp/openqasm/_ipython.py | 24 - source/pip/qsharp/openqasm/_run.py | 195 -- source/pip/qsharp/openqasm/_utils.py | 40 - source/pip/qsharp/qre/__init__.py | 85 +- source/pip/qsharp/qre/_application.py | 172 -- source/pip/qsharp/qre/_architecture.py | 244 --- source/pip/qsharp/qre/_enumeration.py | 242 --- source/pip/qsharp/qre/_estimation.py | 218 --- source/pip/qsharp/qre/_instruction.py | 473 ----- source/pip/qsharp/qre/_isa_enumeration.py | 428 ----- source/pip/qsharp/qre/_qre.py | 36 - source/pip/qsharp/qre/_qre.pyi | 1679 ----------------- source/pip/qsharp/qre/_results.py | 418 ---- source/pip/qsharp/qre/_trace.py | 195 -- source/pip/qsharp/qre/application/__init__.py | 14 - source/pip/qsharp/qre/application/_cirq.py | 58 - .../pip/qsharp/qre/application/_openqasm.py | 68 - source/pip/qsharp/qre/application/_qir.py | 42 - source/pip/qsharp/qre/application/_qsharp.py | 60 - source/pip/qsharp/qre/instruction_ids.py | 9 +- source/pip/qsharp/qre/instruction_ids.pyi | 99 - source/pip/qsharp/qre/interop/__init__.py | 35 - source/pip/qsharp/qre/interop/_cirq.py | 822 -------- source/pip/qsharp/qre/interop/_qir.py | 136 -- source/pip/qsharp/qre/interop/_qsharp.py | 155 -- source/pip/qsharp/qre/models/__init__.py | 23 - .../qsharp/qre/models/factories/__init__.py | 8 - .../qsharp/qre/models/factories/_litinski.py | 395 ---- .../qre/models/factories/_round_based.py | 461 ----- .../pip/qsharp/qre/models/factories/_utils.py | 90 - source/pip/qsharp/qre/models/qec/__init__.py | 13 - .../qsharp/qre/models/qec/_surface_code.py 
| 154 -- .../pip/qsharp/qre/models/qec/_three_aux.py | 120 -- source/pip/qsharp/qre/models/qec/_yoked.py | 243 --- .../pip/qsharp/qre/models/qubits/__init__.py | 7 - .../qsharp/qre/models/qubits/_gate_based.py | 139 -- source/pip/qsharp/qre/models/qubits/_msft.py | 70 - source/pip/qsharp/qre/property_keys.py | 9 +- source/pip/qsharp/qre/property_keys.pyi | 23 - source/pip/qsharp/telemetry.py | 309 +-- source/pip/qsharp/telemetry_events.py | 356 +--- source/pip/qsharp/utils/__init__.py | 7 +- source/pip/qsharp/utils/_utils.py | 50 - 110 files changed, 73 insertions(+), 21691 deletions(-) delete mode 100644 source/pip/qsharp/.data/qsharp_codemirror.js delete mode 100644 source/pip/qsharp/_adaptive_bytecode.py delete mode 100644 source/pip/qsharp/_adaptive_pass.py delete mode 100644 source/pip/qsharp/_device/_atom/_decomp.py delete mode 100644 source/pip/qsharp/_device/_atom/_optimize.py delete mode 100644 source/pip/qsharp/_device/_atom/_reorder.py delete mode 100644 source/pip/qsharp/_device/_atom/_scheduler.py delete mode 100644 source/pip/qsharp/_device/_atom/_trace.py delete mode 100644 source/pip/qsharp/_device/_atom/_utils.py delete mode 100644 source/pip/qsharp/_device/_atom/_validate.py delete mode 100644 source/pip/qsharp/_device/_device.py delete mode 100644 source/pip/qsharp/_native.pyi delete mode 100644 source/pip/qsharp/applications/magnets/__init__.py delete mode 100644 source/pip/qsharp/applications/magnets/geometry/__init__.py delete mode 100644 source/pip/qsharp/applications/magnets/geometry/complete.py delete mode 100644 source/pip/qsharp/applications/magnets/geometry/lattice1d.py delete mode 100644 source/pip/qsharp/applications/magnets/geometry/lattice2d.py delete mode 100644 source/pip/qsharp/applications/magnets/models/__init__.py delete mode 100755 source/pip/qsharp/applications/magnets/models/model.py delete mode 100644 source/pip/qsharp/applications/magnets/trotter/__init__.py delete mode 100644 
source/pip/qsharp/applications/magnets/trotter/trotter.py delete mode 100644 source/pip/qsharp/applications/magnets/utilities/__init__.py delete mode 100644 source/pip/qsharp/applications/magnets/utilities/hypergraph.py delete mode 100644 source/pip/qsharp/applications/magnets/utilities/pauli.py delete mode 100644 source/pip/qsharp/code/__init__.pyi delete mode 100644 source/pip/qsharp/estimator/_estimator.py delete mode 100644 source/pip/qsharp/interop/cirq/_neutral_atom.py delete mode 100644 source/pip/qsharp/interop/cirq/_result.py delete mode 100644 source/pip/qsharp/interop/qiskit/backends/__init__.py delete mode 100644 source/pip/qsharp/interop/qiskit/backends/backend_base.py delete mode 100644 source/pip/qsharp/interop/qiskit/backends/compilation.py delete mode 100644 source/pip/qsharp/interop/qiskit/backends/errors.py delete mode 100644 source/pip/qsharp/interop/qiskit/backends/neutral_atom_backend.py delete mode 100644 source/pip/qsharp/interop/qiskit/backends/neutral_atom_target.py delete mode 100644 source/pip/qsharp/interop/qiskit/backends/qirtarget.py delete mode 100644 source/pip/qsharp/interop/qiskit/backends/qsharp_backend.py delete mode 100644 source/pip/qsharp/interop/qiskit/backends/re_backend.py delete mode 100644 source/pip/qsharp/interop/qiskit/execution/__init__.py delete mode 100644 source/pip/qsharp/interop/qiskit/execution/default.py delete mode 100644 source/pip/qsharp/interop/qiskit/jobs/__init__.py delete mode 100644 source/pip/qsharp/interop/qiskit/jobs/qsjob.py delete mode 100644 source/pip/qsharp/interop/qiskit/jobs/qsjobset.py delete mode 100644 source/pip/qsharp/interop/qiskit/passes/__init__.py delete mode 100644 source/pip/qsharp/interop/qiskit/passes/remove_delay.py delete mode 100644 source/pip/qsharp/noisy_simulator/_noisy_simulator.py delete mode 100644 source/pip/qsharp/noisy_simulator/_noisy_simulator.pyi delete mode 100644 source/pip/qsharp/openqasm/_circuit.py delete mode 100644 source/pip/qsharp/openqasm/_compile.py 
delete mode 100644 source/pip/qsharp/openqasm/_estimate.py delete mode 100644 source/pip/qsharp/openqasm/_import.py delete mode 100644 source/pip/qsharp/openqasm/_ipython.py delete mode 100644 source/pip/qsharp/openqasm/_run.py delete mode 100644 source/pip/qsharp/openqasm/_utils.py delete mode 100644 source/pip/qsharp/qre/_application.py delete mode 100644 source/pip/qsharp/qre/_architecture.py delete mode 100644 source/pip/qsharp/qre/_enumeration.py delete mode 100644 source/pip/qsharp/qre/_estimation.py delete mode 100644 source/pip/qsharp/qre/_instruction.py delete mode 100644 source/pip/qsharp/qre/_isa_enumeration.py delete mode 100644 source/pip/qsharp/qre/_qre.py delete mode 100644 source/pip/qsharp/qre/_qre.pyi delete mode 100644 source/pip/qsharp/qre/_results.py delete mode 100644 source/pip/qsharp/qre/_trace.py delete mode 100644 source/pip/qsharp/qre/application/__init__.py delete mode 100644 source/pip/qsharp/qre/application/_cirq.py delete mode 100644 source/pip/qsharp/qre/application/_openqasm.py delete mode 100644 source/pip/qsharp/qre/application/_qir.py delete mode 100644 source/pip/qsharp/qre/application/_qsharp.py delete mode 100644 source/pip/qsharp/qre/instruction_ids.pyi delete mode 100644 source/pip/qsharp/qre/interop/__init__.py delete mode 100644 source/pip/qsharp/qre/interop/_cirq.py delete mode 100644 source/pip/qsharp/qre/interop/_qir.py delete mode 100644 source/pip/qsharp/qre/interop/_qsharp.py delete mode 100644 source/pip/qsharp/qre/models/__init__.py delete mode 100644 source/pip/qsharp/qre/models/factories/__init__.py delete mode 100644 source/pip/qsharp/qre/models/factories/_litinski.py delete mode 100644 source/pip/qsharp/qre/models/factories/_round_based.py delete mode 100644 source/pip/qsharp/qre/models/factories/_utils.py delete mode 100644 source/pip/qsharp/qre/models/qec/__init__.py delete mode 100644 source/pip/qsharp/qre/models/qec/_surface_code.py delete mode 100644 source/pip/qsharp/qre/models/qec/_three_aux.py delete 
mode 100644 source/pip/qsharp/qre/models/qec/_yoked.py delete mode 100644 source/pip/qsharp/qre/models/qubits/__init__.py delete mode 100644 source/pip/qsharp/qre/models/qubits/_gate_based.py delete mode 100644 source/pip/qsharp/qre/models/qubits/_msft.py delete mode 100644 source/pip/qsharp/qre/property_keys.pyi delete mode 100644 source/pip/qsharp/utils/_utils.py diff --git a/source/pip/pyproject.toml b/source/pip/pyproject.toml index 1e16486b09..3331449980 100644 --- a/source/pip/pyproject.toml +++ b/source/pip/pyproject.toml @@ -3,6 +3,7 @@ name = "qsharp" version = "0.0.0" readme = "README.md" requires-python = ">= 3.10" +dependencies = ["qdk==0.0.0"] classifiers = [ "License :: OSI Approved :: MIT License", "Development Status :: 5 - Production/Stable", @@ -13,7 +14,6 @@ classifiers = [ "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python", - "Programming Language :: Rust", "Operating System :: MacOS", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX :: Linux", @@ -27,8 +27,8 @@ cirq = ["cirq-core>=1.6.1,<1.7"] qre = ["cirq-core==1.6.1,<1.7", "pandas>=2.1", "ply>=3.11", "pyqir>=0.12.3,<0.13"] [build-system] -requires = ["maturin ~= 1.10.2"] -build-backend = "maturin" +requires = ["setuptools>=64", "wheel"] +build-backend = "setuptools.build_meta" -[tool.maturin] -module-name = "qsharp._native" +[tool.setuptools.packages.find] +where = ["."] diff --git a/source/pip/qsharp/.data/qsharp_codemirror.js b/source/pip/qsharp/.data/qsharp_codemirror.js deleted file mode 100644 index 8078c1a8cb..0000000000 --- a/source/pip/qsharp/.data/qsharp_codemirror.js +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. - -// This file provides CodeMirror syntax highlighting for Q# magic cells -// in classic Jupyter Notebooks. It does nothing in other (Jupyter Notebook 7, -// VS Code, Azure Notebooks, etc.) environments. 
- -// Detect the prerequisites and do nothing if they don't exist. -if (window.require && window.CodeMirror && window.Jupyter) { - // The simple mode plugin for CodeMirror is not loaded by default, so require it. - window.require(["codemirror/addon/mode/simple"], function defineMode() { - let rules = [ - { - token: "comment", - regex: /(\/\/).*/, - beginWord: false, - }, - { - token: "string", - regex: String.raw`^\"(?:[^\"\\]|\\[\s\S])*(?:\"|$)`, - beginWord: false, - }, - { - token: "keyword", - regex: String.raw`(namespace|open|as|operation|function|body|adjoint|newtype|controlled|internal)\b`, - beginWord: true, - }, - { - token: "keyword", - regex: String.raw`(if|elif|else|repeat|until|fixup|for|in|return|fail|within|apply)\b`, - beginWord: true, - }, - { - token: "keyword", - regex: String.raw`(Adjoint|Controlled|Adj|Ctl|is|self|auto|distribute|invert|intrinsic)\b`, - beginWord: true, - }, - { - token: "keyword", - regex: String.raw`(let|set|use|borrow|mutable)\b`, - beginWord: true, - }, - { - token: "operatorKeyword", - regex: String.raw`(not|and|or)\b|(w/)`, - beginWord: true, - }, - { - token: "operatorKeyword", - regex: String.raw`(=)|(!)|(<)|(>)|(\+)|(-)|(\*)|(/)|(\^)|(%)|(\|)|(&&&)|(~~~)|(\.\.\.)|(\.\.)|(\?)`, - beginWord: false, - }, - { - token: "meta", - regex: String.raw`(Int|BigInt|Double|Bool|Qubit|Pauli|Result|Range|String|Unit)\b`, - beginWord: true, - }, - { - token: "atom", - regex: String.raw`(true|false|Pauli(I|X|Y|Z)|One|Zero)\b`, - beginWord: true, - }, - ]; - let simpleRules = []; - for (let rule of rules) { - simpleRules.push({ - token: rule.token, - regex: new RegExp(rule.regex, "g"), - sol: rule.beginWord, - }); - if (rule.beginWord) { - // Need an additional rule due to the fact that CodeMirror simple mode doesn't work with ^ token - simpleRules.push({ - token: rule.token, - regex: new RegExp(String.raw`\W` + rule.regex, "g"), - sol: false, - }); - } - } - - // Register the mode defined above with CodeMirror - 
window.CodeMirror.defineSimpleMode("qsharp", { start: simpleRules }); - window.CodeMirror.defineMIME("text/x-qsharp", "qsharp"); - - // Tell Jupyter to associate %%qsharp magic cells with the qsharp mode - window.Jupyter.CodeCell.options_default.highlight_modes["qsharp"] = { - reg: [/^%%qsharp/], - }; - - // Force re-highlighting of all cells the first time this code runs - for (const cell of window.Jupyter.notebook.get_cells()) { - cell.auto_highlight(); - } - }); -} diff --git a/source/pip/qsharp/__init__.py b/source/pip/qsharp/__init__.py index a6161f518c..9559b7b333 100644 --- a/source/pip/qsharp/__init__.py +++ b/source/pip/qsharp/__init__.py @@ -1,8 +1,28 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -from . import telemetry_events -from ._qsharp import ( +"""Deprecated: The ``qsharp`` package has been replaced by ``qdk``. + +All functionality previously available in ``qsharp`` is now provided by the +``qdk`` package. This package is a thin compatibility shim that re-exports the +``qdk`` public API so that existing code continues to work. + +To migrate, replace ``import qsharp`` with ``from qdk import qsharp`` or +``import qdk`` and use the ``qdk.*`` namespace directly. +""" + +import warnings as _warnings + +_warnings.warn( + "The 'qsharp' package is deprecated and will be removed in a future release. " + "Please use the 'qdk' package instead. " + "See https://github.com/microsoft/qdk for migration guidance.", + DeprecationWarning, + stacklevel=2, +) + +# Re-export the full public API from qdk so that existing code keeps working. 
+from qdk._qsharp import ( init, eval, run, @@ -23,14 +43,15 @@ CircuitGenerationMethod, ) -telemetry_events.on_import() +from qdk._native import Result, Pauli, QSharpError, TargetProfile, estimate_custom -from ._native import Result, Pauli, QSharpError, TargetProfile, estimate_custom +from qdk import telemetry_events +telemetry_events.on_import() # IPython notebook specific features try: if __IPYTHON__: # type: ignore - from ._ipython import register_magic, enable_classic_notebook_codemirror_mode + from qdk._ipython import register_magic, enable_classic_notebook_codemirror_mode register_magic() enable_classic_notebook_codemirror_mode() diff --git a/source/pip/qsharp/_adaptive_bytecode.py b/source/pip/qsharp/_adaptive_bytecode.py deleted file mode 100644 index 876a0a196f..0000000000 --- a/source/pip/qsharp/_adaptive_bytecode.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -"""Shared opcode constants for the Adaptive Profile QIR bytecode interpreter. - -These constants define the bytecode encoding used by the Python AdaptiveProfilePass -(emitter) and the Rust GPU receiver. Values must stay in sync with the Rust -``bytecode.rs`` module and the WGSL interpreter. 
- -Opcode word layout:: - - bits [7:0] = primary opcode - bits [15:8] = sub-opcode / condition code - bits [23:16] = flags - -Compose via bitwise OR: ``opcode | (sub << 8) | flag`` -Example: ``OP_ICMP | (ICMP_SLE << 8) | FLAG_SRC1_IMM`` -""" - -# ── Flags (pre-shifted to bit 16+) ────────────────────────────────────────── -FLAG_DST_IMM = 1 << 18 # dst field is an immediate value, not a register -FLAG_SRC0_IMM = 1 << 16 # src0 field is an immediate value, not a register -FLAG_SRC1_IMM = 1 << 17 # src1 field is an immediate value, not a register -FLAG_AUX0_IMM = 1 << 19 # aux0 field is an immediate value, not a register -FLAG_AUX1_IMM = 1 << 20 # aux1 field is an immediate value, not a register -FLAG_AUX2_IMM = 1 << 21 # aux2 field is an immediate value, not a register -FLAG_AUX3_IMM = 1 << 22 # aux3 field is an immediate value, not a register - -FLAG_FLOAT = 1 << 23 # operation uses float semantics - - -# ── Control Flow ───────────────────────────────────────────────────────────── -OP_NOP = 0x00 -OP_RET = 0x02 -OP_JUMP = 0x04 -OP_BRANCH = 0x05 -OP_SWITCH = 0x06 -OP_CALL = 0x07 -OP_CALL_RETURN = 0x08 - -# ── Quantum ────────────────────────────────────────────────────────────────── -OP_QUANTUM_GATE = 0x10 -OP_MEASURE = 0x11 -OP_RESET = 0x12 -OP_READ_RESULT = 0x13 -OP_RECORD_OUTPUT = 0x14 - -# ── Integer Arithmetic ─────────────────────────────────────────────────────── -OP_ADD = 0x20 -OP_SUB = 0x21 -OP_MUL = 0x22 -OP_UDIV = 0x23 -OP_SDIV = 0x24 -OP_UREM = 0x25 -OP_SREM = 0x26 - -# ── Bitwise / Shift ───────────────────────────────────────────────────────── -OP_AND = 0x28 -OP_OR = 0x29 -OP_XOR = 0x2A -OP_SHL = 0x2B -OP_LSHR = 0x2C -OP_ASHR = 0x2D - -# ── Comparison ─────────────────────────────────────────────────────────────── -OP_ICMP = 0x30 -OP_FCMP = 0x31 - -# ── Float Arithmetic ───────────────────────────────────────────────────────── -OP_FADD = 0x38 -OP_FSUB = 0x39 -OP_FMUL = 0x3A -OP_FDIV = 0x3B - -# ── Type Conversion 
────────────────────────────────────────────────────────── -OP_ZEXT = 0x40 -OP_SEXT = 0x41 -OP_TRUNC = 0x42 -OP_FPEXT = 0x43 -OP_FPTRUNC = 0x44 -OP_INTTOPTR = 0x45 -OP_FPTOSI = 0x46 -OP_SITOFP = 0x47 - -# ── SSA / Data Movement ───────────────────────────────────────────────────── -OP_PHI = 0x50 -OP_SELECT = 0x51 -OP_MOV = 0x52 -OP_CONST = 0x53 - -# ── ICmp condition codes (sub-opcode, placed in bits[15:8] via << 8) ───────── -# Reference: https://llvm.org/docs/LangRef.html#icmp-instruction -ICMP_EQ = 0 -ICMP_NE = 1 -ICMP_SLT = 2 -ICMP_SLE = 3 -ICMP_SGT = 4 -ICMP_SGE = 5 -ICMP_ULT = 6 -ICMP_ULE = 7 -ICMP_UGT = 8 -ICMP_UGE = 9 - -# ── FCmp condition codes ───────────────────────────────────────────────────── -# Reference: https://llvm.org/docs/LangRef.html#fcmp-instruction -FCMP_FALSE = 0 -FCMP_OEQ = 1 -FCMP_OGT = 2 -FCMP_OGE = 3 -FCMP_OLT = 4 -FCMP_OLE = 5 -FCMP_ONE = 6 -FCMP_ORD = 7 -FCMP_UNO = 8 -FCMP_UEQ = 9 -FCMP_UGT = 10 -FCMP_UGE = 11 -FCMP_ULT = 12 -FCMP_ULE = 13 -FCMP_UNE = 14 -FCMP_TRUE = 15 - -# ── Register type tags ─────────────────────────────────────────────────────── -REG_TYPE_BOOL = 0 -REG_TYPE_I32 = 1 -REG_TYPE_I64 = 2 -REG_TYPE_F32 = 3 -REG_TYPE_F64 = 4 -REG_TYPE_PTR = 5 - -# ── Sentinel values ────────────────────────────────────────────────────────── -VOID_RETURN = 0xFFFFFFFF # Function does not have a return value. diff --git a/source/pip/qsharp/_adaptive_pass.py b/source/pip/qsharp/_adaptive_pass.py deleted file mode 100644 index a3cd815de6..0000000000 --- a/source/pip/qsharp/_adaptive_pass.py +++ /dev/null @@ -1,983 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -"""AdaptiveProfilePass: walks Adaptive Profile QIR and emits the intermediate -format consumed by Rust. 
- -Unlike ``AggregateGatesPass`` (which subclasses ``pyqir.QirModuleVisitor`` and -only dispatches CALL instructions), this pass iterates basic blocks and -instructions directly so it can handle *all* LLVM IR opcodes required by the -Adaptive Profile specification. -""" - -from __future__ import annotations -from dataclasses import dataclass, astuple -import pyqir -import struct -from typing import Any, Dict, List, Optional, Tuple, TypeAlias, cast -from ._adaptive_bytecode import * - -# --------------------------------------------------------------------------- -# Gate name → OpID mapping (must match shader_types.rs OpID enum) -# --------------------------------------------------------------------------- - -GATE_MAP: Dict[str, int] = { - "reset": 1, - "x": 2, - "y": 3, - "z": 4, - "h": 5, - "s": 6, - "s__adj": 7, - "t": 8, - "t__adj": 9, - "sx": 10, - "sx__adj": 11, - "rx": 12, - "ry": 13, - "rz": 14, - "cnot": 15, - "cx": 15, - "cz": 16, - "cy": 29, - "rxx": 17, - "ryy": 18, - "rzz": 19, - "ccx": 20, - "m": 21, - "mz": 21, - "mresetz": 22, - "swap": 24, -} - -# Gates that take a result ID as a second argument -MEASURE_GATES = {"m", "mz", "mresetz"} - -# Gates that reset a qubit (single qubit argument, no result) -RESET_GATES = {"reset"} - -# Rotation gates that take an angle parameter as first argument -ROTATION_GATES = {"rx", "ry", "rz", "rxx", "ryy", "rzz"} - -# --------------------------------------------------------------------------- -# ICmp / FCmp predicate mappings -# --------------------------------------------------------------------------- - -ICMP_MAP = { - pyqir.IntPredicate.EQ: ICMP_EQ, - pyqir.IntPredicate.NE: ICMP_NE, - pyqir.IntPredicate.SLT: ICMP_SLT, - pyqir.IntPredicate.SLE: ICMP_SLE, - pyqir.IntPredicate.SGT: ICMP_SGT, - pyqir.IntPredicate.SGE: ICMP_SGE, - pyqir.IntPredicate.ULT: ICMP_ULT, - pyqir.IntPredicate.ULE: ICMP_ULE, - pyqir.IntPredicate.UGT: ICMP_UGT, - pyqir.IntPredicate.UGE: ICMP_UGE, -} - -FCMP_MAP = { - pyqir.FloatPredicate.FALSE: 
FCMP_FALSE, - pyqir.FloatPredicate.OEQ: FCMP_OEQ, - pyqir.FloatPredicate.OGT: FCMP_OGT, - pyqir.FloatPredicate.OGE: FCMP_OGE, - pyqir.FloatPredicate.OLT: FCMP_OLT, - pyqir.FloatPredicate.OLE: FCMP_OLE, - pyqir.FloatPredicate.ONE: FCMP_ONE, - pyqir.FloatPredicate.ORD: FCMP_ORD, - pyqir.FloatPredicate.UNO: FCMP_UNO, - pyqir.FloatPredicate.UEQ: FCMP_UEQ, - pyqir.FloatPredicate.UGT: FCMP_UGT, - pyqir.FloatPredicate.UGE: FCMP_UGE, - pyqir.FloatPredicate.ULT: FCMP_ULT, - pyqir.FloatPredicate.ULE: FCMP_ULE, - pyqir.FloatPredicate.UNE: FCMP_UNE, - pyqir.FloatPredicate.TRUE: FCMP_TRUE, -} - - -@dataclass -class AdaptiveProgram: - num_qubits: int - num_results: int - num_registers: int - entry_block: int - blocks: List[Block] - instructions: List[Instruction] - quantum_ops: List[QuantumOp] - functions: List[Function] - phi_entries: List[PhiNodeEntry] - switch_cases: List[SwitchCase] - call_args: List[CallArg] - labels: List[Label] - register_types: List[RegisterType] - - def as_dict(self): - """ - Transforms the program to a dictionary, and each of - the helper dataclasses to a tuple. This format is intended - to be used in the FFI between Python and Rust. 
- """ - return { - "num_qubits": self.num_qubits, - "num_results": self.num_results, - "num_registers": self.num_registers, - "entry_block": self.entry_block, - "blocks": [astuple(x) for x in self.blocks], - "instructions": [astuple(x) for x in self.instructions], - "quantum_ops": [astuple(x) for x in self.quantum_ops], - "functions": [astuple(x) for x in self.functions], - "phi_entries": [astuple(x) for x in self.phi_entries], - "switch_cases": [astuple(x) for x in self.switch_cases], - "call_args": self.call_args, - "labels": self.labels, - "register_types": self.register_types, - } - - -@dataclass -class Block: - block_id: int - instr_offset: int - instr_count: int - - -@dataclass -class Instruction: - opcode: int - dst: int - src0: int - src1: int - aux0: int - aux1: int - aux2: int - aux3: int - - -@dataclass -class QuantumOp: - op_id: int - q1: int - q2: int - q3: int - angle: float - - -@dataclass -class Function: - func_entry_block: int - num_params: int - param_base: int - - -@dataclass -class PhiNodeEntry: - block_id: int - val_reg: int - - -@dataclass -class SwitchCase: - case_val: int - target_block: int - - -# OpID for correlated noise (must match shader_types.rs OpID::CorrelatedNoise) -CORRELATED_NOISE_OP_ID = 131 - -CallArg: TypeAlias = int -Label: TypeAlias = str -RegisterType: TypeAlias = int - - -@dataclass -class IntOperand: - val: int = 0 - - def __post_init__(self): - # Mask to u32 range so negative Python ints become their - # two's-complement u32 representation (e.g. -7 → 0xFFFFFFF9). 
- self.val = self.val & 0xFFFFFFFF - - -class FloatOperand: - def __init__(self, val: float = 0.0) -> None: - self.val: int = encode_float_as_bits(val) - - -@dataclass -class Reg: - val: int # index in the registers table - - -def is_immediate(arg) -> bool: - return isinstance(arg, (IntOperand, FloatOperand)) - - -def prepare_immediate_flags( - *, dst=None, src0=None, src1=None, aux0=None, aux1=None, aux2=None, aux3=None -): - flags = 0 - if is_immediate(dst): - flags |= FLAG_DST_IMM - if is_immediate(src0): - flags |= FLAG_SRC0_IMM - if is_immediate(src1): - flags |= FLAG_SRC1_IMM - if is_immediate(aux0): - flags |= FLAG_AUX0_IMM - if is_immediate(aux1): - flags |= FLAG_AUX1_IMM - if is_immediate(aux2): - flags |= FLAG_AUX2_IMM - if is_immediate(aux3): - flags |= FLAG_AUX3_IMM - return flags - - -def unwrap_operands( - dst, src0, src1, aux0, aux1, aux2, aux3 -) -> Tuple[int, int, int, int, int, int, int]: - if not isinstance(dst, int): - dst = dst.val - if not isinstance(src0, int): - src0 = src0.val - if not isinstance(src1, int): - src1 = src1.val - if not isinstance(aux0, int): - aux0 = aux0.val - if not isinstance(aux1, int): - aux1 = aux1.val - if not isinstance(aux2, int): - aux2 = aux2.val - if not isinstance(aux3, int): - aux3 = aux3.val - return (dst, src0, src1, aux0, aux1, aux2, aux3) - - -def encode_float_as_bits(val: float) -> int: - return struct.unpack(" AdaptiveProgram: - """Process module and return the AdaptiveProgram. - - :param mod: The QIR module to process. - :param noise: Optional NoiseConfig. When provided, noise intrinsic calls - are resolved to correlated noise ops using the intrinsics table. - :param noise_intrinsics: Optional dict mapping noise intrinsic callee names - to noise table IDs. Takes precedence over ``noise`` if both are given. - :return: The processed adaptive program. 
- :rtype: AdaptiveProgram - """ - if mod.get_flag("arrays"): - raise ValueError("QIR arrays are not currently supported.") - - if noise_intrinsics is not None: - self._noise_intrinsics = noise_intrinsics - elif noise is not None: - # Build {name: table_id} mapping from the NoiseConfig intrinsics - intrinsics = noise.intrinsics - self._noise_intrinsics = {} - for callee_name in mod.functions: - name = callee_name.name - if name in intrinsics: - self._noise_intrinsics[name] = intrinsics.get_intrinsic_id(name) - - errors = mod.verify() - if errors is not None: - raise ValueError(f"Module verification failed: {errors}") - - # Pass 1: Assign block IDs and function IDs for all defined functions - for func in mod.functions: - if len(func.basic_blocks) > 0: - self._assign_function(func) - - # Pass 2: Walk instructions and emit encoding - for func in mod.functions: - if len(func.basic_blocks) > 0: - self._walk_function(func) - - entry_func = next(filter(pyqir.is_entry_point, mod.functions)) - num_qubits = pyqir.required_num_qubits(entry_func) - num_results = pyqir.required_num_results(entry_func) - assert isinstance(num_qubits, int) - assert isinstance(num_results, int) - - return AdaptiveProgram( - num_qubits=num_qubits, - num_results=num_results, - num_registers=self._next_reg, - entry_block=self._block_to_id[entry_func.basic_blocks[0]], - blocks=self.blocks, - instructions=self.instructions, - quantum_ops=self.quantum_ops, - functions=self.functions, - phi_entries=self.phi_entries, - switch_cases=self.switch_cases, - call_args=self.call_args, - labels=self.labels, - register_types=self.register_types, - ) - - # ------------------------------------------------------------------ - # Register allocation - # ------------------------------------------------------------------ - - def _alloc_reg(self, value: Any, type_tag: int) -> Reg: - """Allocate a new register for `value` and record its type. - - If `value` was already pre-allocated (e.g. 
as a forward reference from - a phi node), return the existing register instead of allocating a new - one. - """ - if value is not None and value in self._value_to_reg: - return self._value_to_reg[value] - reg = Reg(self._next_reg) - self._next_reg += 1 - if value is not None: - self._value_to_reg[value] = reg - self.register_types.append(type_tag) - return reg - - # ------------------------------------------------------------------ - # Instruction emission - # ------------------------------------------------------------------ - - def _emit( - self, - opcode: int, - *, - dst: int | IntOperand | FloatOperand | Reg = 0, - src0: int | IntOperand | FloatOperand | Reg = 0, - src1: int | IntOperand | FloatOperand | Reg = 0, - aux0: int | IntOperand | FloatOperand | Reg = 0, - aux1: int | IntOperand | FloatOperand | Reg = 0, - aux2: int | IntOperand | FloatOperand | Reg = 0, - aux3: int | IntOperand | FloatOperand | Reg = 0, - ) -> None: - imm_flags = prepare_immediate_flags( - dst=dst, src0=src0, src1=src1, aux0=aux0, aux1=aux1, aux2=aux2, aux3=aux3 - ) - (dst, src0, src1, aux0, aux1, aux2, aux3) = unwrap_operands( - dst, src0, src1, aux0, aux1, aux2, aux3 - ) - ins = Instruction(opcode | imm_flags, dst, src0, src1, aux0, aux1, aux2, aux3) - self.instructions.append(ins) - - def _emit_quantum_op( - self, - op_id: int, - q1: int = 0, - q2: int = 0, - q3: int = 0, - angle: float = 0.0, - ) -> int: - idx = self._next_qop - self._next_qop += 1 - qop = QuantumOp(op_id, q1, q2, q3, angle) - self.quantum_ops.append(qop) - return idx - - # ------------------------------------------------------------------ - # Operand resolution - # ------------------------------------------------------------------ - - def _resolve_operand(self, value: pyqir.Value) -> IntOperand | FloatOperand | Reg: - """Resolve a pyqir Value to a register index. - - If `value` is an already-assigned SSA register, return its index. 
- If `value` is an integer constant, allocate a register and emit - ``OP_CONST`` to materialise it. - """ - if value in self._value_to_reg: - return self._value_to_reg[value] - - if isinstance(value, pyqir.IntConstant): - val = value.value - return IntOperand(val) - - if isinstance(value, pyqir.FloatConstant): - val = value.value - return FloatOperand(val) - - # Forward reference (e.g. phi incoming from a later block). - # Pre-allocate a register; the defining instruction will reuse it - # via _alloc_reg's dedup check. - if isinstance(value, pyqir.Instruction): - return self._alloc_reg(value, self._type_tag(value.type)) - - # Constant expressions (e.g. inttoptr (i64 N to ptr)). - if isinstance(value, pyqir.Constant): - # Try extracting as a qubit/result pointer constant. - pid = pyqir.ptr_id(value) - if pid is not None: - return IntOperand(pid) - # Null pointer - if value.is_null: - reg = self._alloc_reg(value, REG_TYPE_PTR) - self._emit(OP_CONST | FLAG_SRC0_IMM, dst=reg.val, src0=0) - return reg - - raise ValueError(f"Cannot resolve operand: {type(value).__name__}") - - def _type_tag(self, ty: Any) -> int: - """Map a pyqir Type to a register type tag.""" - if isinstance(ty, pyqir.IntType): - w = ty.width - if w == 1: - return REG_TYPE_BOOL - if w <= 32: - return REG_TYPE_I32 - return REG_TYPE_I64 - if isinstance(ty, pyqir.PointerType): - return REG_TYPE_PTR - if ty.is_double: - return REG_TYPE_F64 - # Remaining floating-point types (e.g. 
float/f32) - return REG_TYPE_F32 - - # ------------------------------------------------------------------ - # Binary / unary helpers - # ------------------------------------------------------------------ - - def _emit_binary(self, opcode: int, instr: Any) -> None: - """Emit a binary arithmetic/bitwise instruction.""" - dst = self._alloc_reg(instr, self._type_tag(instr.type)) - src0 = self._resolve_operand(instr.operands[0]) - src1 = self._resolve_operand(instr.operands[1]) - self._emit(opcode, dst=dst, src0=src0, src1=src1) - - def _emit_unary(self, opcode: int, instr: Any) -> None: - """Emit a unary conversion instruction.""" - dst = self._alloc_reg(instr, self._type_tag(instr.type)) - src0 = self._resolve_operand(instr.operands[0]) - self._emit(opcode, dst=dst, src0=src0) - - def _emit_sext(self, instr: Any) -> None: - """Emit OP_SEXT with source bit width in aux0.""" - dst = self._alloc_reg(instr, self._type_tag(instr.type)) - src0 = self._resolve_operand(instr.operands[0]) - src_type = instr.operands[0].type - src_bits = src_type.width if isinstance(src_type, pyqir.IntType) else 32 - self._emit(OP_SEXT, dst=dst, src0=src0, aux0=src_bits) - - # ------------------------------------------------------------------ - # Function assignment (Pass 1) - # ------------------------------------------------------------------ - - def _assign_function(self, func: pyqir.Function) -> None: - """Assign block IDs and function IDs for a function.""" - if not pyqir.is_entry_point(func) and func.name not in self._func_to_id: - func_id = len(self._func_to_id) - self._func_to_id[func.name] = func_id - for block in func.basic_blocks: - self._block_to_id[block] = self._next_block - self._next_block += 1 - - # ------------------------------------------------------------------ - # Function walking (Pass 2) - # ------------------------------------------------------------------ - - def _walk_function(self, func: pyqir.Function) -> None: - """Walk all blocks and instructions in a function, 
emitting bytecode.""" - self._current_func_is_entry = pyqir.is_entry_point(func) - - # For non-entry functions, register parameters as registers - if not self._current_func_is_entry: - param_base = self._next_reg - for param in func.params: - self._alloc_reg( - param, REG_TYPE_PTR - ) # params are pointers (%Qubit*, %Result*) - # Record function entry in the function table - if func.name in self._func_to_id: - func_entry_block = self._block_to_id[func.basic_blocks[0]] - f = Function(func_entry_block, len(func.params), param_base) - self.functions.append(f) - - for block in func.basic_blocks: - block_id = self._block_to_id[block] - instr_offset = len(self.instructions) - for instr in block.instructions: - self._on_instruction(instr) - # NOTE: block.terminator is already included in block.instructions - # in pyqir, so we do NOT separately process it. - instr_count = len(self.instructions) - instr_offset - blk = Block(block_id, instr_offset, instr_count) - self.blocks.append(blk) - - # ------------------------------------------------------------------ - # Instruction dispatch - # ------------------------------------------------------------------ - - def _on_instruction(self, instr: pyqir.Instruction) -> None: - """Dispatch a single instruction by opcode.""" - match instr.opcode: - case pyqir.Opcode.CALL: - self._emit_call(cast(pyqir.Call, instr)) - case pyqir.Opcode.PHI: - self._emit_phi(cast(pyqir.Phi, instr)) - case pyqir.Opcode.ICMP: - self._emit_icmp(cast(pyqir.ICmp, instr)) - case pyqir.Opcode.FCMP: - self._emit_fcmp(cast(pyqir.FCmp, instr)) - case pyqir.Opcode.SWITCH: - self._emit_switch(cast(pyqir.Switch, instr)) - case pyqir.Opcode.BR: - self._emit_branch(instr) - case pyqir.Opcode.RET: - self._emit_ret(instr) - case pyqir.Opcode.SELECT: - self._emit_select(instr) - case pyqir.Opcode.ADD: - self._emit_binary(OP_ADD, instr) - case pyqir.Opcode.SUB: - self._emit_binary(OP_SUB, instr) - case pyqir.Opcode.MUL: - self._emit_binary(OP_MUL, instr) - case 
pyqir.Opcode.UDIV: - self._emit_binary(OP_UDIV, instr) - case pyqir.Opcode.SDIV: - self._emit_binary(OP_SDIV, instr) - case pyqir.Opcode.UREM: - self._emit_binary(OP_UREM, instr) - case pyqir.Opcode.SREM: - self._emit_binary(OP_SREM, instr) - case pyqir.Opcode.AND: - self._emit_binary(OP_AND, instr) - case pyqir.Opcode.OR: - self._emit_binary(OP_OR, instr) - case pyqir.Opcode.XOR: - self._emit_binary(OP_XOR, instr) - case pyqir.Opcode.SHL: - self._emit_binary(OP_SHL, instr) - case pyqir.Opcode.LSHR: - self._emit_binary(OP_LSHR, instr) - case pyqir.Opcode.ASHR: - self._emit_binary(OP_ASHR, instr) - case pyqir.Opcode.ZEXT: - self._emit_unary(OP_ZEXT, instr) - case pyqir.Opcode.SEXT: - self._emit_sext(instr) - case pyqir.Opcode.TRUNC: - self._emit_unary(OP_TRUNC, instr) - case pyqir.Opcode.FADD: - self._emit_binary(OP_FADD | FLAG_FLOAT, instr) - case pyqir.Opcode.FSUB: - self._emit_binary(OP_FSUB | FLAG_FLOAT, instr) - case pyqir.Opcode.FMUL: - self._emit_binary(OP_FMUL | FLAG_FLOAT, instr) - case pyqir.Opcode.FDIV: - self._emit_binary(OP_FDIV | FLAG_FLOAT, instr) - case pyqir.Opcode.FP_EXT: - self._emit_unary(OP_FPEXT | FLAG_FLOAT, instr) - case pyqir.Opcode.FP_TRUNC: - self._emit_unary(OP_FPTRUNC | FLAG_FLOAT, instr) - case pyqir.Opcode.FP_TO_SI: - self._emit_unary(OP_FPTOSI, instr) - case pyqir.Opcode.SI_TO_FP: - self._emit_unary(OP_SITOFP | FLAG_FLOAT, instr) - case pyqir.Opcode.INT_TO_PTR: - self._emit_inttoptr(instr) - case _: - raise ValueError(f"Unsupported instruction: {instr.opcode}") - - # ------------------------------------------------------------------ - # Call dispatch - # ------------------------------------------------------------------ - - def _emit_call(self, call: pyqir.Call) -> None: - """Dispatch a CALL instruction based on callee name.""" - callee = call.callee.name - - match callee: - case "__quantum__qis__read_result__body" | "__quantum__rt__read_result": - dst = self._alloc_reg(call, REG_TYPE_BOOL) - result_reg = 
self._resolve_result_operand(call.args[0]) - self._emit(OP_READ_RESULT, dst=dst, src0=result_reg) - case _ if callee.startswith("__quantum__qis__"): - self._emit_quantum_call(call) - case "__quantum__rt__result_record_output": - result_reg = self._resolve_result_operand(call.args[0]) - label_str = self._extract_label(call.args[1]) - label_idx = len(self.labels) - self.labels.append(label_str) - self._emit(OP_RECORD_OUTPUT, src0=result_reg, aux0=label_idx) - case "__quantum__rt__array_record_output": - # Record structure output — pass through as-is for output formatting - count = ( - call.args[0].value - if isinstance(call.args[0], pyqir.IntConstant) - else 0 - ) - label_str = self._extract_label(call.args[1]) - label_idx = len(self.labels) - self.labels.append(label_str) - self._emit( - OP_RECORD_OUTPUT, src0=count, aux0=label_idx, aux1=1 - ) # aux1=1 -> array - case "__quantum__rt__tuple_record_output": - count = ( - call.args[0].value - if isinstance(call.args[0], pyqir.IntConstant) - else 0 - ) - label_str = self._extract_label(call.args[1]) - label_idx = len(self.labels) - self.labels.append(label_str) - self._emit( - OP_RECORD_OUTPUT, src0=count, aux0=label_idx, aux1=2 - ) # aux1=2 -> tuple - case "__quantum__rt__bool_record_output": - # Bool record output - pass through - src = self._resolve_operand(call.args[0]) - label_str = self._extract_label(call.args[1]) - label_idx = len(self.labels) - self.labels.append(label_str) - self._emit( - OP_RECORD_OUTPUT, src0=src, aux0=label_idx, aux1=3 - ) # aux1=3 -> bool - case "__quantum__rt__int_record_output": - src = self._resolve_operand(call.args[0]) - label_str = self._extract_label(call.args[1]) - label_idx = len(self.labels) - self.labels.append(label_str) - self._emit( - OP_RECORD_OUTPUT, src0=src, aux0=label_idx, aux1=4 - ) # aux1=4 -> int - case ( - "__quantum__rt__initialize" - | "__quantum__rt__begin_parallel" - | "__quantum__rt__end_parallel" - | "__quantum__qis__barrier__body" - | 
"__quantum__rt__read_loss" - ): - pass # No-op - case _ if callee in self._func_to_id: - self._emit_ir_function_call(call) - case _ if "qdk_noise" in call.callee.attributes.func: - # Check if this is a noise intrinsic (custom gate with qdk_noise attribute) - self._emit_noise_intrinsic_call(call) - case _: - raise ValueError(f"Unsupported call: {callee}") - - # ------------------------------------------------------------------ - # Quantum call dispatch - # ------------------------------------------------------------------ - - def _resolve_qubit_operands( - self, args: List[pyqir.Value] - ) -> Tuple[IntOperand | Reg, IntOperand | Reg, IntOperand | Reg]: - qs: List[IntOperand | Reg] = [IntOperand(), IntOperand(), IntOperand()] - for i, arg in enumerate(args): - qs[i] = self._resolve_qubit_operand(arg) - return (qs[0], qs[1], qs[2]) - - def _resolve_qubit_operand(self, arg: pyqir.Value) -> IntOperand | Reg: - a = self._resolve_operand(arg) - assert isinstance(a, (IntOperand, Reg)) - return a - - def _resolve_result_operand(self, arg: pyqir.Value) -> IntOperand | Reg: - a = self._resolve_operand(arg) - assert isinstance(a, (IntOperand, Reg)) - return a - - def _resolve_angle_operand(self, arg: pyqir.Value) -> FloatOperand | Reg: - a = self._resolve_operand(arg) - assert isinstance(a, (FloatOperand, Reg)) - return a - - def _emit_quantum_call(self, call: pyqir.Call) -> None: - """Emit a quantum gate, measure, or reset from a ``__quantum__qis__*`` call.""" - callee_name = call.callee.name - gate_name = callee_name.replace("__quantum__qis__", "").replace("__body", "") - op_id = GATE_MAP[gate_name] - if gate_name in MEASURE_GATES: - q = self._resolve_qubit_operand(call.args[0]) - r = self._resolve_result_operand(call.args[1]) - qop_idx = self._emit_quantum_op(op_id, q.val, r.val) - self._emit( - OP_MEASURE, - aux0=qop_idx, - aux1=q, - aux2=r, - ) - return - if gate_name in RESET_GATES: - q = self._resolve_qubit_operand(call.args[0]) - qop_idx = self._emit_quantum_op(op_id, 
q.val) - self._emit( - OP_RESET, - aux0=qop_idx, - aux1=q, - ) - return - if gate_name in ROTATION_GATES: - qubit_arg_offset = 1 - angle = self._resolve_angle_operand(call.args[0]) - else: - qubit_arg_offset = 0 - angle = FloatOperand() - qubit_arg_offset = 1 if gate_name in ROTATION_GATES else 0 - q1, q2, q3 = self._resolve_qubit_operands(call.args[qubit_arg_offset:]) - qop_idx = self._emit_quantum_op(op_id, q1.val, q2.val, q3.val, angle.val) - self._emit( - OP_QUANTUM_GATE, - aux0=qop_idx, - aux1=q1, - aux2=q2, - aux3=q3, - ) - - def _emit_noise_intrinsic_call(self, call: pyqir.Call) -> None: - """Emit a noise intrinsic call. - - When a noise config is provided and the callee is a known intrinsic, - store qubit register indices in ``call_args`` (following the same - pattern as ``_emit_ir_function_call``), then emit a single - ``OP_QUANTUM_GATE`` whose ``aux1`` = qubit count and ``aux2`` = - offset into ``call_args``. The shader reads qubit IDs from - ``call_arg_table`` at runtime, supporting arbitrarily many qubits. - - When no noise config is provided, emit an identity gate (no-op). - """ - callee_name = call.callee.name - if self._noise_intrinsics is not None and callee_name in self._noise_intrinsics: - table_id = self._noise_intrinsics[callee_name] - qubit_count = len(call.args) - # Store qubit register indices in call_args, materializing - # immediates into registers (same pattern as _emit_ir_function_call). - arg_offset = len(self.call_args) - for arg in call.args: - operand = self._resolve_qubit_operand(arg) - if isinstance(operand, Reg): - self.call_args.append(operand.val) - else: - reg = self._alloc_reg(None, REG_TYPE_PTR) - self._emit(OP_MOV | FLAG_SRC0_IMM, dst=reg, src0=operand.val) - self.call_args.append(reg.val) - # QuantumOp stores table_id in q1 and qubit_count in q2. 
- qop_idx = self._emit_quantum_op( - CORRELATED_NOISE_OP_ID, table_id, qubit_count - ) - self._emit( - OP_QUANTUM_GATE, - aux0=qop_idx, - aux1=IntOperand(qubit_count), - aux2=IntOperand(arg_offset), - ) - elif self._noise_intrinsics is not None: - raise ValueError(f"Missing noise intrinsic: {callee_name}") - else: - # No noise config — no-op - pass - - # ------------------------------------------------------------------ - # Control flow emitters - # ------------------------------------------------------------------ - - def _emit_branch(self, instr: pyqir.Instruction) -> None: - """Emit jump or conditional branch.""" - operands = instr.operands - if len(operands) == 1: - # Unconditional: br label %target - target = self._block_to_id[operands[0]] - self._emit(OP_JUMP, dst=target) - else: - # Conditional: br i1 %cond, label %true, label %false - # pyqir operands: [condition, FALSE_block, TRUE_block] - cond_reg = self._resolve_operand(operands[0]) - false_block = self._block_to_id[operands[1]] - true_block = self._block_to_id[operands[2]] - self._emit(OP_BRANCH, src0=cond_reg, aux0=true_block, aux1=false_block) - - def _emit_phi(self, phi_instr: pyqir.Phi) -> None: - """Emit a PHI node with side table entries.""" - dst_reg = self._alloc_reg(phi_instr, self._type_tag(phi_instr.type)) - phi_offset = len(self.phi_entries) - for value, block in phi_instr.incoming: - operand = self._resolve_operand(value) - if isinstance(operand, Reg): - val_reg = operand.val - else: - # Immediate values must be materialized into a register - # because the GPU phi_table stores register indices. 
- reg = self._alloc_reg(None, self._type_tag(phi_instr.type)) - self._emit(OP_MOV | FLAG_SRC0_IMM, dst=reg, src0=operand.val) - val_reg = reg.val - block_id = self._block_to_id[block] - phi_entry = PhiNodeEntry(block_id, val_reg) - self.phi_entries.append(phi_entry) - count = len(phi_instr.incoming) - self._emit(OP_PHI, dst=dst_reg, aux0=phi_offset, aux1=count) - - def _emit_select(self, instr: pyqir.Instruction) -> None: - """Emit a SELECT instruction.""" - dst = self._alloc_reg(instr, self._type_tag(instr.type)) - cond = self._resolve_operand(instr.operands[0]) - true_val = self._resolve_operand(instr.operands[1]) - false_val = self._resolve_operand(instr.operands[2]) - self._emit(OP_SELECT, dst=dst, src0=cond, aux0=true_val, aux1=false_val) - - def _emit_switch(self, switch_instr: pyqir.Switch) -> None: - """Emit a SWITCH instruction with case table entries. - - NOTE: We use ``operands`` instead of the ``.cond`` / ``.cases`` - helpers because pyqir's ``Switch.cond`` returns a stale ``Function`` - reference when ``mod.functions`` has already been iterated (two-pass - compilation). ``operands`` is not affected by this behavior. - """ - # operands layout: [cond, default_block, case_val0, case_block0, ...] 
- ops = switch_instr.operands - cond_reg = self._resolve_operand(ops[0]) - default_block = self._block_to_id[ops[1]] - case_offset = len(self.switch_cases) - num_case_pairs = (len(ops) - 2) // 2 - for i in range(num_case_pairs): - case_val = ops[2 + 2 * i] - case_block = ops[2 + 2 * i + 1] - target_block = self._block_to_id[case_block] - switch_case = SwitchCase(case_val.value, target_block) - self.switch_cases.append(switch_case) - case_count = num_case_pairs - self._emit( - OP_SWITCH, - src0=cond_reg, - aux0=default_block, - aux1=case_offset, - aux2=case_count, - ) - - def _emit_ret(self, instr: Any) -> None: - """Emit RET or CALL_RETURN.""" - if not self._current_func_is_entry: - # Return from IR-defined function - if len(instr.operands) > 0: - ret_reg = self._resolve_operand(instr.operands[0]) - self._emit(OP_CALL_RETURN, src0=ret_reg) - else: - self._emit(OP_CALL_RETURN) - else: - # Return from entry point - if len(instr.operands) > 0: - ret_reg = self._resolve_operand(instr.operands[0]) - self._emit(OP_RET, dst=ret_reg) - else: - # Void return — use immediate 0 as exit code. 
- self._emit(OP_RET, dst=IntOperand(0)) - - # ------------------------------------------------------------------ - # Comparison emitters - # ------------------------------------------------------------------ - - def _emit_icmp(self, instr: Any) -> None: - """Emit an integer comparison.""" - cond_code = ICMP_MAP.get(instr.predicate, 0) - dst = self._alloc_reg(instr, REG_TYPE_BOOL) - src0 = self._resolve_operand(instr.operands[0]) - src1 = self._resolve_operand(instr.operands[1]) - self._emit(OP_ICMP | (cond_code << 8), dst=dst, src0=src0, src1=src1) - - def _emit_fcmp(self, instr: Any) -> None: - """Emit a float comparison.""" - cond_code = FCMP_MAP.get(instr.predicate, 0) - dst = self._alloc_reg(instr, REG_TYPE_BOOL) - src0 = self._resolve_operand(instr.operands[0]) - src1 = self._resolve_operand(instr.operands[1]) - self._emit( - OP_FCMP | (cond_code << 8) | FLAG_FLOAT, - dst=dst, - src0=src0, - src1=src1, - ) - - # ------------------------------------------------------------------ - # inttoptr handling - # ------------------------------------------------------------------ - - def _emit_inttoptr(self, instr: Any) -> None: - """Handle ``inttoptr`` — just propagate the source register. - - ``inttoptr i64 %v to %Qubit*`` is a no-op cast; the integer value - is the qubit/result ID. We use OP_MOV to alias the value. 
- """ - src_operand = instr.operands[0] - src_reg = self._resolve_operand(src_operand) - # Register the inttoptr result as pointing to the same register - dst = self._alloc_reg(instr, REG_TYPE_PTR) - self._emit(OP_MOV, dst=dst, src0=src_reg) - - # ------------------------------------------------------------------ - # IR-defined function call/return - # ------------------------------------------------------------------ - - def _emit_ir_function_call(self, call: Any) -> None: - """Emit OP_CALL for an IR-defined function.""" - func_name = call.callee.name - func_id = self._func_to_id[func_name] - arg_offset = len(self.call_args) - for arg in call.args: - operand = self._resolve_operand(arg) - if isinstance(operand, Reg): - self.call_args.append(operand.val) - else: - # Immediate values must be materialized into a register - # because the GPU call_arg_table stores register indices. - reg = self._alloc_reg(None, REG_TYPE_PTR) - self._emit(OP_MOV | FLAG_SRC0_IMM, dst=reg, src0=operand.val) - self.call_args.append(reg.val) - # Allocate return register if function has non-void return type - if call.type.is_void: - return_reg = VOID_RETURN # no return - else: - return_reg = self._alloc_reg(call, REG_TYPE_I32) - self._emit( - OP_CALL, - dst=return_reg, - aux0=func_id, - aux1=len(call.args), - aux2=arg_offset, - ) - - # ------------------------------------------------------------------ - # Helpers - # ------------------------------------------------------------------ - - def _extract_label(self, value: Any) -> str: - """Extract a label string from a call argument.""" - bs = pyqir.extract_byte_string(value) - if bs is not None: - return bs.decode("utf-8") - return "" diff --git a/source/pip/qsharp/_device/__init__.py b/source/pip/qsharp/_device/__init__.py index 59041732f4..1b0b1e35a3 100644 --- a/source/pip/qsharp/_device/__init__.py +++ b/source/pip/qsharp/_device/__init__.py @@ -1,8 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
-from ._device import Device - -__all__ = [ - "Device", -] +# Deprecated: use qdk._device instead. +from qdk._device import * # noqa: F401,F403 diff --git a/source/pip/qsharp/_device/_atom/__init__.py b/source/pip/qsharp/_device/_atom/__init__.py index bea15a145c..8cd552d4b2 100644 --- a/source/pip/qsharp/_device/_atom/__init__.py +++ b/source/pip/qsharp/_device/_atom/__init__.py @@ -1,299 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -from .._device import Device, Zone, ZoneType -from ..._simulation import NoiseConfig, run_qir_clifford, run_qir_cpu, run_qir_gpu -from ..._native import try_create_gpu_adapter -from ..._qsharp import QirInputData -from ... import telemetry_events - -from typing import List, Literal, Optional -import time - - -class NeutralAtomDevice(Device): - """ - Representation of a neutral atom device quantum computer. - """ - - def __init__( - self, - column_count: int = 40, - register_zone_row_count: int = 25, - interaction_zone_row_count: int = 2, - measurement_zone_row_count: int = 2, - ): - default_layout = ( - column_count == 40 - and register_zone_row_count == 25 - and interaction_zone_row_count == 2 - and measurement_zone_row_count == 2 - ) - telemetry_events.on_neutral_atom_init(default_layout) - - super().__init__( - column_count, - [ - Zone("Register 1", register_zone_row_count, ZoneType.REG), - Zone("Interaction Zone", interaction_zone_row_count, ZoneType.INTER), - Zone("Measurement Zone", measurement_zone_row_count, ZoneType.MEAS), - ], - ) - - def _init_home_locs(self): - # Set up the home locations for qubits in the NeutralAtomDevice layout. 
- assert len(self.zones) == 3 - assert ( - self.zones[0].type == ZoneType.REG - and self.zones[1].type == ZoneType.INTER - and self.zones[2].type == ZoneType.MEAS - ) - rz1_rows = range(self.zones[0].row_count - 1, -1, -1) - self.home_locs = [] - for row in range(self.zones[0].row_count): - for col in range(self.column_count): - self.home_locs.append((rz1_rows[row], col)) - - def compile( - self, - program: str | QirInputData, - verbose: bool = False, - ) -> QirInputData: - """ - Compile a QIR program for the NeutralAtomDevice device. This includes decomposing gates to the native gate set, - optimizing sequences of single qubit gates, pruning unused functions, and reordering instructions to - enable better scheduling during execution. - - :param program: The QIR program to compile, either as a string or as QirInputData. - :param verbose: If true, print detailed information about each compilation step. - :returns QirInputData: The compiled QIR program. - """ - - from ._optimize import ( - OptimizeSingleQubitGates, - PruneUnusedFunctions, - ) - from ._decomp import ( - DecomposeMultiQubitToCZ, - DecomposeSingleRotationToRz, - DecomposeSingleQubitToRzSX, - ReplaceResetWithMResetZ, - ) - from ._reorder import Reorder - from pyqir import Module, Context - - start_time = time.monotonic() - all_start_time = start_time - telemetry_events.on_neutral_atom_compile() - - name = "" - if isinstance(program, QirInputData): - name = program._name - - if verbose: - print(f"Compiling program {name} for NeutralAtomDevice device...") - - module = Module.from_ir(Context(), str(program)) - if verbose: - end_time = time.monotonic() - print(f" Loaded module in {end_time - start_time:.2f} seconds") - start_time = end_time - - OptimizeSingleQubitGates().run(module) - if verbose: - end_time = time.monotonic() - print( - f" Optimized single qubit gates in {end_time - start_time:.2f} seconds" - ) - start_time = end_time - - DecomposeMultiQubitToCZ().run(module) - if verbose: - end_time = 
time.monotonic() - print( - f" Decomposed multi-qubit gates to CZ in {end_time - start_time:.2f} seconds" - ) - start_time = end_time - - OptimizeSingleQubitGates().run(module) - if verbose: - end_time = time.monotonic() - print( - f" Optimized single qubit gates in {end_time - start_time:.2f} seconds" - ) - start_time = end_time - - DecomposeSingleRotationToRz().run(module) - if verbose: - end_time = time.monotonic() - print( - f" Decomposed single rotations to Rz in {end_time - start_time:.2f} seconds" - ) - start_time = end_time - - OptimizeSingleQubitGates().run(module) - if verbose: - end_time = time.monotonic() - print( - f" Optimized single qubit gates in {end_time - start_time:.2f} seconds" - ) - start_time = end_time - - DecomposeSingleQubitToRzSX().run(module) - if verbose: - end_time = time.monotonic() - print( - f" Decomposed single qubit gates to Rz and SX in {end_time - start_time:.2f} seconds" - ) - start_time = end_time - - OptimizeSingleQubitGates().run(module) - if verbose: - end_time = time.monotonic() - print( - f" Optimized single qubit gates in {end_time - start_time:.2f} seconds" - ) - start_time = end_time - - ReplaceResetWithMResetZ().run(module) - if verbose: - end_time = time.monotonic() - print( - f" Replaced resets with mresetz in {end_time - start_time:.2f} seconds" - ) - start_time = end_time - - PruneUnusedFunctions().run(module) - if verbose: - end_time = time.monotonic() - print(f" Pruned unused functions in {end_time - start_time:.2f} seconds") - start_time = end_time - - Reorder(self).run(module) - if verbose: - end_time = time.monotonic() - print(f" Reordered instructions in {end_time - start_time:.2f} seconds") - start_time = end_time - - end_time = time.monotonic() - telemetry_events.on_neutral_atom_compile_end((end_time - all_start_time) * 1000) - if verbose: - print( - f"Finished compiling program {name} in {end_time - all_start_time:.2f} seconds" - ) - - return QirInputData(name, str(module)) - - def show_trace(self, qir: 
str | QirInputData): - """ - Visualize the execution trace of a QIR program on the NeutralAtomDevice device using the Atoms widget. - This includes approximate layout and scheduling of the program to show the parallelism of gates and - movement of qubits during execution. - - :param qir: The QIR program to visualize, either as a string or as QirInputData. - """ - - try: - from qsharp_widgets import Atoms - except ImportError: - raise ImportError( - "The qsharp-widgets package is required for showing atom trace visualization. " - "Please install it via 'pip install \"qdk[jupyter]\"' or 'pip install qsharp-widgets'." - ) - from ._trace import Trace - from ._validate import ValidateNoConditionalBranches - from ._scheduler import Schedule - from pyqir import Module, Context - from IPython.display import display - - start_time = time.monotonic() - telemetry_events.on_neutral_atom_trace() - - # Compile and visualize the trace in one step. - compiled = self.compile(qir) - module = Module.from_ir(Context(), str(compiled)) - ValidateNoConditionalBranches().run(module) - Schedule(self).run(module) - tracer = Trace(self) - tracer.run(module) - display(Atoms(machine_layout=self.get_layout(), trace_data=tracer.trace)) - - end_time = time.monotonic() - telemetry_events.on_neutral_atom_trace_end((end_time - start_time) * 1000) - - def simulate( - self, - qir: str | QirInputData, - shots=1, - noise: NoiseConfig | None = None, - type: Optional[Literal["clifford", "cpu", "gpu"]] = None, - seed: Optional[int] = None, - ) -> List: - """ - Simulate a QIR program on the NeutralAtomDevice device. This includes approximate layout and scheduling of the program - to model the parallelism of gates and movement of qubits during execution. The simulation can optionally - include noise based on a provided noise configuration. - - :param qir: The QIR program to simulate, either as a string or as QirInputData. - :param shots: The number of shots to simulate. Defaults to 1. 
- :param noise: An optional NoiseConfig to include noise in the simulation. - :param type: The type of simulator to use: - Use `"clifford"` if your QIR only contains Clifford gates and measurements. - Use `"gpu"` if you have a GPU available in your system. - Use `"cpu"` as a fallback option if you don't have a GPU in your system. - If `None` (default), the GPU simulator will be tried first, falling back to - CPU if a suitable GPU device could not be located. - :param seed: An optional random seed for reproducibility. - :return: The results of each shot of the simulation as a list. - """ - - from ._validate import ValidateNoConditionalBranches - from ._scheduler import Schedule - from ._decomp import DecomposeRzAnglesToCliffordGates - from pyqir import Module, Context - - start_time = time.monotonic() - - using_noise = noise is not None - if noise is None: - noise = NoiseConfig() - - compiled = self.compile(qir) - module = Module.from_ir(Context(), str(compiled)) - ValidateNoConditionalBranches().run(module) - Schedule(self).run(module) - - if type is None: - try: - try_create_gpu_adapter() - type = "gpu" - except OSError: - telemetry_events.on_neutral_atom_cpu_fallback() - type = "cpu" - - telemetry_events.on_neutral_atom_simulate(shots, using_noise, type) - - match type: - case "clifford": - DecomposeRzAnglesToCliffordGates().run(module) - result = run_qir_clifford( - str(module), - shots, - noise, - seed, - ) - case "cpu": - result = run_qir_cpu(str(module), shots, noise, seed) - case "gpu": - result = run_qir_gpu(str(module), shots, noise, seed) - case _: - raise ValueError(f"Simulation type {type} is not supported") - - end_time = time.monotonic() - telemetry_events.on_neutral_atom_simulate_end( - (end_time - start_time) * 1000, shots, using_noise, type - ) - return result - - -__all__ = ["NeutralAtomDevice"] +# Deprecated: use qdk._device._atom instead. 
+from qdk._device._atom import * # noqa: F401,F403 diff --git a/source/pip/qsharp/_device/_atom/_decomp.py b/source/pip/qsharp/_device/_atom/_decomp.py deleted file mode 100644 index f51d878119..0000000000 --- a/source/pip/qsharp/_device/_atom/_decomp.py +++ /dev/null @@ -1,510 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from pyqir import ( - FloatConstant, - const, - Function, - FunctionType, - PointerType, - Type, - result, - Context, - Linkage, - QirModuleVisitor, - required_num_results, -) -from math import pi -from ._utils import TOLERANCE - - -class DecomposeMultiQubitToCZ(QirModuleVisitor): - """ - Decomposes all multi-qubit gates to CZ gates and single qubit gates. - """ - - h_func: Function - s_func: Function - sadj_func: Function - t_func: Function - tadj_func: Function - rz_func: Function - cz_func: Function - - def _on_module(self, module): - void = Type.void(module.context) - qubit_ty = PointerType(Type.void(module.context)) - self.double_ty = Type.double(module.context) - # Find or create all the needed functions. 
- for func in module.functions: - match func.name: - case "__quantum__qis__h__body": - self.h_func = func - case "__quantum__qis__s__body": - self.s_func = func - case "__quantum__qis__s__adj": - self.sadj_func = func - case "__quantum__qis__t__body": - self.t_func = func - case "__quantum__qis__t__adj": - self.tadj_func = func - case "__quantum__qis__rz__body": - self.rz_func = func - case "__quantum__qis__cz__body": - self.cz_func = func - if not hasattr(self, "h_func"): - self.h_func = Function( - FunctionType(void, [qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__h__body", - module, - ) - if not hasattr(self, "s_func"): - self.s_func = Function( - FunctionType(void, [qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__s__body", - module, - ) - if not hasattr(self, "sadj_func"): - self.sadj_func = Function( - FunctionType(void, [qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__s__adj", - module, - ) - if not hasattr(self, "t_func"): - self.t_func = Function( - FunctionType(void, [qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__t__body", - module, - ) - if not hasattr(self, "tadj_func"): - self.tadj_func = Function( - FunctionType(void, [qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__t__adj", - module, - ) - if not hasattr(self, "rz_func"): - self.rz_func = Function( - FunctionType(void, [self.double_ty, qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__rz__body", - module, - ) - if not hasattr(self, "cz_func"): - self.cz_func = Function( - FunctionType(void, [qubit_ty, qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__cz__body", - module, - ) - super()._on_module(module) - - def _on_qis_ccx(self, call, ctrl1, ctrl2, target): - self.builder.insert_before(call) - self.builder.call(self.h_func, [target]) - self.builder.call(self.tadj_func, [ctrl1]) - self.builder.call(self.tadj_func, [ctrl2]) - self.builder.call(self.h_func, [ctrl1]) - self.builder.call(self.cz_func, [target, ctrl1]) - self.builder.call(self.h_func, [ctrl1]) - 
self.builder.call(self.t_func, [ctrl1]) - self.builder.call(self.h_func, [target]) - self.builder.call(self.cz_func, [ctrl2, target]) - self.builder.call(self.h_func, [target]) - self.builder.call(self.h_func, [ctrl1]) - self.builder.call(self.cz_func, [ctrl2, ctrl1]) - self.builder.call(self.h_func, [ctrl1]) - self.builder.call(self.t_func, [target]) - self.builder.call(self.tadj_func, [ctrl1]) - self.builder.call(self.h_func, [target]) - self.builder.call(self.cz_func, [ctrl2, target]) - self.builder.call(self.h_func, [target]) - self.builder.call(self.h_func, [ctrl1]) - self.builder.call(self.cz_func, [target, ctrl1]) - self.builder.call(self.h_func, [ctrl1]) - self.builder.call(self.tadj_func, [target]) - self.builder.call(self.t_func, [ctrl1]) - self.builder.call(self.h_func, [ctrl1]) - self.builder.call(self.cz_func, [ctrl2, ctrl1]) - self.builder.call(self.h_func, [ctrl1]) - self.builder.call(self.h_func, [target]) - call.erase() - - def _on_qis_cx(self, call, ctrl, target): - self.builder.insert_before(call) - self.builder.call(self.h_func, [target]) - self.builder.call(self.cz_func, [ctrl, target]) - self.builder.call(self.h_func, [target]) - call.erase() - - def _on_qis_cy(self, call, ctrl, target): - self.builder.insert_before(call) - self.builder.call(self.sadj_func, [target]) - self.builder.call(self.h_func, [target]) - self.builder.call(self.cz_func, [ctrl, target]) - self.builder.call(self.h_func, [target]) - self.builder.call(self.s_func, [target]) - call.erase() - - def _on_qis_rxx(self, call, angle, target1, target2): - self.builder.insert_before(call) - self.builder.call(self.h_func, [target2]) - self.builder.call(self.cz_func, [target2, target1]) - self.builder.call(self.h_func, [target1]) - self.builder.call(self.rz_func, [angle, target1]) - self.builder.call(self.h_func, [target1]) - self.builder.call(self.cz_func, [target2, target1]) - self.builder.call(self.h_func, [target2]) - call.erase() - - def _on_qis_ryy(self, call, angle, target1, 
target2): - self.builder.insert_before(call) - self.builder.call(self.sadj_func, [target1]) - self.builder.call(self.sadj_func, [target2]) - self.builder.call(self.h_func, [target2]) - self.builder.call(self.cz_func, [target2, target1]) - self.builder.call(self.h_func, [target1]) - self.builder.call(self.rz_func, [angle, target1]) - self.builder.call(self.h_func, [target1]) - self.builder.call(self.cz_func, [target2, target1]) - self.builder.call(self.h_func, [target2]) - self.builder.call(self.s_func, [target2]) - self.builder.call(self.s_func, [target1]) - call.erase() - - def _on_qis_rzz(self, call, angle, target1, target2): - self.builder.insert_before(call) - self.builder.call(self.h_func, [target1]) - self.builder.call(self.cz_func, [target2, target1]) - self.builder.call(self.h_func, [target1]) - self.builder.call(self.rz_func, [angle, target1]) - self.builder.call(self.h_func, [target1]) - self.builder.call(self.cz_func, [target2, target1]) - self.builder.call(self.h_func, [target1]) - call.erase() - - def _on_qis_swap(self, call, target1, target2): - self.builder.insert_before(call) - self.builder.call(self.h_func, [target2]) - self.builder.call(self.cz_func, [target1, target2]) - self.builder.call(self.h_func, [target2]) - self.builder.call(self.h_func, [target1]) - self.builder.call(self.cz_func, [target2, target1]) - self.builder.call(self.h_func, [target1]) - self.builder.call(self.h_func, [target2]) - self.builder.call(self.cz_func, [target1, target2]) - self.builder.call(self.h_func, [target2]) - call.erase() - - -class DecomposeSingleRotationToRz(QirModuleVisitor): - """ - Decomposes all single qubit rotations to Rz gates. - """ - - h_func: Function - s_func: Function - sadj_func: Function - rz_func: Function - - def _on_module(self, module): - void = Type.void(module.context) - qubit_ty = PointerType(Type.void(module.context)) - self.double_ty = Type.double(module.context) - # Find or create all the needed functions. 
- for func in module.functions: - match func.name: - case "__quantum__qis__h__body": - self.h_func = func - case "__quantum__qis__s__body": - self.s_func = func - case "__quantum__qis__s__adj": - self.sadj_func = func - case "__quantum__qis__rz__body": - self.rz_func = func - if not hasattr(self, "h_func"): - self.h_func = Function( - FunctionType(void, [qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__h__body", - module, - ) - if not hasattr(self, "s_func"): - self.s_func = Function( - FunctionType(void, [qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__s__body", - module, - ) - if not hasattr(self, "sadj_func"): - self.sadj_func = Function( - FunctionType(void, [qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__s__adj", - module, - ) - if not hasattr(self, "rz_func"): - self.rz_func = Function( - FunctionType(void, [self.double_ty, qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__rz__body", - module, - ) - super()._on_module(module) - - def _on_qis_rx(self, call, angle, target): - self.builder.insert_before(call) - self.builder.call(self.h_func, [target]) - self.builder.call( - self.rz_func, - [angle, target], - ) - self.builder.call(self.h_func, [target]) - call.erase() - - def _on_qis_ry(self, call, angle, target): - self.builder.insert_before(call) - self.builder.call(self.sadj_func, [target]) - self.builder.call(self.h_func, [target]) - self.builder.call( - self.rz_func, - [angle, target], - ) - self.builder.call(self.h_func, [target]) - self.builder.call(self.s_func, [target]) - call.erase() - - -class DecomposeSingleQubitToRzSX(QirModuleVisitor): - """ - Decomposes all single qubit gates to Rz and Sx gates. - """ - - sx_func: Function - rz_func: Function - - def _on_module(self, module): - void = Type.void(module.context) - qubit_ty = PointerType(Type.void(module.context)) - self.double_ty = Type.double(module.context) - # Find or create all the needed functions. 
- for func in module.functions: - match func.name: - case "__quantum__qis__sx__body": - self.sx_func = func - case "__quantum__qis__rz__body": - self.rz_func = func - if not hasattr(self, "sx_func"): - self.sx_func = Function( - FunctionType(void, [qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__sx__body", - module, - ) - if not hasattr(self, "rz_func"): - self.rz_func = Function( - FunctionType(void, [self.double_ty, qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__rz__body", - module, - ) - super()._on_module(module) - - def _on_qis_h(self, call, target): - self.builder.insert_before(call) - self.builder.call( - self.rz_func, - [const(self.double_ty, pi / 2), target], - ) - self.builder.call(self.sx_func, [target]) - self.builder.call( - self.rz_func, - [const(self.double_ty, pi / 2), target], - ) - call.erase() - - def _on_qis_s(self, call, target): - self.builder.insert_before(call) - self.builder.call( - self.rz_func, - [const(self.double_ty, pi / 2), target], - ) - call.erase() - - def _on_qis_s_adj(self, call, target): - self.builder.insert_before(call) - self.builder.call( - self.rz_func, - [const(self.double_ty, -pi / 2), target], - ) - call.erase() - - def _on_qis_t(self, call, target): - self.builder.insert_before(call) - self.builder.call( - self.rz_func, - [const(self.double_ty, pi / 4), target], - ) - call.erase() - - def _on_qis_t_adj(self, call, target): - self.builder.insert_before(call) - self.builder.call( - self.rz_func, - [const(self.double_ty, -pi / 4), target], - ) - call.erase() - - def _on_qis_x(self, call, target): - self.builder.insert_before(call) - self.builder.call(self.sx_func, [target]) - self.builder.call(self.sx_func, [target]) - call.erase() - - def _on_qis_y(self, call, target): - self.builder.insert_before(call) - self.builder.call(self.sx_func, [target]) - self.builder.call(self.sx_func, [target]) - self.builder.call( - self.rz_func, - [const(self.double_ty, pi), target], - ) - call.erase() - - def _on_qis_z(self, call, 
target): - self.builder.insert_before(call) - self.builder.call( - self.rz_func, - [const(self.double_ty, pi), target], - ) - call.erase() - - -class DecomposeRzAnglesToCliffordGates(QirModuleVisitor): - """ - Ensure that the module only contains Clifford gates instead of rotation angles. - """ - - THREE_PI_OVER_2 = 3 * pi / 2 - PI_OVER_2 = pi / 2 - TWO_PI = 2 * pi - - z_func: Function - s_func: Function - sadj_func: Function - - def _on_module(self, module): - void = Type.void(module.context) - qubit_ty = PointerType(Type.void(module.context)) - self.double_ty = Type.double(module.context) - # Find or create all the needed functions. - for func in module.functions: - match func.name: - case "__quantum__qis__s__body": - self.s_func = func - case "__quantum__qis__s__adj": - self.sadj_func = func - case "__quantum__qis__z__body": - self.z_func = func - - if not hasattr(self, "s_func"): - self.s_func = Function( - FunctionType(void, [qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__s__body", - module, - ) - if not hasattr(self, "sadj_func"): - self.sadj_func = Function( - FunctionType(void, [qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__s__adj", - module, - ) - if not hasattr(self, "z_func"): - self.z_func = Function( - FunctionType(void, [qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__z__body", - module, - ) - - super()._on_module(module) - - def _on_qis_rz(self, call, angle, target): - if not isinstance(angle, FloatConstant): - raise ValueError("Angle used in RZ must be a constant") - angle = angle.value - - self.builder.insert_before(call) - - if ( - abs(angle - self.THREE_PI_OVER_2) < TOLERANCE - or abs(angle + self.PI_OVER_2) < TOLERANCE - ): - self.builder.call(self.sadj_func, [target]) - elif abs(angle - pi) < TOLERANCE or abs(angle + pi) < TOLERANCE: - self.builder.call(self.z_func, [target]) - elif ( - abs(angle - self.PI_OVER_2) < TOLERANCE - or abs(angle + self.THREE_PI_OVER_2) < TOLERANCE - ): - self.builder.call(self.s_func, [target]) - elif ( 
- angle < TOLERANCE - or abs(angle - self.TWO_PI) < TOLERANCE - or abs(angle + self.TWO_PI) < TOLERANCE - ): - # I, drop it - pass - else: - raise ValueError( - f"Angle {angle} used in RZ is not a Clifford compatible rotation angle" - ) - - call.erase() - - -class ReplaceResetWithMResetZ(QirModuleVisitor): - """ - Replaces all reset operations with a call to mresetz using a new, ignored result identifier. - """ - - context: Context - mresetz_func: Function - next_result_id: int - - def _on_module(self, module): - self.context = module.context - void = Type.void(self.context) - qubit_ty = PointerType(Type.void(self.context)) - result_ty = PointerType(Type.void(self.context)) - # Find or create the intrinsic mresetz function - for func in module.functions: - match func.name: - case "__quantum__qis__mresetz__body": - self.mresetz_func = func - if not hasattr(self, "mresetz_func"): - self.mresetz_func = Function( - FunctionType(void, [qubit_ty, result_ty]), - Linkage.EXTERNAL, - "__quantum__qis__mresetz__body", - module, - ) - super()._on_module(module) - - def _on_function(self, function): - self.next_result_id = required_num_results(function) or 0 - super()._on_function(function) - - def _on_qis_reset(self, call, target): - self.builder.insert_before(call) - # Create a new result identifier to ignore the measurement result - result_id = result(self.context, self.next_result_id) - self.next_result_id += 1 - self.builder.call(self.mresetz_func, [target, result_id]) - call.erase() diff --git a/source/pip/qsharp/_device/_atom/_optimize.py b/source/pip/qsharp/_device/_atom/_optimize.py deleted file mode 100644 index 090a2fa16b..0000000000 --- a/source/pip/qsharp/_device/_atom/_optimize.py +++ /dev/null @@ -1,315 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from pyqir import ( - Type, - Function, - FunctionType, - FloatConstant, - Linkage, - PointerType, - const, - ptr_id, - is_entry_point, - QirModuleVisitor, -) -from math import pi - -from ._utils import TOLERANCE - - -class OptimizeSingleQubitGates(QirModuleVisitor): - """ - Optimizes single qubit gates by looking for sequences of a gate and its adjoint on a given qubit. - Will also try to replace certain patterns with simpler gates. - """ - - sx_func: Function - mresetz_func: Function - - def _on_module(self, module): - void = Type.void(module.context) - qubit_ty = PointerType(Type.void(module.context)) - result_ty = qubit_ty - self.double_ty = Type.double(module.context) - self.used_qubits = set() - # Find or create the intrinsic gate functions - for func in module.functions: - match func.name: - case "__quantum__qis__mresetz__body": - self.mresetz_func = func - case "__quantum__qis__sx__body": - self.sx_func = func - if not hasattr(self, "sx_fun"): - self.sx_func = Function( - FunctionType(void, [qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__sx__body", - module, - ) - if not hasattr(self, "mresetz_func"): - self.mresetz_func = Function( - FunctionType(void, [qubit_ty, result_ty]), - Linkage.EXTERNAL, - "__quantum__qis__mresetz__body", - module, - ) - super()._on_module(module) - - def _drop_ops(self, qubits): - # Since instructions are only removed when they are canceled out by their adjoint or folded with another - # instruction, we can just pop the entries for these qubits so they start fresh with the next gates. - for qubit in qubits: - q = ptr_id(qubit) - self.qubit_ops.pop(q, None) - self.last_meas.pop(q, None) - self.used_qubits.add(q) - - def _schedule_gate(self, instr, key, name, adj): - if key in self.qubit_ops: - # There are previous operations on this qubit, so check if the last one was the adjoint of this one. 
- if self.qubit_ops[key][-1][1] == adj: - (other_instr, _) = self.qubit_ops[key].pop() - # Erase the adjoint instruction and the current instruction since they cancel out. - other_instr.erase() - instr.erase() - elif ( - len(self.qubit_ops[key]) > 1 - and name == "h" - and self.qubit_ops[key][-1][1] == "s" - and self.qubit_ops[key][-2][1] == "h" - ): - # We have a sequence of h s h, which can be replaced with a single sx. - self.builder.insert_before(instr) - self.builder.call(self.sx_func, [instr.args[0]]) - instr.erase() - (other_instr, _) = self.qubit_ops[key].pop() - other_instr.erase() - (other_instr, _) = self.qubit_ops[key].pop() - other_instr.erase() - else: - # The last operation was not the adjoint of this one, so add this instruction to the list. - self.qubit_ops[key].append((instr, name)) - self.used_qubits.add(key) - self.last_meas.pop(key, None) - - if len(self.qubit_ops[key]) == 0: - # There are no more operations on this qubit, so pop it's entry to avoid having empty lists in the dict. - self.qubit_ops.pop(key) - - else: - # No previous operations on this qubit, so create a new list from this instruction. - self.qubit_ops[key] = [(instr, name)] - self.used_qubits.add(key) - self.last_meas.pop(key, None) - - def _schedule_rotation(self, instr, key, name): - if isinstance(instr.args[0], FloatConstant): - # The angle is constant, so we can try to fold this rotation with other instances of the same rotation - # tht are constant. - if key in self.qubit_ops: - if self.qubit_ops[key][-1][1] == name and isinstance( - self.qubit_ops[key][-1][0].args[0], FloatConstant - ): - # The last operation on this qubit was also a rotation of the same type by a constant angle. 
- (other_instr, _) = self.qubit_ops[key].pop() - new_angle = instr.args[0].value + other_instr.args[0].value - sign = -1 if new_angle < 0 else 1 - abs_new_angle = abs(new_angle) - # Normalize the angle to be within 0 to 2*pi - while abs_new_angle > 2 * pi: - abs_new_angle -= 2 * pi - new_angle = sign * abs_new_angle - if ( - abs(new_angle) > TOLERANCE - and abs(abs(new_angle) - (2 * pi)) > TOLERANCE - ): - # Create a new rotation instruction with the sum of the angles, - # and insert it, but only if the angle is above our threshold. - self.builder.insert_before(instr) - new_instr = self.builder.call( - instr.callee, - [const(self.double_ty, new_angle), instr.args[1]], - ) - self.qubit_ops[key].append((new_instr, name)) - self.used_qubits.add(key) - self.last_meas.pop(key, None) - # Erase the old instructions the new rotation replaces. - other_instr.erase() - instr.erase() - else: - # Can't fold this rotation with the previous one, so just add it to the list. - self.qubit_ops[key].append((instr, name)) - self.used_qubits.add(key) - self.last_meas.pop(key, None) - - if len(self.qubit_ops[key]) == 0: - # There are no more operations on this qubit, so pop it's entry to avoid having empty lists in the dict. - self.qubit_ops.pop(key) - - else: - # No previous operations on this qubit, so create a new list from this instruction. - self.qubit_ops[key] = [(instr, name)] - self.used_qubits.add(key) - self.last_meas.pop(key, None) - else: - # This angle is not constant, so append it to the list of operations on this qubit. - if key in self.qubit_ops: - self.qubit_ops[key].append((instr, name)) - else: - self.qubit_ops[key] = [(instr, name)] - self.used_qubits.add(key) - self.last_meas.pop(key, None) - - def _on_function(self, function): - self.last_meas = {} - self.qubit_ops = {} - super()._on_function(function) - # At the end of a function, if there are any remaining entries in self.last_meas, it means - # that there were measurements on qubits that were never reset. 
Convert those into mresetz. - for key, (instr, target, result) in self.last_meas.items(): - self.builder.insert_before(instr) - self.builder.call( - self.mresetz_func, - [target, result], - ) - instr.erase() - for key in self.qubit_ops: - if self.qubit_ops[key][-1][1] == "reset": - # The last operation on this qubit was a reset, so we can drop it. - (instr, _) = self.qubit_ops[key].pop() - instr.erase() - - def _on_block(self, block): - # Each block is independent, so start from an empty list of operations per qubit. - self.qubit_ops = {} - self.last_meas = {} - super()._on_block(block) - - def _on_call_instr(self, call): - if call.callee.name == "__quantum__qis__sx__body": - self._drop_ops([call.args[0]]) - elif call.callee.name == "__quantum__qis__move__body": - self._drop_ops([call.args[0]]) - elif call.callee.name == "__quantum__qis__barrier__body": - # Don't optimize across barrier calls. Treat this as a drop of all tracked gates, - # which effectively flushes all scheduled operations. 
- self.qubit_ops = {} - self.last_meas = {} - else: - super()._on_call_instr(call) - - def _on_qis_h(self, call, target): - self._schedule_gate(call, ptr_id(target), "h", "h") - - def _on_qis_s(self, call, target): - self._schedule_gate(call, ptr_id(target), "s", "s_adj") - - def _on_qis_s_adj(self, call, target): - self._schedule_gate(call, ptr_id(target), "s_adj", "s") - - def _on_qis_t(self, call, target): - self._schedule_gate(call, ptr_id(target), "t", "t_adj") - - def _on_qis_t_adj(self, call, target): - self._schedule_gate(call, ptr_id(target), "t_adj", "t") - - def _on_qis_x(self, call, target): - self._schedule_gate(call, ptr_id(target), "x", "x") - - def _on_qis_y(self, call, target): - self._schedule_gate(call, ptr_id(target), "y", "y") - - def _on_qis_z(self, call, target): - self._schedule_gate(call, ptr_id(target), "z", "z") - - def _on_qis_rx(self, call, angle, target): - self._schedule_rotation(call, ptr_id(target), "rx") - - def _on_qis_rxx(self, call, angle, target1, target2): - self._drop_ops([target1, target2]) - - def _on_qis_ry(self, call, angle, target): - self._schedule_rotation(call, ptr_id(target), "ry") - - def _on_qis_ryy(self, call, angle, target1, target2): - self._drop_ops([target1, target2]) - - def _on_qis_rz(self, call, angle, target): - self._schedule_rotation(call, ptr_id(target), "rz") - - def _on_qis_rzz(self, call, angle, target1, target2): - self._drop_ops([target1, target2]) - - def _on_qis_ccx(self, call, ctrl1, ctrl2, target): - self._drop_ops([ctrl1, ctrl2, target]) - - def _on_qis_cx(self, call, target1, target2): - self._drop_ops([target1, target2]) - - def _on_qis_cy(self, call, target1, target2): - self._drop_ops([target1, target2]) - - def _on_qis_cz(self, call, target1, target2): - self._drop_ops([target1, target2]) - - def _on_qis_swap(self, call, target1, target2): - self._drop_ops([target1, target2]) - - def _on_qis_m(self, call, target, result): - self._drop_ops([target]) - self.last_meas[ptr_id(target)] = 
(call, target, result) - - def _on_qis_mz(self, call, target, result): - self._on_qis_m(call, target, result) - - def _on_qis_mresetz(self, call, target, result): - self._on_qis_m(call, target, result) - - def _on_qis_reset(self, call, target): - id = ptr_id(target) - if id in self.last_meas: - # Since the last operation on this qubit was a measurement, - # we can combine that measurement with the reset. - (instr, target, result) = self.last_meas.pop(id) - instr.erase() - self.builder.insert_before(call) - new_call = self.builder.call( - self.mresetz_func, - [target, result], - ) - call.erase() - self.last_meas[ptr_id(target)] = (new_call, target, result) - elif not id in self.used_qubits: - # This qubit was never used, so we can just erase the reset instruction. - call.erase() - elif id in self.qubit_ops and self.qubit_ops[id][-1][1] == "reset": - # The last operation on this qubit was also a reset, so we drop the current, - # extra one. - call.erase() - else: - self._drop_ops([target]) - self._schedule_gate(call, id, "reset", "") - - -class PruneUnusedFunctions(QirModuleVisitor): - def _on_module(self, module): - # Assume every non-entry point function is unused. - self.funcs_to_drop = [f for f in module.functions if not is_entry_point(f)] - super()._on_module(module) - # Delete all unused functions. - for func in self.funcs_to_drop: - func.delete() - - def _on_call_instr(self, call): - # Remove calls to initialization and barrier functions, since they aren't handled - # by most of the stack. - if call.callee.name == "__quantum__rt__initialize": - call.erase() - elif call.callee.name == "__quantum__qis__barrier__body": - call.erase() - elif call.callee in self.funcs_to_drop: - # This function is used in a call, so remove it from the list of - # functions to drop. 
- assert isinstance(call.callee, Function) - self.funcs_to_drop.remove(call.callee) diff --git a/source/pip/qsharp/_device/_atom/_reorder.py b/source/pip/qsharp/_device/_atom/_reorder.py deleted file mode 100644 index 3efed6a4f0..0000000000 --- a/source/pip/qsharp/_device/_atom/_reorder.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from ._utils import as_qis_gate, get_used_values, uses_any_value -from .._device import Device -from pyqir import ( - Call, - Instruction, - Function, - QirModuleVisitor, -) - - -def is_output_recording(instr: Instruction): - if isinstance(instr, Call): - return instr.callee.name.endswith("_record_output") - return False - - -def is_irreversible(instr: Instruction): - if isinstance(instr, Call) and isinstance(instr.callee, Function): - return "irreversible" in instr.callee.attributes.func - return False - - -class Reorder(QirModuleVisitor): - """ - Reorder instructions within a block to find contiguous sequences of the same gate on - different qubits. This enables both layout and scheduling at a later stage. - """ - - def __init__(self, device: Device): - super().__init__() - self.device = device - - def instr_key(self, instr: Instruction): - gate = as_qis_gate(instr) - if gate != {}: - qubits = sorted(map(self.device.get_ordering, gate["qubit_args"])) - return qubits[0] - return 0 - - def _on_block(self, block): - # The instructions are collected into an ordered list of steps, where each step - # contains instructions of the same type that do not depend on each other. - steps = [] - - # A list of all values or resultsused in the current step. This is used to determine if an instruction - # can be added to the current step or if it needs to go into a new step by checking dependencies. - values_used_in_step = [] - results_used_in_step = [] - - # Output recording instructions and terminator must be treated separately, as those - # must be at the end of the block. 
- output_recording = [] - terminator = block.terminator - if terminator: - terminator.remove() - - for instr in block.instructions: - # Remove the instruction from the block, which keeps it alive in the module - # and available for later insertion. - instr.remove() - if is_output_recording(instr): - # Gather output recording instructions to be placed at the end of the block just before - # the terminator. - output_recording.append(instr) - else: - # Find the last step that contains instructions that the current instruction - # depends on. We want to insert the current instruction on the earliest possible - # step without violating dependencies. - last_dependent_step_idx = len(steps) - 1 - (used_values, used_results) = get_used_values(instr) - while last_dependent_step_idx >= 0: - if uses_any_value( - used_values, values_used_in_step[last_dependent_step_idx] - ) or uses_any_value( - used_results, results_used_in_step[last_dependent_step_idx] - ): - break - last_dependent_step_idx -= 1 - - if isinstance(instr, Call): - while ( - last_dependent_step_idx < len(steps) - 1 - and isinstance(steps[last_dependent_step_idx + 1][0], Call) - and instr.callee != steps[last_dependent_step_idx + 1][0].callee - ): - last_dependent_step_idx += 1 - - if last_dependent_step_idx == len(steps) - 1: - # The current instruction depends on the last step, so add it to a new step at the end. - steps.append([instr]) - values_used_in_step.append(set(used_values)) - results_used_in_step.append(set(used_results)) - else: - # The last dependent step is before the end, so add the current instruction to the - # step after it. - steps[last_dependent_step_idx + 1].append(instr) - values_used_in_step[last_dependent_step_idx + 1].update(used_values) - results_used_in_step[last_dependent_step_idx + 1].update( - used_results - ) - - # Insert the instructions back into the block in the correct order. 
- self.builder.insert_at_end(block) - for step in steps: - for instr in sorted(step, key=self.instr_key): - self.builder.instr(instr) - # Add output recording instructions and terminator at the end of the block. - for instr in output_recording: - self.builder.instr(instr) - if terminator: - self.builder.instr(terminator) diff --git a/source/pip/qsharp/_device/_atom/_scheduler.py b/source/pip/qsharp/_device/_atom/_scheduler.py deleted file mode 100644 index 0ca1285e51..0000000000 --- a/source/pip/qsharp/_device/_atom/_scheduler.py +++ /dev/null @@ -1,938 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from ._utils import as_qis_gate, get_used_values, uses_any_value -from pyqir import ( - Call, - Instruction, - Function, - QirModuleVisitor, - FunctionType, - PointerType, - Type, - Linkage, - ptr_id, - IntType, - Value, -) -from .._device import Device, Zone, ZoneType -from collections import defaultdict -from dataclasses import dataclass -from itertools import chain -from typing import Iterable, TypeAlias, Optional -from fractions import Fraction -from functools import lru_cache - -QubitId: TypeAlias = Value -Location: TypeAlias = tuple[int, int] -MoveGroupScaleFactor: TypeAlias = tuple[bool | Fraction, bool | Fraction] -MOVE_GROUPS_PER_PARALLEL_SECTION = 1 - - -@dataclass -class Move: - __slots__ = ("qubit_id_ptr", "src_loc", "dst_loc") - - qubit_id_ptr: Value - src_loc: Location - dst_loc: Location - - def __hash__(self): - return hash(self.qubit_id_ptr) - - def __str__(self): - return f"Move Qubit({self.qubit_id}): {self.src_loc} -> {self.dst_loc}" - - def __repr__(self): - return self.__str__() - - @property - def qubit_id(self) -> int: - q_id = ptr_id(self.qubit_id_ptr) - assert q_id is not None, "Qubit id should be known" - return q_id - - def parity(self): - return move_parity(self.src_loc, self.dst_loc) - - def direction(self): - return move_direction(self.src_loc, self.dst_loc) - - -@dataclass -class PartialMove: - """A 
move missing its destination location.""" - - __slots__ = ("qubit_id_ptr", "src_loc") - - qubit_id_ptr: Value - src_loc: Location - - @property - def qubit_id(self) -> int: - q_id = ptr_id(self.qubit_id_ptr) - assert q_id is not None, "Qubit id should be known" - return q_id - - def into_move(self, dst_loc: Location) -> Move: - return Move(self.qubit_id_ptr, self.src_loc, dst_loc) - - -PartialMovePair: TypeAlias = tuple[PartialMove, PartialMove] - - -def move_parity(source: Location, destination: Location) -> tuple[int, int]: - """Returns a tuple representing the parities of the source and destination columns of a move.""" - return (source[1] % 2, destination[1] % 2) - - -def move_direction(source: Location, destination: Location) -> tuple[int, int]: - """Returns a tuple representing if the move is up or down, and left or right.""" - return (int(source[0] < destination[0]), int(source[1] < destination[1])) - - -def index_from_parity_and_direction(ud: int, lr: int) -> int: - return 2 * ud + lr - - -def is_invalid_move_pair(move1: Move, move2: Move) -> bool: - """ - Returns true if the two moves are incompatible, i.e., if they have the same - source row then they must have the same destination row, and if they have the - same source column then they must have the same destination column. 
- """ - - source_row_diff = move1.src_loc[0] - move2.src_loc[0] - destination_row_diff = move1.dst_loc[0] - move2.dst_loc[0] - source_col_diff = move1.src_loc[1] - move2.src_loc[1] - destination_col_diff = move1.dst_loc[1] - move2.dst_loc[1] - - return ( - (source_row_diff == 0 and destination_row_diff != 0) - or (source_row_diff != 0 and destination_row_diff == 0) - or (source_col_diff == 0 and destination_col_diff != 0) - or (source_col_diff != 0 and destination_col_diff == 0) - ) - - -@lru_cache(maxsize=1 << 14) -def scale_factor_helper(source_diff, destination_diff): - if destination_diff == 0: - return True - if (s := Fraction(source_diff, destination_diff)) >= 0: - return s - - -def scale_factor(move1: Move, move2: Move) -> Optional[MoveGroupScaleFactor]: - """ - Returns a tuple of two elements, representing the row displacement ratio and column - displacement ratio between the moves. - """ - - if is_invalid_move_pair(move1, move2): - return None - - source_row_diff = move1.src_loc[0] - move2.src_loc[0] - destination_row_diff = move1.dst_loc[0] - move2.dst_loc[0] - source_col_diff = move1.src_loc[1] - move2.src_loc[1] - destination_col_diff = move1.dst_loc[1] - move2.dst_loc[1] - row_scale_factor = scale_factor_helper(source_row_diff, destination_row_diff) - col_scale_factor = scale_factor_helper(source_col_diff, destination_col_diff) - - if row_scale_factor is not None and col_scale_factor is not None: - return row_scale_factor, col_scale_factor - - -class MoveGroup: - """ - Represents a group of moves that can be done at the same time. - - ``moves`` is the set of moves that can be performed in parallel. - ``scale_factor`` is a tuple of fractions representing the scale factors in the - row and col axes between moves, or ``None`` if there is a single element in the set. - ``ref_move`` is a move used as a representative of the group to test compatibility - of other moves. 
- """ - - __slots__ = ("moves", "scale_factor", "ref_move") - - def __init__(self, moves: Iterable[Move]): - self.moves = set(moves) - self.scale_factor = scale_factor(*moves) if len(self.moves) > 1 else None - self.ref_move = next(iter(moves)) - - def __len__(self) -> int: - return len(self.moves) - - def add(self, move: Move): - """ - Adds a move to this move group. - - :param move: The move to add. - """ - - # A move group with a single move doesn't have an associated scale factor. - # Therefore, we cannot test if a move is compatible with it, which means - # we cannot add moves to it. - assert ( - self.scale_factor - ), "cannot add to move group candidate with a single move" - self.moves.add(move) - - def remove(self, move: Move): - self.moves.remove(move) - - def discard(self, move: Move): - self.moves.discard(move) - - -class MoveGroupPool: - """A data structure that takes individual moves as input and organizes them - into groups of moves that can be executed in parallel. - - ``moves`` contains all moves in the pool. ``move_group_candidates`` is a dict - organizing the move-group candidates by scale factor. ``parity`` is the parity - of source and destination columns of all moves in the pool. ``direction`` is the - up/down and left/right direction of all moves in the pool. - """ - - def __init__(self): - """Initializes a move-group pool for moves of the given ``parity`` and ``direction``. - - :param parity: The parity of source and destination columns of all the moves in this pool. - :param direction: The up/down and left/right direction of all the moves in this pool. 
- """ - self.moves: Optional[list[Move]] = [] - self.move_group_candidates: dict[MoveGroupScaleFactor, list[MoveGroup]] = ( - defaultdict(list) - ) - self.single_moves: set[Move] | list[Move] = set() - - def move_group_candidates_iter(self) -> Iterable[MoveGroup]: - return chain(*self.move_group_candidates.values()) - - def is_empty(self) -> bool: - """Returns `True` if there are no moves left, `False` otherwise.""" - return ( - not any(s.moves for s in self.move_group_candidates_iter()) - and not self.single_moves - ) - - def largest_move_group_candidate(self) -> Optional[MoveGroup]: - try: - return max(self.move_group_candidates_iter(), key=len) - except ValueError: - return None - - def add(self, move: Move): - """Adds a move to the move-group pool. - - :param move: The move to add. It must be of the same parity and direction as - the rest of the moves in this pool. - """ - assert self.moves is not None - - move_added = False - - # Add the move to all the groups it is compatible with - for group_scale_factor, groups in self.move_group_candidates.items(): - for group in groups: - if scale_factor(move, group.ref_move) == group_scale_factor: - group.add(move) - move_added = True - - # Build a table organizing the moves by scale factor with respect to `move`. - moves_by_scale: dict[MoveGroupScaleFactor, list[Move]] = defaultdict(list) - for move2 in self.moves: - s = scale_factor(move, move2) - if s is None: - continue - moves_by_scale[s].append(move2) - - # Try to create new candidates having the new move as the ref_move. - for s, moves in moves_by_scale.items(): - candidates_with_same_scale_factor = self.move_group_candidates[s] - for move2 in moves: - for group in candidates_with_same_scale_factor: - if move2 in group.moves: - # This pair already belongs to an existing move group candidate, - # so we don't need to create a new one. - break - else: - # Create a new move group candidate. 
- new_candidate = MoveGroup((move, move2)) - - # Add previous moves to the new candidate. - new_candidate.moves.update(moves_by_scale[s]) - - candidates_with_same_scale_factor.append(new_candidate) - move_added = True - - # This case triggers if `move` is not compatible with any move in `self.moves`. - if not move_added: - assert isinstance(self.single_moves, set) - self.single_moves.add(move) - - self.moves.append(move) - - def try_take(self, number_of_moves: int) -> list[Move]: - """Take up to ``number_of_moves`` from the largest move group candidate. - - :param number_of_moves: The number of moves to take from this pool. - """ - # Once we start taking moves from the MoveGroupPool, we don't need to add - # new moves. So we set `self.moves` to `None` as a safety measure. - if self.moves is not None: - self.moves = None - - if largest_move_group_candidate := self.largest_move_group_candidate(): - # Ensure moves are sorted by qubit ID to have a deterministic order. - moves = sorted( - largest_move_group_candidate.moves, key=lambda m: m.qubit_id - )[:number_of_moves] - moves_set = set(moves) - # Remove the taken moves from all candidates. - for group in self.move_group_candidates_iter(): - group.moves -= moves_set - assert isinstance(self.single_moves, set) - self.single_moves -= moves_set - return moves - else: - if isinstance(self.single_moves, set): - self.single_moves = sorted( - self.single_moves, key=lambda m: m.qubit_id, reverse=True - ) - if m := self.single_moves.pop(): - return [m] - else: - return [] - - def take_largest_candidate(self) -> list[Move]: - """Take all the moves from the largest move group candidate.""" - # Once we start taking moves from the MoveGroupPool, we don't need to add - # new moves. So we set `self.moves` to `None` as a safety measure. - if self.moves is not None: - self.moves = None - - if largest_move_group_candidate := self.largest_move_group_candidate(): - # Ensure moves are sorted by qubit ID to have a deterministic order. 
- moves = sorted(largest_move_group_candidate.moves, key=lambda m: m.qubit_id) - moves_set = largest_move_group_candidate.moves - # Remove the taken moves from all candidates. - for group in self.move_group_candidates_iter(): - if group is not largest_move_group_candidate: - group.moves -= moves_set - assert isinstance(self.single_moves, set) - self.single_moves -= moves_set - moves_set.clear() - return moves - else: - if isinstance(self.single_moves, set): - self.single_moves = sorted( - self.single_moves, key=lambda m: m.qubit_id, reverse=True - ) - if m := self.single_moves.pop(): - return [m] - else: - return [] - - -class MoveScheduler: - """ - Takes a device, a target zone, and a list of qubits to move to that - target zone and builds an iterator that returns groups of moves - that can be executed in parallel. - - ``device`` contains information about the device. ``zone`` is the target zone. - ``available_dst_locations`` holds the available destinations in the zone. - ``partial_moves`` are moves not yet assigned a destination. ``disjoint_pools`` - is a list containing one pool of move-groups for each parity and direction. - """ - - def __init__( - self, - device: Device, - zone: Zone, - qubits_to_move: list[QubitId | tuple[QubitId, QubitId]], - ): - """Initializes the move scheduler from a device, a target zone, - and a list of qubits to move to that target zone. - - :param device: An object containing information about the device. - :param zone: The zone the moves will be scheduled to. - :param qubits_to_move: A list of qubits to move. - """ - self.device = device - self.zone = zone - self.available_dst_locations = self.build_zone_locations(zone) - self.move_group_pool = MoveGroupPool() - - # Step through the partial moves and push them to the largest - # candidate they are compatible with. 
- partial_moves = self.qubits_to_partial_moves(qubits_to_move) - for partial_move in partial_moves: - if isinstance(partial_move, PartialMove): - self.add_to_largest_compatible_move_group(partial_move) - else: - self.add_pair_to_largest_compatible_move_group(partial_move) - - def build_zone_locations(self, zone: Zone) -> dict[Location, None]: - zone_row_offset = zone.offset // self.device.column_count - # We use a dict with None values instead of a set to preserve order. - return { - (row, col): None - for row in range( - zone_row_offset, - zone_row_offset + zone.row_count, - ) - for col in range(self.device.column_count) - } - - def qubits_to_partial_moves( - self, qubits_to_move: list[QubitId | tuple[QubitId, QubitId]] - ) -> list[PartialMove | PartialMovePair]: - partial_moves = [] - for elt in qubits_to_move: - if isinstance(elt, tuple): - q_id1 = ptr_id(elt[0]) - q_id2 = ptr_id(elt[1]) - assert q_id1 is not None - assert q_id2 is not None - mov1 = PartialMove(elt[0], self.device.get_home_loc(q_id1)) - mov2 = PartialMove(elt[1], self.device.get_home_loc(q_id2)) - partial_moves.append((mov1, mov2)) - else: - q_id = ptr_id(elt) - assert q_id is not None - mov = PartialMove(elt, self.device.get_home_loc(q_id)) - partial_moves.append(mov) - - def sort_key(partial_move: PartialMove | PartialMovePair): - if isinstance(partial_move, PartialMove): - return self.device.get_ordering(partial_move.qubit_id) - else: - return self.device.get_ordering(partial_move[0].qubit_id) - - return sorted(partial_moves, key=sort_key) - - def is_empty(self): - """ - Returns `True` if all moves were scheduled. - That is, there are no partial moves and all disjoint pools are empty. 
- """ - return self.move_group_pool.is_empty() - - def largest_move_group_pool(self) -> MoveGroupPool: - return self.move_group_pool - - def add_to_largest_compatible_move_group( - self, partial_move: PartialMove - ) -> MoveGroupPool: - zone_row_offset = self.zone.offset // self.device.column_count - - # Heuristic: Prefer moves that are straight up or down. - for row in range(zone_row_offset, zone_row_offset + self.zone.row_count): - dst_loc = (row, partial_move.src_loc[1]) - if dst_loc in self.available_dst_locations: - move = partial_move.into_move(dst_loc) - pool = self.move_group_pool - pool.add(move) - del self.available_dst_locations[move.dst_loc] - return pool - - if move := self.get_compatible_move(self.move_group_pool, partial_move): - self.move_group_pool.add(move) - del self.available_dst_locations[move.dst_loc] - return self.move_group_pool - - raise Exception("not enough IZ space to schedule all moves") - - def add_pair_to_largest_compatible_move_group( - self, partial_move_pair: PartialMovePair - ) -> MoveGroupPool: - zone_row_offset = self.zone.offset // self.device.column_count - partial_move = partial_move_pair[0] - - # Heuristic: Prefer moves that are straight up or down. - if partial_move.src_loc[1] % 2 == 0: - for row in range(zone_row_offset, zone_row_offset + self.zone.row_count): - dst_loc1 = (row, partial_move.src_loc[1]) - dst_loc2 = (row, partial_move.src_loc[1] + 1) - if ( - dst_loc1 in self.available_dst_locations - and dst_loc2 in self.available_dst_locations - ): - move1 = partial_move.into_move(dst_loc1) - move2 = partial_move_pair[1].into_move(dst_loc2) - pool1 = self.move_group_pool - pool2 = self.move_group_pool - pool1.add(move1) - pool2.add(move2) - del self.available_dst_locations[dst_loc1] - del self.available_dst_locations[dst_loc2] - return pool1 - - if move1 := self.get_compatible_move( - self.move_group_pool, partial_move, is_pair=True - ): - # Push the move corresponding to the first qubit of the CZ pair. 
- self.move_group_pool.add(move1) - - # Build the move corresponding to the second qubit of the CZ pair. - dest2 = (move1.dst_loc[0], move1.dst_loc[1] + 1) - move2 = partial_move_pair[1].into_move(dest2) - self.move_group_pool.add(move2) - del self.available_dst_locations[move1.dst_loc] - del self.available_dst_locations[move2.dst_loc] - return self.move_group_pool - raise Exception("not enough IZ space to schedule all moves") - - def get_destination( - self, - partial_move: PartialMove, - scale_factor: MoveGroupScaleFactor, - group: MoveGroup, - ) -> Optional[Location]: - """ - Returns an available destination location that would make `partial_move` - fit in the given group, or `None` if no such location exists. - """ - row_scale_factor, col_scale_factor = scale_factor - - if row_scale_factor is True: - dst_row = group.ref_move.dst_loc[0] - else: - # We compute the destination row by solving this equation for `dst_row`: - # src_row_diff / (group.ref_move.dst_loc[0] - dst_row) == row_scale_factor - src_row_diff = group.ref_move.src_loc[0] - partial_move.src_loc[0] - dst_row = group.ref_move.dst_loc[0] - src_row_diff / row_scale_factor - assert isinstance(dst_row, Fraction) - if dst_row.denominator == 1: - dst_row = dst_row.numerator - else: - return None - - if col_scale_factor is True: - dst_col = group.ref_move.dst_loc[1] - else: - # We compute the destination col by solving this equation for `dst_col`: - # src_col_diff / (group.ref_move.dst_loc[1] - dst_col) == col_scale_factor - src_col_diff = group.ref_move.src_loc[1] - partial_move.src_loc[1] - dst_col = group.ref_move.dst_loc[1] - src_col_diff / col_scale_factor - assert isinstance(dst_col, Fraction) - if dst_col.denominator == 1: - dst_col = dst_col.numerator - else: - return None - - loc = (dst_row, dst_col) - if loc in self.available_dst_locations: - return loc - - def get_compatible_move( - self, - pool: MoveGroupPool, - partial_move: PartialMove, - is_pair=False, - ) -> Optional[Move]: - # First, try 
finding a large enough group to place the partial move in. - if self.zone.type != ZoneType.MEAS: - GROUP_SIZE_THRESHOLD = self.device.column_count // 4 - best_destination: Optional[Location] = None - best_destination_group_len = 0 - for scale, groups in pool.move_group_candidates.items(): - for group in sorted(groups, key=len, reverse=True): - if ( - len(group) < GROUP_SIZE_THRESHOLD - or len(group) < best_destination_group_len - ): - break - if destination := self.get_destination(partial_move, scale, group): - if (not is_pair) or destination[1] % 2 == 0: - best_destination = destination - best_destination_group_len = len(group) - break - if best_destination: - return partial_move.into_move(best_destination) - - # If we didn't find a group to place the partial_move in, - # just pick the next available IZ location. - for destination in self.available_dst_locations: - if (not is_pair) or destination[1] % 2 == 0: - return partial_move.into_move(destination) - - def __iter__(self): - return self - - def __next__(self) -> list[Move]: - # If there are no moves left to schedule, stop the iteration. - if self.is_empty(): - raise StopIteration - - # Try_get from the largest candidate. - return self.largest_move_group_pool().take_largest_candidate() - - -class Schedule(QirModuleVisitor): - """ - Schedule instructions within a block, adding appropriate moves to the interaction zone to perform operations - """ - - begin_func: Function - end_func: Function - move_funcs: list[Function] - - def __init__(self, device: Device): - super().__init__() - self.device = device - self.num_qubits = len(self.device.home_locs) - self.pending_moves: list[list[Move]] = [] - - def _on_module(self, module): - i64_ty = IntType(module.context, 64) - # Find or create the necessary runtime functions. 
- for func in module.functions: - if func.name == "__quantum__rt__begin_parallel": - self.begin_func = func - elif func.name == "__quantum__rt__end_parallel": - self.end_func = func - if not hasattr(self, "begin_func"): - self.begin_func = Function( - FunctionType( - Type.void(module.context), - [], - ), - Linkage.EXTERNAL, - "__quantum__rt__begin_parallel", - module, - ) - if not hasattr(self, "end_func"): - self.end_func = Function( - FunctionType( - Type.void(module.context), - [], - ), - Linkage.EXTERNAL, - "__quantum__rt__end_parallel", - module, - ) - self.move_func = Function( - FunctionType( - Type.void(module.context), - [PointerType(Type.void(module.context)), i64_ty, i64_ty], - ), - Linkage.EXTERNAL, - "__quantum__qis__move__body", - module, - ) - - super()._on_module(module) - - def _on_block(self, block): - # Use only the first interaction and measurement zone; more could be supported in future. - interaction_zone = self.device.get_interaction_zones()[0] - measurement_zone = self.device.get_measurement_zones()[0] - max_iz_pairs = (self.device.column_count // 2) * interaction_zone.row_count - max_measurements = self.device.column_count * measurement_zone.row_count - - # Track pending/queued single qubit operations by qubit id. - self.single_qubit_ops = [[] for _ in range(self.num_qubits)] - - # Track pending CZ operations. - self.curr_cz_ops = [] - - # Track pending measurements. - self.measurements = [] - - # Track pending qubits to move to an interaction or measurement zone. - self.pending_qubits_to_move: list[QubitId | tuple[QubitId, QubitId]] = [] - - # Track values used in CZ ops and measurements to avoid putting operations on the - # same qubit in the same batch. 
- self.vals_used_in_cz_ops = set() - self.vals_used_in_measurements = set() - - instructions = [instr for instr in block.instructions] - for instr in instructions: - gate = as_qis_gate(instr) - if ( - gate != {} - and len(gate["qubit_args"]) == 1 - and len(gate["result_args"]) == 0 - ): - # This is a single qubit gate; queue it up for later execution when this qubit is needed for CZ or measurement. - - # If this qubit is involved in pending moves, that implies a CZ or measurement is pending, so flush now. - if any( - ( - gate["qubit_args"][0] == ptr_id(q) - if isinstance(q, QubitId) - else ( - gate["qubit_args"][0] == ptr_id(q[0]) - or gate["qubit_args"][0] == ptr_id(q[1]) - ) - ) - for q in self.pending_qubits_to_move - ): - self.flush_pending(instr) - - # Remove the instruction from the block and queue by the qubit id. - instr.remove() - self.single_qubit_ops[gate["qubit_args"][0]].append((instr, gate)) - - elif gate != {} and len(gate["qubit_args"]) == 2: - # This is a CZ gate; queue it up to be executed in the next available interaction zone row. - - # Pick next available interaction zone pair for these qubits. If none, flush the current set and start a fresh set. - # Create move instructions to move qubits to interaction zone and save them in pending moves for later insertion. - assert isinstance(instr, Call) - (vals_used, _) = get_used_values(instr) - if ( - self.measurements - or uses_any_value(vals_used, self.vals_used_in_cz_ops) - or len(self.curr_cz_ops) >= max_iz_pairs - ): - self.flush_pending(instr) - instr.remove() - self.curr_cz_ops.append(instr) - self.vals_used_in_cz_ops.update(vals_used) - - # Prefer using matching relative column ordering to home locations to reduce move crossings. 
- if ( - self.device.get_home_loc(gate["qubit_args"][0])[1] - > self.device.get_home_loc(gate["qubit_args"][1])[1] - ): - self.pending_qubits_to_move.append((instr.args[1], instr.args[0])) - else: - self.pending_qubits_to_move.append((instr.args[0], instr.args[1])) - - elif gate != {} and len(gate["result_args"]) == 1: - # This is a measurement; queue it up to be executed in the measurement zone. - - # Pick next available measurement zone location for this qubit. If none, flush the current set and start a fresh set. - # Create move instructions to move qubit to measurement zone and save them in pending moves for later insertion. - assert isinstance(instr, Call) - (vals_used, _) = get_used_values(instr) - if ( - not self.measurements - or len(self.measurements) >= max_measurements - or uses_any_value(vals_used, self.vals_used_in_measurements) - ): - self.flush_pending(instr) - if len(self.single_qubit_ops[gate["qubit_args"][0]]) > 0: - # There are still pending single qubits ops for the qubit we want to measure, - # so trigger another flush. - # We need to cache and restore the measurements and pending moves that have already - # been queued so that this flush affects the single qubit ops but not the measurements. - temp_meas = self.measurements - self.measurements = [] - temp_moves = self.pending_qubits_to_move - self.pending_qubits_to_move = [] - self.flush_pending(instr) - self.measurements = temp_meas - self.pending_qubits_to_move = temp_moves - - # Remove the measurement from the block and queue it. - instr.remove() - self.measurements.append((instr, gate)) - self.vals_used_in_measurements.update(vals_used) - self.pending_qubits_to_move.append(instr.args[0]) - else: - # This is not a gate or measurement; flush any pending operations and leave the instruction in place. - # This uses a while loop to ensure all pending operations are flushed before the instruction. 
- while self.any_pending_ops(): - self.flush_pending(instr) - - def any_pending_single_qubit_ops(self): - return any(ops for ops in self.single_qubit_ops) - - def any_pending_czs(self): - return bool(self.curr_cz_ops) - - def any_pending_measurements(self): - return bool(self.measurements) - - def any_pending_ops(self): - return ( - self.any_pending_czs() - or self.any_pending_single_qubit_ops() - or self.any_pending_measurements() - ) - - def flush_pending(self, insert_before: Instruction): - interaction_zone = self.device.get_interaction_zones()[0] - self.builder.insert_before(insert_before) - # If cz ops pending, insert accumulated moves, single qubits ops matching cz rows, then the cz ops, then move back. - if self.curr_cz_ops: - self.schedule_pending_moves(interaction_zone) - self.insert_moves() - qubits_by_row = self.target_qubits_by_row(interaction_zone) - for qubits_in_row in qubits_by_row: - self.flush_single_qubit_ops(qubits_in_row) - self.builder.call(self.begin_func, []) - for cz_op in self.curr_cz_ops: - self.builder.instr(cz_op) - self.builder.call(self.end_func, []) - self.curr_cz_ops = [] - self.insert_moves_back() - self.vals_used_in_cz_ops = set() - return - # If measurements pending, insert accumulated moves, then measurements, then move back. - elif len(self.measurements) > 0: - self.schedule_pending_moves(self.device.get_measurement_zones()[0]) - self.insert_moves() - self.builder.call(self.begin_func, []) - for meas_op, meas_gate in self.measurements: - self.builder.instr(meas_op) - self.builder.call(self.end_func, []) - self.measurements = [] - self.vals_used_in_measurements = set() - self.insert_moves_back() - return - # Else, create movements for remaining single qubit ops to the first interaction zone, - # insert those moves, then the ops, then move back. 
- else: - while self.any_pending_single_qubit_ops(): - target_qubits_by_row = [[] for _ in range(interaction_zone.row_count)] - curr_row = 0 - for q in range(self.num_qubits): - if len(self.single_qubit_ops[q]) > 0: - target_qubits_by_row[curr_row].append(q) - if ( - len(target_qubits_by_row[curr_row]) - >= self.device.column_count - ): - curr_row += 1 - if curr_row >= interaction_zone.row_count: - break - for target_qubits in target_qubits_by_row: - for q in target_qubits: - qubit = self.single_qubit_ops[q][0][0].args[0] - if self.single_qubit_ops[q][0][1]["gate"] == "rz": - qubit = self.single_qubit_ops[q][0][0].args[1] - self.pending_qubits_to_move.append(qubit) - self.schedule_pending_moves(interaction_zone) - self.insert_moves() - qubits_by_row = self.target_qubits_by_row(interaction_zone) - for qubits_in_row in qubits_by_row: - self.flush_single_qubit_ops(qubits_in_row) - self.insert_moves_back() - return - - def target_qubits_by_row(self, zone: Zone) -> list[list[int]]: - zone_row_offset = zone.offset // self.device.column_count - qubits_by_row: list[list[int]] = [[] for _ in range(zone.row_count)] - for group in self.pending_moves: - for move in group: - row_idx = move.dst_loc[0] - zone_row_offset - qubits_by_row[row_idx].append(move.qubit_id) - # Organize qubits in each row by qubit_id, so that parallel sections - # of single-qubit ops in the generated QIR are easier to read. 
- for row in qubits_by_row: - row.sort() - return qubits_by_row - - def schedule_pending_moves(self, zone: Zone): - move_scheduler = MoveScheduler(self.device, zone, self.pending_qubits_to_move) - for move_group in move_scheduler: - self.pending_moves.append(move_group) - # self.verify_that_all_moves_were_scheduled() - self.pending_qubits_to_move = [] - - def verify_that_all_moves_were_scheduled(self): - moves_to_schedule = sum( - len(x) if isinstance(x, tuple) else 1 for x in self.pending_qubits_to_move - ) - scheduled_moves = sum(len(group) for group in self.pending_moves) - assert ( - moves_to_schedule == scheduled_moves - ), f"{moves_to_schedule} != {scheduled_moves}" - - def insert_moves(self): - """ - For each pending move, insert a call to the move function that moves the - given qubit to the given (row, col) location. - """ - move_group_id = 0 - for move_group in self.pending_moves: - # We can execute `MOVE_GROUPS_PER_PARALLEL_SECTION`, if - # this is the first one, start a parallel section. - if move_group_id == 0: - self.builder.call(self.begin_func, []) - - # Insert all the moves in a group using the same move function. - for move in move_group: - self.builder.call(self.move_func, (move.qubit_id_ptr, *move.dst_loc)) - - # There `MOVE_GROUPS_PER_PARALLEL_SECTION` move groups, - # so we increment the id modulo `MOVE_GROUPS_PER_PARALLEL_SECTION`. - move_group_id = (move_group_id + 1) % MOVE_GROUPS_PER_PARALLEL_SECTION - - # We can execute `MOVE_GROUPS_PER_PARALLEL_SECTION`, if - # this is the last one, end the parallel section. - if move_group_id == 0: - self.builder.call(self.end_func, []) - - # End the parallel section if it hasn't been ended. - if move_group_id != 0: - self.builder.call(self.end_func, []) - - def insert_moves_back(self): - move_group_id = 0 - for move_group in self.pending_moves: - # We can execute `MOVE_GROUPS_PER_PARALLEL_SECTION`, if - # this is the first one, start a parallel section. 
- if move_group_id == 0: - self.builder.call(self.begin_func, []) - - # Insert all the moves in a group using the same move function. - for move in move_group: - self.builder.call(self.move_func, (move.qubit_id_ptr, *move.src_loc)) - - # There `MOVE_GROUPS_PER_PARALLEL_SECTION` move groups, - # so we increment the id modulo `MOVE_GROUPS_PER_PARALLEL_SECTION`. - move_group_id = (move_group_id + 1) % MOVE_GROUPS_PER_PARALLEL_SECTION - - # We can execute `MOVE_GROUPS_PER_PARALLEL_SECTION`, if - # this is the last one, end the parallel section. - if move_group_id == 0: - self.builder.call(self.end_func, []) - - # End the parallel section if it hasn't been ended. - if move_group_id != 0: - self.builder.call(self.end_func, []) - - # Clear pending moves. - self.pending_moves = [] - - def flush_single_qubit_ops(self, target_qubits): - # Flush all pending single qubit ops for the given target qubits, combining - # consecutive ops of the same type into a single parallel region by row in - # the interaction zone. 
- ops_to_flush = [] - for q in target_qubits: - ops_to_flush.append(list(reversed(self.single_qubit_ops[q]))) - self.single_qubit_ops[q] = [] - while any(len(q_ops) > 0 for q_ops in ops_to_flush): - rz_ops = [] - for q_ops in ops_to_flush: - if len(q_ops) == 0: - continue - if q_ops[-1][1]["gate"] == "rz": - rz_ops.append(q_ops.pop()[0]) - if len(rz_ops) > 0: - self.builder.call(self.begin_func, []) - for rz_op in rz_ops: - self.builder.instr(rz_op) - self.builder.call(self.end_func, []) - sx_ops = [] - for q_ops in ops_to_flush: - if len(q_ops) == 0: - continue - if q_ops[-1][1]["gate"] == "sx": - sx_ops.append(q_ops.pop()[0]) - if len(sx_ops) > 0: - self.builder.call(self.begin_func, []) - for sx_op in sx_ops: - self.builder.instr(sx_op) - self.builder.call(self.end_func, []) diff --git a/source/pip/qsharp/_device/_atom/_trace.py b/source/pip/qsharp/_device/_atom/_trace.py deleted file mode 100644 index 3308b52548..0000000000 --- a/source/pip/qsharp/_device/_atom/_trace.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from pyqir import QirModuleVisitor, ptr_id, required_num_qubits -from .._device import Device - - -class Trace(QirModuleVisitor): - - def __init__( - self, - device: Device, - ): - self.in_parallel = False - self.trace = { - "qubits": device.home_locs, - "steps": [], - } - self.q_cols = {} - super().__init__() - - def _next_step(self): - self.trace["steps"].append({"id": len(self.trace["steps"]), "ops": []}) - - def _on_function(self, function): - num_qubits = required_num_qubits(function) - if num_qubits: - self.trace["qubits"] = self.trace["qubits"][:num_qubits] - super()._on_function(function) - - def _on_call_instr(self, call): - if call.callee.name == "__quantum__rt__begin_parallel": - self._next_step() - self.in_parallel = True - elif call.callee.name == "__quantum__rt__end_parallel": - self.in_parallel = False - elif call.callee.name == "__quantum__qis__move__body": - self._on_qis_move(call, call.args[0], call.args[1], call.args[2]) - elif call.callee.name == "__quantum__qis__sx__body": - self._on_qis_sx(call, call.args[0]) - else: - super()._on_call_instr(call) - - def _on_qis_move(self, call, qubit, row, col): - if not self.in_parallel: - self._next_step() - q = ptr_id(qubit) - self.q_cols[q] = col.value - self.trace["steps"][-1]["ops"].append(f"move({row.value}, {col.value}) {q}") - - def _on_qis_sx(self, call, qubit): - if not self.in_parallel: - self._next_step() - q = ptr_id(qubit) - self.trace["steps"][-1]["ops"].append(f"sx {q}") - - def _on_qis_rz(self, call, angle, qubit): - if not self.in_parallel: - self._next_step() - q = ptr_id(qubit) - self.trace["steps"][-1]["ops"].append(f"rz({angle.value}) {q}") - - def _on_qis_cz(self, call, qubit1, qubit2): - if not self.in_parallel: - self._next_step() - q1 = ptr_id(qubit1) - q2 = ptr_id(qubit2) - if self.q_cols.get(q1, -1) > self.q_cols.get(q2, -1): - q1, q2 = q2, q1 - self.trace["steps"][-1]["ops"].append(f"cz {q1}, {q2}") - - def _on_qis_mresetz(self, call, target, result): - if not 
self.in_parallel: - self._next_step() - q = ptr_id(target) - self.trace["steps"][-1]["ops"].append(f"mz {q}") diff --git a/source/pip/qsharp/_device/_atom/_utils.py b/source/pip/qsharp/_device/_atom/_utils.py deleted file mode 100644 index 17d1eb3248..0000000000 --- a/source/pip/qsharp/_device/_atom/_utils.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from pyqir import ( - Instruction, - Call, - Constant, - PointerType, - Value, - ptr_id, -) -from typing import Dict - -TOLERANCE: float = 1.1920929e-7 # Machine epsilon for 32-bit IEEE FP numbers. - -# QIS gates that consume a measurement result; the value is the 0-based index -# of the result argument. All other pointer-typed arguments of a QIS call are -# qubit arguments. -_RESULT_ARG_INDEX: Dict[str, int] = { - "__quantum__qis__m__body": 1, - "__quantum__qis__mz__body": 1, - "__quantum__qis__mresetz__body": 1, - "__quantum__qis__read_result__body": 0, -} - - -# If this is a call to a __qis__ gate, return a dict describing the gate and its arguments. -def as_qis_gate(instr: Instruction) -> Dict: - if isinstance(instr, Call) and instr.callee.name.startswith("__quantum__qis__"): - parts = instr.callee.name.split("__") - result_idx = _RESULT_ARG_INDEX.get(instr.callee.name) - qubit_args = [] - result_args = [] - other_args = [] - for i, arg in enumerate(instr.args): - if isinstance(arg.type, PointerType): - pid = ptr_id(arg) - if pid is None: - other_args.append(arg) - elif result_idx is not None and i == result_idx: - result_args.append(pid) - else: - qubit_args.append(pid) - else: - other_args.append(arg) - return { - "gate": parts[3] + ("_adj" if parts[4] == "adj" else ""), - "qubit_args": qubit_args, - "result_args": result_args, - "other_args": other_args, - } - return {} - - -# Returns all values and, separately, all measurement results used by the instruction. 
-def get_used_values(instr: Instruction) -> tuple[list[Value], list[Value]]: - vals = [] - meas_results = [] - if isinstance(instr, Call): - vals = instr.args - if ( - instr.callee.name == "__quantum__qis__mresetz__body" - or instr.callee.name == "__quantum__qis__m__body" - or instr.callee.name == "__quantum__qis__mz__body" - ): - # Measurement uses a result as the second argument - meas_results += vals[1:] - vals = vals[:1] - elif ( - instr.callee.name == "__quantum__qis__read_result__body" - or instr.callee.name == "__quantum__rt__read_result" - or instr.callee.name == "__quantum__rt__read_atom_result" - ): - # Read result uses a result as the first argument - meas_results += vals - vals = [] - else: - vals = instr.operands - vals.append(instr) - return (vals, meas_results) - - -# Returns true if any of the used values are in the existing values. -# Useful for determining if an instruction depends on any instructions in a set. -def uses_any_value(used_values, existing_values) -> bool: - return any( - [ - val in existing_values - for val in used_values - if not isinstance(val, Constant) or isinstance(val.type, PointerType) - ] - ) diff --git a/source/pip/qsharp/_device/_atom/_validate.py b/source/pip/qsharp/_device/_atom/_validate.py deleted file mode 100644 index 0ebab719f8..0000000000 --- a/source/pip/qsharp/_device/_atom/_validate.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from pyqir import QirModuleVisitor, is_entry_point, Opcode - - -class ValidateAllowedIntrinsics(QirModuleVisitor): - """ - Ensure that the module only contains allowed intrinsics. 
- """ - - def _on_function(self, function): - name = function.name - if ( - not is_entry_point(function) - and not name.endswith("_record_output") - and name - not in [ - "__quantum__rt__begin_parallel", - "__quantum__rt__end_parallel", - "__quantum__qis__read_result__body", - "__quantum__rt__read_result", - "__quantum__qis__move__body", - "__quantum__qis__cz__body", - "__quantum__qis__sx__body", - "__quantum__qis__rz__body", - "__quantum__qis__mresetz__body", - ] - ): - raise ValueError(f"{name} is not a supported intrinsic") - - -class ValidateNoConditionalBranches(QirModuleVisitor): - """ - Ensure that the function(s) only use unconditional branches. - """ - - def _on_block(self, block): - if ( - block.terminator - and block.terminator.opcode == Opcode.BR - and len(block.terminator.operands) > 1 - ): - raise ValueError("programs with branching control flow are not supported") - super()._on_block(block) diff --git a/source/pip/qsharp/_device/_device.py b/source/pip/qsharp/_device/_device.py deleted file mode 100644 index 991dc46b24..0000000000 --- a/source/pip/qsharp/_device/_device.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from enum import Enum -from .._qsharp import QirInputData - - -class ZoneType(Enum): - """ - Enum representing different types of zones in the device layout. - """ - - REG = "register" - INTER = "interaction" - MEAS = "measurement" - - -class Zone: - """ - Represents a zone in the device layout. - """ - - offset: int = 0 - - def __init__(self, name: str, row_count: int, type: ZoneType): - self.name = name - self.row_count = row_count - self.type = type - - def set_offset(self, offset: int): - self.offset = offset - - -class Device: - """ - Represents a quantum device with specific layout expressed as zones. 
- """ - - def __init__(self, column_count: int, zones: list[Zone]): - self.column_count = column_count - self.zones = zones - offset = 0 - # Ensure the zones have correct offsets set based on their ordering when passed in. - for zone in self.zones: - zone.set_offset(offset) - offset += zone.row_count * self.column_count - - self.home_locs = [] - self._init_home_locs() - - def _init_home_locs(self): - """ - Initialize the home locations of qubits in the device layout. - """ - raise NotImplementedError("Subclasses must implement _init_home_locs") - - def get_home_loc(self, qubit_id: int) -> tuple[int, int]: - """ - Get the home location (row, column) of the qubit with the given id. - - :param qubit_id: The id of the qubit. - :return: The (row, column) location of the qubit. - :rtype: tuple[int, int] - """ - if qubit_id < 0 or qubit_id >= len(self.home_locs): - raise ValueError(f"Qubit id {qubit_id} is out of range") - return self.home_locs[qubit_id] - - def get_ordering(self, qubit_id: int) -> int: - """ - Get the ordering index of the qubit with the given id. - - :param qubit_id: The id of the qubit. - :return: The ordering index of the qubit. - :rtype: int - """ - if qubit_id < 0 or qubit_id >= len(self.home_locs): - raise ValueError(f"Qubit id {qubit_id} is out of range") - row, col = self.home_locs[qubit_id] - return row * self.column_count + col - - def get_register_zones(self) -> list[Zone]: - """ - Get the register zones in the device. - - :return: The register zones. - :rtype: list[Zone] - """ - return [zone for zone in self.zones if zone.type == ZoneType.REG] - - def get_interaction_zones(self) -> list[Zone]: - """ - Get the interaction zones in the device. - - :return: The interaction zones. - :rtype: list[Zone] - """ - return [zone for zone in self.zones if zone.type == ZoneType.INTER] - - def get_measurement_zones(self) -> list[Zone]: - """ - Get the measurement zones in the device. - - :return: The measurement zones. 
- :rtype: list[Zone] - """ - return [zone for zone in self.zones if zone.type == ZoneType.MEAS] - - def compile(self, program: str) -> QirInputData: - """ - Compile the given program for the device. - - :param program: The program to compile. - """ - raise NotImplementedError("Subclasses must implement compile") - - def as_dict(self) -> dict: - """ - Get the device layout as a dictionary. - - :return: The device layout as a dictionary. - :rtype: dict - """ - return { - "cols": self.column_count, - "zones": [ - {"title": zone.name, "rows": zone.row_count, "kind": zone.type.value} - for zone in self.zones - ], - } - - def get_layout(self) -> dict: - """ - Get the device layout as a dictionary. - - :return: The device layout as a dictionary. - :rtype: dict - """ - return self.as_dict() diff --git a/source/pip/qsharp/_fs.py b/source/pip/qsharp/_fs.py index c317007fd1..da53438a92 100644 --- a/source/pip/qsharp/_fs.py +++ b/source/pip/qsharp/_fs.py @@ -1,90 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -""" -_fs.py - -This module provides file system utility functions for working with the file -system as Python sees it. These are used as callbacks passed into native code -to allow the native code to interact with the file system in an -environment-specific way. -""" - -import os -from typing import Dict, List, Tuple - - -def read_file(path: str) -> Tuple[str, str]: - """ - Read the contents of a file. - - :param path: The path to the file. - :return: A tuple containing the path and the file contents. - :rtype: Tuple[str, str] - """ - with open(path, mode="r", encoding="utf-8-sig") as f: - return (path, f.read()) - - -def list_directory(dir_path: str) -> List[Dict[str, str]]: - """ - Lists the contents of a directory and returns a list of dictionaries, - where each dictionary represents an entry in the directory. - - :param dir_path: The path of the directory to list. 
- :return: A list of dictionaries representing the entries in the directory. - Each dictionary contains the following keys: - - ``"path"``: The full path of the entry. - - ``"entry_name"``: The name of the entry. - - ``"type"``: The type of the entry: ``"file"``, ``"folder"``, or ``"unknown"``. - :rtype: List[Dict[str, str]] - """ - - def map_dir(e: str) -> Dict[str, str]: - path = os.path.join(dir_path, e) - return { - "path": path, - "entry_name": e, - "type": ( - "file" - if os.path.isfile(path) - else "folder" if os.path.isdir(path) else "unknown" - ), - } - - return list(map(map_dir, os.listdir(dir_path))) - - -def resolve(base: str, path: str) -> str: - """ - Resolves a relative path with respect to a base path. - - :param base: The base path. - :param path: The relative path. - :return: The resolved path. - :rtype: str - """ - return os.path.normpath(join(base, path)) - - -def exists(path) -> bool: - """ - Check if a file or directory exists at the given path. - - :param path: The path to the file or directory. - :return: ``True`` if the file or directory exists, ``False`` otherwise. - :rtype: bool - """ - return os.path.exists(path) - - -def join(path: str, *paths) -> str: - """ - Joins one or more path components intelligently. - - :param path: The base path. - :param *paths: Additional path components to be joined. - :return: The concatenated path. - :rtype: str - """ - return os.path.join(path, *paths) +# Deprecated: use qdk._fs instead. +from qdk._fs import * # noqa: F401,F403 diff --git a/source/pip/qsharp/_http.py b/source/pip/qsharp/_http.py index 240ddcc67f..ffe23a2d3e 100644 --- a/source/pip/qsharp/_http.py +++ b/source/pip/qsharp/_http.py @@ -1,30 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -""" -_http.py - -This module provides HTTP utility functions for interacting with -GitHub repositories. 
-""" - - -def fetch_github(owner: str, repo: str, ref: str, path: str) -> str: - """ - Fetches the content of a file from a GitHub repository. - - :param owner: The owner of the GitHub repository. - :param repo: The name of the GitHub repository. - :param ref: The reference (branch, tag, or commit) of the repository. - :param path: The path to the file within the repository. - :return: The content of the file as a string. - :rtype: str - :raises urllib.error.HTTPError: If there is an error fetching the file from GitHub. - :raises urllib.error.URLError: If there is an error with the URL. - """ - - import urllib.request - - path_no_leading_slash = path[1:] if path.startswith("/") else path - url = f"https://raw.githubusercontent.com/{owner}/{repo}/{ref}/{path_no_leading_slash}" - return urllib.request.urlopen(url).read().decode("utf-8-sig") +# Deprecated: use qdk._http instead. +from qdk._http import * # noqa: F401,F403 diff --git a/source/pip/qsharp/_ipython.py b/source/pip/qsharp/_ipython.py index c010befe72..3fe5e165be 100644 --- a/source/pip/qsharp/_ipython.py +++ b/source/pip/qsharp/_ipython.py @@ -1,88 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -""" -_ipython.py - -This module provides IPython magic functions for integrating Q# code -execution within Jupyter notebooks. -""" - -from time import monotonic -from IPython.display import display, Javascript, clear_output -from IPython.core.magic import register_cell_magic -from ._native import QSharpError -from ._qsharp import get_interpreter, qsharp_value_to_python_value -from . import telemetry_events -import pathlib - - -def register_magic(): - @register_cell_magic - def qsharp(line, cell): - """Cell magic to interpret Q# code in Jupyter notebooks.""" - # This effectively pings the kernel to ensure it recognizes the cell is running and helps with - # accureate cell execution timing. 
- clear_output() - - def callback(output): - display(output) - # This is a workaround to ensure that the output is flushed. This avoids an issue - # where the output is not displayed until the next output is generated or the cell - # is finished executing. - display(display_id=True) - - telemetry_events.on_run_cell() - start_time = monotonic() - - try: - results = qsharp_value_to_python_value( - get_interpreter().interpret(cell, callback) - ) - - durationMs = (monotonic() - start_time) * 1000 - telemetry_events.on_run_cell_end(durationMs) - - return results - except QSharpError as e: - # pylint: disable=raise-missing-from - raise QSharpCellError(str(e)) - - -def enable_classic_notebook_codemirror_mode(): - """ - Registers %%qsharp cells with MIME type text/x-qsharp - and defines a CodeMirror mode to enable syntax highlighting. - This only works in "classic" Jupyter notebooks, not Notebook v7. - """ - js_to_inject = open( - pathlib.Path(__file__) - .parent.resolve() - .joinpath(".data", "qsharp_codemirror.js"), - mode="r", - encoding="utf-8", - ).read() - - # Extend the JavaScript display helper to print nothing when used - # in a non-browser context (i.e. IPython console) - class JavaScriptWithPlainTextFallback(Javascript): - def __repr__(self): - return "" - - # This will run the JavaScript in the context of the frontend. - display(JavaScriptWithPlainTextFallback(js_to_inject)) - - -class QSharpCellError(BaseException): - """ - Error raised when a %%qsharp cell fails. - """ - - def __init__(self, traceback: str): - self.traceback = traceback.splitlines() - - def _render_traceback_(self): - # We want to specifically override the traceback so that - # the Q# error directly from the interpreter is shown - # instead of the Python error. - return self.traceback +# Deprecated: use qdk._ipython instead. 
+from qdk._ipython import * # noqa: F401,F403 diff --git a/source/pip/qsharp/_native.pyi b/source/pip/qsharp/_native.pyi deleted file mode 100644 index a84d950584..0000000000 --- a/source/pip/qsharp/_native.pyi +++ /dev/null @@ -1,1140 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from enum import Enum -from typing import Any, Callable, Optional, Dict, List, Tuple, TypedDict, overload - -# pylint: disable=unused-argument -# E302 is fighting with the formatter for number of blank lines -# flake8: noqa: E302 - -class OutputSemantics(Enum): - """ - Represents the output semantics for OpenQASM 3 compilation. - Each has implications on the output of the compilation - and the semantic checks that are performed. - """ - - Qiskit: OutputSemantics - """ - The output is in Qiskit format meaning that the output - is all of the classical registers, in reverse order - in which they were added to the circuit with each - bit within each register in reverse order. - """ - - OpenQasm: OutputSemantics - """ - [OpenQASM 3 has two output modes](https://openqasm.com/language/directives.html#input-output) - - If the programmer provides one or more `output` declarations, then - variables described as outputs will be returned as output. - The spec make no mention of endianness or order of the output. - - Otherwise, assume all of the declared variables are returned as output. - """ - - ResourceEstimation: OutputSemantics - """ - No output semantics are applied. The entry point returns `Unit`. - """ - -class ProgramType(Enum): - """ - Represents the type of compilation output to create - """ - - File: ProgramType - """ - Creates an operation in a namespace as if the program is a standalone - file. Inputs are lifted to the operation params. Output are lifted to - the operation return type. The operation is marked as `@EntryPoint` - as long as there are no input parameters. 
- """ - - Operation: ProgramType - """ - Programs are compiled to a standalone function. Inputs are lifted to - the operation params. Output are lifted to the operation return type. - """ - - Fragments: ProgramType - """ - Creates a list of statements from the program. This is useful for - interactive environments where the program is a list of statements - imported into the current scope. - This is also useful for testing individual statements compilation. - """ - -class TargetProfile(Enum): - """ - A Q# target profile. - - A target profile describes the capabilities of the hardware or simulator - which will be used to run the Q# program. - """ - - @classmethod - def from_str(cls, value: str) -> TargetProfile: ... - """ - Creates a target profile from a string. - :param value: The string to parse. - :raises ValueError: If the string does not match any target profile. - """ - - Base: TargetProfile - """ - Target supports the minimal set of capabilities required to run a quantum - program. - - This option maps to the Base Profile as defined by the QIR specification. - """ - - Adaptive_RI: TargetProfile - """ - Target supports the Adaptive profile with the integer computation extension. - - This profile includes all of the required Adaptive Profile - capabilities, as well as the optional integer computation - extension defined by the QIR specification. - """ - - Adaptive_RIF: TargetProfile - """ - Target supports the Adaptive profile with integer & floating-point - computation extensions. - - This profile includes all required Adaptive Profile and `Adaptive_RI` - capabilities, as well as the optional floating-point computation - extension defined by the QIR specification. - """ - - Adaptive_RIFLA: TargetProfile - """ - Target supports the Adaptive profile with integer & floating-point - computation extensions as well as loop extension and statically-sized - arrays extension. 
- """ - - Unrestricted: TargetProfile - """ - Describes the unrestricted set of capabilities required to run any Q# program. - """ - -class GlobalCallable: - """ - A callable reference that can be invoked with arguments. - """ - - ... - -class Closure: - """ - A closure reference that can be passed back into Q#. - """ - - ... - -class Interpreter: - """A Q# interpreter.""" - - def __init__( - self, - target_profile: TargetProfile, - language_features: Optional[List[str]], - project_root: Optional[str], - read_file: Callable[[str], Tuple[str, str]], - list_directory: Callable[[str], List[Dict[str, str]]], - resolve_path: Callable[[str, str], str], - fetch_github: Callable[[str, str, str, str], str], - make_callable: Optional[Callable[[GlobalCallable, List[str], str], None]], - make_class: Optional[Callable[[TypeIR, List[str], str], None]], - trace_circuit: Optional[bool], - ) -> None: - """ - Initializes the Q# interpreter. - - :param target_profile: The target profile to use for the interpreter. - :param project_root: A directory that contains a `qsharp.json` manifest. - :param read_file: A function that reads a file from the file system. - :param list_directory: A function that lists the contents of a directory. - :param resolve_path: A function that joins path segments and normalizes the resulting path. - :param make_callable: A function that registers a Q# callable in the in the environment module. - :param trace_circuit: Enables tracing of circuit during execution. - Passing `True` is required for the `dump_circuit` function to return a circuit. - The `circuit` function is *NOT* affected by this parameter will always generate a circuit. - """ - ... - - def interpret(self, input: str, output_fn: Callable[[Output], None]) -> Any: - """ - Interprets Q# source code. - - :param input: The Q# source code to interpret. - :param output_fn: A callback function that will be called with each output. 
- - :returns value: The value returned by the last statement in the input. - - :raises QSharpError: If there is an error interpreting the input. - """ - ... - - def run( - self, - entry_expr: Optional[str], - output_fn: Optional[Callable[[Output], None]], - noise_config: Optional[NoiseConfig], - noise: Optional[Tuple[float, float, float]], - qubit_loss: Optional[float], - callable: Optional[GlobalCallable | Closure], - args: Optional[Any], - seed: Optional[int], - ) -> Any: - """ - Runs the given Q# expression with an independent instance of the simulator. - - :param entry_expr: The entry expression. - :param output_fn: A callback function that will be called with each output. - :param noise_config: The noise configuration to use in simulation. - :param noise: A tuple with probabilities of Pauli-X, Pauli-Y, and Pauli-Z errors - to use in simulation as a parametric Pauli noise. - :param qubit_loss: The probability of qubit loss in simulation. - :param callable: The callable to run, if no entry expression is provided. - :param args: The arguments to pass to the callable, if any. - :param seed: The seed to use for the random number generator in simulation, if any. - - :returns values: A result or runtime errors. - - :raises QSharpError: If there is an error interpreting the input. - """ - ... - - def invoke( - self, - callable: GlobalCallable | Closure, - args: Any, - output_fn: Callable[[Output], None], - ) -> Any: - """ - Invokes the callable with the given arguments, converted into the appropriate Q# values. - :param callable: The callable to invoke. - :param args: The arguments to pass to the callable. - :param output_fn: A callback function that will be called with each output. - :returns values: A result or runtime errors. - :raises QSharpError: If there is an error interpreting the input. - """ - ... 
- - def qir( - self, - entry_expr: Optional[str] = None, - callable: Optional[GlobalCallable | Closure] = None, - args: Optional[Any] = None, - ) -> str: - """ - Generates QIR from Q# source code. Either an entry expression or a callable with arguments must be provided. - - :param entry_expr: The entry expression. - :param callable: The callable to generate QIR for, if no entry expression is provided. - :param args: The arguments to pass to the callable, if any. - - :returns qir: The QIR string. - """ - ... - - def circuit( - self, - config: CircuitConfig, - entry_expr: Optional[str] = None, - *, - operation: Optional[str] = None, - callable: Optional[GlobalCallable | Closure] = None, - args: Optional[Any] = None, - ) -> Circuit: - """ - Synthesizes a circuit for a Q# program. Either an entry - expression or an operation must be provided. - - :param config: Circuit generation options. - - :param entry_expr: An entry expression. - - :keyword operation: The operation to synthesize. This can be a name of - an operation of a lambda expression. The operation must take only - qubits or arrays of qubits as parameters. - - :keyword callable: The callable to synthesize the circuit for, if no entry expression is provided. - - :keyword args: The arguments to pass to the callable, if any. - - :raises QSharpError: If there is an error synthesizing the circuit. - """ - ... - - def estimate( - self, - params: str, - entry_expr: Optional[str] = None, - callable: Optional[GlobalCallable | Closure] = None, - args: Optional[Any] = None, - ) -> str: - """ - Estimates resources for Q# source code. - - :param params: The parameters to configure estimation. - :param entry_expr: The entry expression to estimate. - :param callable: The callable to estimate resources for, if no entry expression is provided. - :param args: The arguments to pass to the callable, if any. - - :returns resources: The estimated resources. - """ - ... 
- - def logical_counts( - self, - entry_expr: Optional[str] = None, - callable: Optional[GlobalCallable | Closure] = None, - args: Optional[Any] = None, - ) -> Dict[str, int]: - """ - Estimates logical operation counts for Q# source code. - - :param entry_expr: The entry expression to estimate. - :param callable: The callable to estimate resources for, if no entry expression is provided. - :param args: The arguments to pass to the callable, if any. - - :returns resources: The logical resources. - """ - ... - - def set_quantum_seed(self, seed: Optional[int]) -> None: - """ - Sets the seed for the quantum random number generator. - - :param seed: The seed to use for the quantum random number generator. If None, - the seed will be generated from entropy. - """ - ... - - def set_classical_seed(self, seed: Optional[int]) -> None: - """ - Sets the seed for the classical random number generator. - - :param seed: The seed to use for the classical random number generator. If None, - the seed will be generated from entropy. - """ - ... - - def dump_machine(self) -> StateDumpData: - """ - Returns the sparse state vector of the simulator as a StateDump object. - - :return: The state of the simulator. - """ - ... - - def dump_circuit(self) -> Circuit: - """ - Dumps a circuit showing the current state of the simulator. - - This circuit will contain the gates that have been applied - in the simulator up to the current point. - - Requires the interpreter to be initialized with `trace_circuit=True`. - - :raises QSharpError: If the interpreter was not initialized with ``trace_circuit=True``. - """ - ... - - def import_qasm( - self, - source: str, - output_fn: Callable[[Output], None], - read_file: Callable[[str], Tuple[str, str]], - list_directory: Callable[[str], List[Dict[str, str]]], - resolve_path: Callable[[str, str], str], - fetch_github: Callable[[str, str, str, str], str], - **kwargs, - ) -> Any: - """ - Imports OpenQASM source code into the active Q# interpreter. 
- - :param source: An OpenQASM program or fragment. - :param output_fn: The function to handle the output of the execution. - :param read_file: A callable that reads a file and returns its content and path. - :param list_directory: A callable that lists the contents of a directory. - :param resolve_path: A callable that resolves a file path given a base path and a relative path. - :param fetch_github: A callable that fetches a file from GitHub. - :param **kwargs: Common options: - - - ``name`` (str): The name of the program. - - ``search_path`` (str): The optional search path for resolving file references. - - ``output_semantics`` (OutputSemantics): The output semantics for the compilation. - - ``program_type`` (ProgramType): The type of program compilation to perform. - :return: The value returned by the last statement in the source code. - :raises QasmError: If there is an error generating, parsing, or analyzing the OpenQASM source. - :raises QSharpError: If there is an error compiling or evaluating the program. - """ - ... - -class Result(Enum): - """ - A Q# measurement result. - """ - - Zero: int - One: int - Loss: int - -class Pauli(Enum): - """ - A Q# Pauli operator. - """ - - I: int - X: int - Y: int - Z: int - -class Output: - """ - An output returned from the Q# interpreter. - Outputs can be a state dumps or messages. These are normally printed to the console. - """ - - def __repr__(self) -> str: ... - def __str__(self) -> str: ... - def _repr_markdown_(self) -> Optional[str]: ... - def state_dump(self) -> Optional[StateDumpData]: ... - def is_state_dump(self) -> bool: ... - def is_matrix(self) -> bool: ... - def is_message(self) -> bool: ... - -class StateDumpData: - """ - A state dump returned from the Q# interpreter. - """ - - """ - The number of allocated qubits at the time of the dump. - """ - qubit_count: int - - """ - Get the amplitudes of the state vector as a dictionary from state integer to - complex amplitudes. 
- """ - def get_dict(self) -> dict: ... - def __repr__(self) -> str: ... - def __str__(self) -> str: ... - def _repr_markdown_(self) -> str: ... - def _repr_latex_(self) -> Optional[str]: ... - -class CircuitConfig: - """ - Configuration options for circuit generation. - """ - - def __init__( - self, - *, - max_operations: Optional[int] = None, - generation_method: Optional["CircuitGenerationMethod"] = None, - source_locations: bool = False, - group_by_scope: bool = False, - prune_classical_qubits: bool = False, - ) -> None: ... - - max_operations: Optional[int] - """ - The maximum number of operations to include in the generated circuit. - """ - - generation_method: Optional[CircuitGenerationMethod] - """ - The method to use for circuit generation. - """ - - source_locations: Optional[bool] - """ - Whether to include source locations in the generated circuit. - """ - -class CircuitGenerationMethod(Enum): - """ - The method to use for circuit generation. - """ - - ClassicalEval: CircuitGenerationMethod - """ - Use classical evaluation to generate the circuit. - """ - - Simulate: CircuitGenerationMethod - """ - Use simulation to generate the circuit. - """ - - Static: CircuitGenerationMethod - """ - Compile the program and transform to a circuit using partial evaluation. - Only works for AdaptiveRIF-compliant programs. - Requires a non-Unrestricted target profile (e.g. TargetProfile.Adaptive_RIF). - """ - -class Circuit: - """ - A quantum circuit diagram generated from a Q# or OpenQASM program. - - Returned by :func:`qsharp.circuit` and :func:`qsharp.dump_circuit`. - """ - - def json(self) -> str: ... - def __repr__(self) -> str: ... - def __str__(self) -> str: ... - -class QSharpError(BaseException): - """ - An error returned from the Q# interpreter. - """ - - ... - -class QasmError(BaseException): - """ - An error returned from the OpenQASM parser. - """ - - ... 
- -def physical_estimates(logical_resources: str, params: str) -> str: - """ - Estimates physical resources from pre-calculated logical resources. - - :param logical_resources: The logical resources to estimate from. - :param params: The parameters to configure physical estimation. - - :return: The estimated resources. - :rtype: str - """ - ... - -def circuit_qasm_program( - source: str, - read_file: Callable[[str], Tuple[str, str]], - list_directory: Callable[[str], List[Dict[str, str]]], - resolve_path: Callable[[str, str], str], - fetch_github: Callable[[str, str, str, str], str], - **kwargs, -) -> Circuit: - """ - Synthesizes a circuit for an OpenQASM program. - - .. note:: - This call while exported is not intended to be used directly by the user. - It is intended to be used by the Python wrapper which will handle the - callbacks and other Python specific details. - - :param source: An OpenQASM program. - :param read_file: A callable that reads a file and returns its content and path. - :param list_directory: A callable that lists the contents of a directory. - :param resolve_path: A callable that resolves a file path given a base path and a relative path. - :param fetch_github: A callable that fetches a file from GitHub. - :param **kwargs: Common options: - - - ``name`` (str): The name of the program. - - ``search_path`` (str): The optional search path for resolving file references. - :return: The synthesized circuit. - :rtype: Circuit - :raises QasmError: If there is an error generating, parsing, or analyzing the OpenQASM source. - :raises QSharpError: If there is an error evaluating or synthesizing the circuit. - """ - ... 
- -def compile_qasm_program_to_qir( - source: str, - read_file: Callable[[str], Tuple[str, str]], - list_directory: Callable[[str], List[Dict[str, str]]], - resolve_path: Callable[[str, str], str], - fetch_github: Callable[[str, str, str, str], str], - **kwargs, -) -> str: - """ - Compiles the OpenQASM source code into a program that can be submitted to a - target as QIR (Quantum Intermediate Representation). - - .. note:: - This call while exported is not intended to be used directly by the user. - It is intended to be used by the Python wrapper which will handle the - callbacks and other Python specific details. - - :param source: The OpenQASM source code to compile to QIR. - :param read_file: A callable that reads a file and returns its content and path. - :param list_directory: A callable that lists the contents of a directory. - :param resolve_path: A callable that resolves a file path given a base path and a relative path. - :param fetch_github: A callable that fetches a file from GitHub. - :param **kwargs: Common options: - - - ``name`` (str): The name of the circuit. - - ``target_profile`` (TargetProfile): The target profile to use for code generation. - - ``search_path`` (str): The optional search path for resolving file references. - - ``output_semantics`` (OutputSemantics): The output semantics for the compilation. - :return: The converted QIR code as a string. - :rtype: str - :raises QasmError: If there is an error generating, parsing, or analyzing the OpenQASM source. - :raises QSharpError: If there is an error compiling the program. - """ - ... - -def compile_qasm_to_qsharp( - source: str, - read_file: Callable[[str], Tuple[str, str]], - list_directory: Callable[[str], List[Dict[str, str]]], - resolve_path: Callable[[str, str], str], - fetch_github: Callable[[str, str, str, str], str], - **kwargs, -) -> str: - """ - Converts a OpenQASM program to Q#. - - .. note:: - This call while exported is not intended to be used directly by the user. 
- It is intended to be used by the Python wrapper which will handle the - callbacks and other Python specific details. - - :param source: The OpenQASM source code to convert. - :param read_file: A callable that reads a file and returns its content and path. - :param list_directory: A callable that lists the contents of a directory. - :param resolve_path: A callable that resolves a file path given a base path and a relative path. - :param fetch_github: A callable that fetches a file from GitHub. - :param **kwargs: Common options: - - - ``name`` (str): The name of the circuit. - - ``search_path`` (str): The optional search path for resolving file references. - :return: The converted Q# code as a string. - :rtype: str - """ - ... - -def resource_estimate_qasm_program( - source: str, - job_params: str, - read_file: Callable[[str], Tuple[str, str]], - list_directory: Callable[[str], List[Dict[str, str]]], - resolve_path: Callable[[str, str], str], - fetch_github: Callable[[str, str, str, str], str], - **kwargs, -) -> str: - """ - Estimates the resource requirements for executing OpenQASM source code. - - .. note:: - This call while exported is not intended to be used directly by the user. - It is intended to be used by the Python wrapper which will handle the - callbacks and other Python specific details. - - :param source: The OpenQASM source code to estimate resource requirements for. - :param job_params: The parameters for the job as a JSON string. - :param read_file: A callable that reads a file and returns its content and path. - :param list_directory: A callable that lists the contents of a directory. - :param resolve_path: A callable that resolves a file path given a base path and a relative path. - :param fetch_github: A callable that fetches a file from GitHub. - :param **kwargs: Common options: - - - ``name`` (str): The name of the circuit. Defaults to ``'program'``. - - ``search_path`` (str): The optional search path for resolving imports. 
- :return: The estimated resource requirements as a JSON string. - :rtype: str - """ - ... - -def run_qasm_program( - source: str, - output_fn: Callable[[Output], None], - noise_config: Optional[NoiseConfig], - noise: Optional[Tuple[float, float, float]], - qubit_loss: Optional[float], - read_file: Callable[[str], Tuple[str, str]], - list_directory: Callable[[str], List[Dict[str, str]]], - resolve_path: Callable[[str, str], str], - fetch_github: Callable[[str, str, str, str], str], - **kwargs, -) -> Any: - """ - Runs the given OpenQASM program for the given number of shots. - Each shot uses an independent instance of the simulator. - - .. note:: - This call while exported is not intended to be used directly by the user. - It is intended to be used by the Python wrapper which will handle the - callbacks and other Python specific details. - - :param source: The OpenQASM source code to execute. - :param output_fn: The function to handle the output of the execution. - :param noise_config: Optional noise configuration for noisy simulation. - :param noise: Optional Pauli noise as a tuple of ``(x, y, z)`` probabilities. - :param qubit_loss: The probability of qubit loss in simulation. - :param read_file: A callable that reads a file and returns its contents. - :param list_directory: A callable that lists the contents of a directory. - :param resolve_path: A callable that resolves a path given a base path and a relative path. - :param fetch_github: A callable that fetches a file from GitHub. - :param **kwargs: Common options: - - - ``target_profile`` (TargetProfile): The target profile to use for execution. - - ``name`` (str): The name of the circuit. Defaults to ``'program'``. - - ``search_path`` (str): The optional search path for resolving imports. - - ``output_semantics`` (OutputSemantics): The output semantics for the compilation. - - ``shots`` (int): The number of shots to run. Defaults to ``1``. - - ``seed`` (int): The seed to use for the random number generator. 
- :return: The result of the execution. - :rtype: Any - :raises QasmError: If there is an error generating, parsing, or analyzing the OpenQASM source. - :raises QSharpError: If there is an error interpreting the input. - """ - ... - -def estimate_custom( - algorithm, - qubit, - qec, - factories: List = [], - *, - error_budget: float = 0.01, - max_factories: Optional[int] = None, - logical_depth_factor: Optional[float] = None, - max_physical_qubits: Optional[int] = None, - max_duration: Optional[int] = None, - error_budget_pruning: bool = False, -) -> Dict: - """ - Estimates quantum resources for a given algorithm, qubit, and code. - - :param algorithm: Python object representing the algorithm. - :param qubit: The qubit properties as a dictionary. - :param qec: Python object representing the quantum error correction code. - :param factories: List of python objects representing factories. Defaults to ``[]``. - :type factories: List - :keyword error_budget: The total error budget, which is uniformly distributed. Defaults to ``0.01``. - :kwtype error_budget: float - :keyword max_factories: Constrains the number of factories. Defaults to ``None``. - :kwtype max_factories: int - :keyword logical_depth_factor: Extends algorithmic logical depth by a factor >= 1. Defaults to ``None``. - :kwtype logical_depth_factor: float - :keyword max_physical_qubits: Forces estimator to not exceed provided number of physical qubits, may fail. - Defaults to ``None``. - :kwtype max_physical_qubits: int - :keyword max_duration: Allows estimator to run for given runtime in nanoseconds, may fail. - Defaults to ``None``. - :kwtype max_duration: int - :keyword error_budget_pruning: Will try to prune the error budget to increase magic state error budget. - Defaults to ``False``. - :kwtype error_budget_pruning: bool - :return: A dictionary with resource estimation results. - :rtype: Dict - """ - ... - -class UdtValue: - """ - A Q# UDT value. 
Objects of this class represent UDT values generated - in Q# and sent to Python. It is then converted into a Python object - in the `qsharp_value_to_python_value` function in `_qsharp.py`. - """ - - name: str - fields: List[Tuple[str, Any]] - -class TypeIR: - """ - A Q# type. Objects of this class represent a Q# type. This is used - to send the definitions of the Q# UDTs defined by the user to Python - and creating equivalent Python dataclasses in `qsharp.code.*`. - """ - - def kind(self) -> TypeKind: ... - def unwrap_primitive(self) -> PrimitiveKind: ... - def unwrap_tuple(self) -> List[TypeIR]: ... - def unwrap_array(self) -> List[TypeIR]: ... - def unwrap_udt(self) -> UdtIR: ... - -class TypeKind(Enum): - """ - A Q# type kind. - """ - - Primitive: int - Tuple: int - Array: int - Udt: int - -class PrimitiveKind(Enum): - """ - A Q# primitive. - """ - - Bool: int - Int: int - Double: int - Complex: int - String: int - Pauli: int - Result: int - -class UdtIR: - """ - A Q# Udt. - """ - - name: str - fields: List[Tuple[str, TypeIR]] - -class QirInstructionId(Enum): - I: QirInstructionId - H: QirInstructionId - X: QirInstructionId - Y: QirInstructionId - Z: QirInstructionId - S: QirInstructionId - SAdj: QirInstructionId - SX: QirInstructionId - SXAdj: QirInstructionId - T: QirInstructionId - TAdj: QirInstructionId - CNOT: QirInstructionId - CX: QirInstructionId - CY: QirInstructionId - CZ: QirInstructionId - CCX: QirInstructionId - SWAP: QirInstructionId - RX: QirInstructionId - RY: QirInstructionId - RZ: QirInstructionId - RXX: QirInstructionId - RYY: QirInstructionId - RZZ: QirInstructionId - RESET: QirInstructionId - M: QirInstructionId - MResetZ: QirInstructionId - MZ: QirInstructionId - Move: QirInstructionId - ReadResult: QirInstructionId - ResultRecordOutput: QirInstructionId - BoolRecordOutput: QirInstructionId - IntRecordOutput: QirInstructionId - DoubleRecordOutput: QirInstructionId - TupleRecordOutput: QirInstructionId - ArrayRecordOutput: QirInstructionId - 
CorrelatedNoise: QirInstructionId - -class QirInstruction: ... - -class IdleNoiseParams: - s_probability: float - -class NoiseTable: - loss: float - - def __init__(self, num_qubits: int): - """ - Initializes a new noise table for an operation that targets `num_qubits` qubits. - """ - - def __getattr__(self, name: str) -> float: - """ - Defining __getattr__ allows getting noise like this - - noise_table.ziz - - for arbitrary pauli fields. - """ - - def __setattr__(self, name: str, value: float): - """ - Defining __setattr__ allows setting noise like this - - noise_table = NoiseTable(3) - noise_table.ziz = 0.005 - - for arbitrary pauli fields. Setting an element that was - previously set overrides that entry with the new value. - """ - - @overload - def set_pauli_noise(self, lst: list[tuple[str, float]]): - """ - The correlated pauli noise to use in simulation. Setting an element - that was previously set overrides that entry with the new value. - - Example:: - - noise_table = NoiseTable(2) - noise_table.set_pauli_noise([("XI", 1e-10), ("XZ", 1e-8)]) - """ - - @overload - def set_pauli_noise(self, pauli_strings: list[str], values: list[float]): - """ - The correlated pauli noise to use in simulation. Setting an element - that was previously set overrides that entry with the new value. - - Example:: - - noise_table = NoiseTable(2) - noise_table.set_pauli_noise(["XI", "XZ"], [1e-10, 3.7e-8]) - """ - - @overload - def set_pauli_noise(self, pauli_string: str, value: float): - """ - The correlated pauli noise to use in simulation. Setting an element - that was previously set overrides that entry with the new value. - - Example:: - - noise_table = NoiseTable(2) - noise_table.set_pauli_noise("XZ", 1e-10) - """ - - def set_depolarizing(self, value: float): - """ - The depolarizing noise to use in simulation. - """ - - def set_bitflip(self, value: float): - """ - The bit flip noise to use in simulation. 
- """ - - def set_phaseflip(self, value: float): - """ - The phase flip noise to use in simulation. - """ - -class NoiseIntrinsicsTable: - def __contains__(self, name: str) -> bool: - """ - This enables support for `in` membership checks. - """ - - def __getitem__(self, name: str) -> NoiseTable: - """ - Defining __getitem__ allows getting intrinsic noise tables like this: - noise_config = NoiseConfig() - my_intrinsic_noise_table = noise_config.intrinsics["my_intrinsic"] - """ - - def __setitem__(self, name: str, value: float): - """ - Defining __setitem__ allows setting intrinsic noise tables like this: - noise_config = NoiseConfig() - my_intrinsic_noise_table = NoiseTable(3) - my_intrinsic_noise_table.ziz = 0.01 - noise_config.intrinsics["my_intrinsic"] = my_intrinsic_noise_table - """ - - def get_intrinsic_id(self, name: str) -> int: - """ - Each intrinsic inserted in the table is assigned an integer id. - This method returns that id given an intrinsic's name. - """ - -class NoiseConfig: - x: NoiseTable - y: NoiseTable - z: NoiseTable - h: NoiseTable - s: NoiseTable - s_adj: NoiseTable - t: NoiseTable - t_adj: NoiseTable - sx: NoiseTable - sx_adj: NoiseTable - rx: NoiseTable - ry: NoiseTable - rz: NoiseTable - cx: NoiseTable - cy: NoiseTable - cz: NoiseTable - rxx: NoiseTable - ryy: NoiseTable - rzz: NoiseTable - swap: NoiseTable - mov: NoiseTable - mresetz: NoiseTable - # idle: IdleNoiseParams - intrinsics: NoiseIntrinsicsTable - - def intrinsic(self, name: str, num_qubits: int) -> NoiseTable: - """ - The noise table for a custom intrinsic. - """ - - def load_csv_dir(self, dir_path: str): - """ - Loads noise tables from the specified directory path. For each .csv file found in the directory, - the noise table is loaded and associated with a unique identifier. The name of the file (without the .csv extension) - is used as the label for the noise table, which should match the QIR instruction that will apply noise using this table. 
- - Each line of the table should be of the format: "IXYZ,1.345e-4" where IXYZ is a string of Pauli operators - representing the error on each qubit (Z applying to the first qubit argument, Y to the second, etc.), and the second value - is the corresponding error probability for that specific Pauli string. - - Blank lines, lines starting with #, or lines that start with the string "pauli" (i.e., a column header) are ignored. - """ - ... - -def run_clifford( - input: List[QirInstruction], - num_qubits: int, - num_results: int, - shots: int, - noise: Optional[NoiseConfig], - seed: Optional[int], -) -> List[str]: - """ - Run the given list of QIR instructions in a Clifford simulator, - using the given `NoiseConfig`, if any. - - Returns a list of result strings. Each result string is composed - of '0's, '1's, and 'L's, representing if each measurement result - was a Zero, One, or Loss respectively. - """ - ... - -def run_cpu_full_state( - input: List[QirInstruction], - num_qubits: int, - num_results: int, - shots: int, - noise: Optional[NoiseConfig], - seed: Optional[int], -) -> List[str]: - """ - Run the given list of QIR instructions in a CPU full-state simulator, - using the given `NoiseConfig`, if any. - - Returns a list of result strings. Each result string is composed - of '0's, '1's, and 'L's, representing if each measurement result - was a Zero, One, or Loss respectively. - """ - ... - -def try_create_gpu_adapter() -> str: - """ - Checks if a compatible GPU adapter is available on the system. - - This function attempts to request a GPU adapter to determine if GPU-accelerated - quantum simulation is supported. It's useful for capability detection before - attempting to run GPU-based simulations. 
- - # Errors - - Raises `OSError` if: - - No compatible GPU is found - - GPU drivers are missing or not functioning properly - """ - pass - -def run_parallel_shots( - input: List[QirInstruction], - shots: int, - qubit_count: int, - result_count: int, - noise: Optional[NoiseConfig], - seed: Optional[int], -) -> List[str]: - """ """ - ... - -def run_adaptive_parallel_shots( - input: dict, - shots: int, - noise: Optional[NoiseConfig], - seed: Optional[int], -) -> List[str]: - """ - Run the given list of QIR instructions in a CPU full-state simulator, - using the given `NoiseConfig`, if any. - - The input is an `AdaptiveProgram` converted to a dict using the - .as_dict() method. - - Returns a list of result strings. Each result string is composed - of '0's, '1's, and 'L's, representing if each measurement result - was a Zero, One, or Loss respectively. - """ - ... - -# This is a little clunky, but until we move to Python 3.11 as a minimum, the NotRequired annotation -# for Dict fields that may be missing is not availalble. See https://peps.python.org/pep-0655/#motivation -class _GpuShotResultsBase(TypedDict): - shot_results: List[str] - """Bit strings for each shot ('0', '1', or 'L' for lost qubits).""" - - shot_result_codes: List[int] - """Result codes for each shot. 0 = Success, else Failure (Specific codes are an internal detail).""" - -class GpuShotResults(_GpuShotResultsBase, total=False): - """ - Results from running shots on the GPU simulator. - """ - - diagnostics: str - """Diagnostic information if available. (Useful primarly for debugging by the development team)""" - -class GpuContext: - def load_noise_tables(self, dir_path: str) -> List[Tuple[int, str, int]]: - """ - Loads noise tables from the specified directory path. For each .csv file found in the directory, - the noise table is loaded and associated with a unique identifier. 
The name of the file (without the .csv extension) - is used as the label for the noise table, which should match the QIR instruction that will apply noise using this table. - - Each line of the table should be for the format: "IXYZ,1.345e-4" where IXYZ is a string of Pauli operators - representing the error on each qubit (Z applying to the first qubit argument, Y to the second, etc.), and the second value - is the corresponding error probability for that specific Pauli string. - - Blank lines, lines starting with #, or lines that start with the string "pauli" (i.e., a column header) are ignored. - """ - ... - - def get_noise_table_ids(self) -> List[Tuple[int, str, int]]: - """ - Retrieves the currently loaded noise table as a string. - """ - ... - - def set_program( - self, - input: List[QirInstruction], - qubit_count: int, - result_count: int, - ) -> None: - """ - Sets the QIR program to be executed on the GPU. - """ - ... - - def set_adaptive_program(self, program: dict) -> None: - """ - Sets an Adaptive Profile QIR program for GPU execution. - - The program dict contains bytecode instructions, block/function tables, - quantum op pool, and side tables produced by AdaptiveProfilePass. - """ - ... - - def set_noise(self, noise: NoiseConfig) -> None: - """ - Sets the noise configuration for the GPU simulation. - """ - ... - - def run_shots(self, shot_count: int, seed: int) -> GpuShotResults: - """ - Runs the specified number of shots of the loaded program on the GPU. - """ - ... - - def run_adaptive_shots(self, shot_count: int, seed: int) -> GpuShotResults: - """ - Runs the specified number of shots of the loaded adaptive program on the GPU. - """ - ... diff --git a/source/pip/qsharp/_qsharp.py b/source/pip/qsharp/_qsharp.py index a1a7d1ae37..2210eb7486 100644 --- a/source/pip/qsharp/_qsharp.py +++ b/source/pip/qsharp/_qsharp.py @@ -1,1181 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -from . 
import telemetry_events, code -from ._native import ( # type: ignore - Interpreter, - TargetProfile, - StateDumpData, - QSharpError, - Output, - Circuit, - GlobalCallable, - Closure, - Pauli, - Result, - UdtValue, - TypeIR, - TypeKind, - PrimitiveKind, - CircuitConfig, - CircuitGenerationMethod, - NoiseConfig, -) -from typing import ( - Any, - Callable, - Dict, - Optional, - Tuple, - TypedDict, - Union, - List, - Set, - Iterable, - cast, -) -from .estimator._estimator import ( - EstimatorResult, - EstimatorParams, - LogicalCounts, -) -import json -import os -import sys -import types -from pathlib import Path -from time import monotonic -from dataclasses import make_dataclass - - -def lower_python_obj(obj: object, visited: Optional[Set[object]] = None) -> Any: - if visited is None: - visited = set() - - if id(obj) in visited: - raise QSharpError("Cannot send circular objects from Python to Q#.") - - visited = visited.copy().add(id(obj)) - - # Base case: Primitive types - if isinstance(obj, (bool, int, float, complex, str, Pauli, Result)): - return obj - - # Recursive case: Tuple - if isinstance(obj, tuple): - return tuple(lower_python_obj(elt, visited) for elt in obj) - - # Recursive case: Dict - if isinstance(obj, dict): - return {name: lower_python_obj(val, visited) for name, val in obj.items()} - - # Base case: Callable or Closure - if hasattr(obj, "__global_callable"): - return obj.__getattribute__("__global_callable") - if isinstance(obj, (GlobalCallable, Closure)): - return obj - - # Recursive case: Class with slots - if hasattr(obj, "__slots__"): - fields = {} - for name in getattr(obj, "__slots__"): - if name == "__dict__": - for name, val in obj.__dict__.items(): - fields[name] = lower_python_obj(val, visited) - else: - val = getattr(obj, name) - fields[name] = lower_python_obj(val, visited) - return fields - - # Recursive case: Class - if hasattr(obj, "__dict__"): - fields = { - name: lower_python_obj(val, visited) for name, val in obj.__dict__.items() - } 
- return fields - - # Recursive case: Array - # By using `Iterable` instead of `list`, we can handle other kind of iterables - # like numpy arrays and generators. - if isinstance(obj, Iterable): - return [lower_python_obj(elt, visited) for elt in obj] - - raise TypeError(f"unsupported type: {type(obj)}") - - -def python_args_to_interpreter_args(args): - """ - Helper function to turn the `*args` argument of this module - to the format expected by the Q# interpreter. - """ - if len(args) == 0: - return None - elif len(args) == 1: - return lower_python_obj(args[0]) - else: - return lower_python_obj(args) - - -_interpreter: Union["Interpreter", None] = None -_config: Union["Config", None] = None - -# Check if we are running in a Jupyter notebook to use the IPython display function -_in_jupyter = False -try: - from IPython.display import display - - if get_ipython().__class__.__name__ == "ZMQInteractiveShell": # type: ignore - _in_jupyter = True # Jupyter notebook or qtconsole -except: - pass - - -# Reporting execution time during IPython cells requires that IPython -# gets pinged to ensure it understands the cell is active. This is done by -# simply importing the display function, which it turns out is enough to begin timing -# while avoiding any UI changes that would be visible to the user. -def ipython_helper(): - try: - if __IPYTHON__: # type: ignore - from IPython.display import display - except NameError: - pass - - -class Config: - """ - Configuration hints for the language service. 
- """ - - _config: Dict[str, Any] - - def __init__( - self, - target_profile: TargetProfile, - language_features: Optional[List[str]], - manifest: Optional[str], - project_root: Optional[str], - ): - if target_profile == TargetProfile.Adaptive_RI: - self._config = {"targetProfile": "adaptive_ri"} - elif target_profile == TargetProfile.Adaptive_RIF: - self._config = {"targetProfile": "adaptive_rif"} - elif target_profile == TargetProfile.Adaptive_RIFLA: - self._config = {"targetProfile": "adaptive_rifla"} - elif target_profile == TargetProfile.Base: - self._config = {"targetProfile": "base"} - elif target_profile == TargetProfile.Unrestricted: - self._config = {"targetProfile": "unrestricted"} - - if language_features is not None: - self._config["languageFeatures"] = language_features - if manifest is not None: - self._config["manifest"] = manifest - if project_root: - # For now, we only support local project roots, so use a file schema in the URI. - # In the future, we may support other schemes, such as github, if/when - # we have VS Code Web + Jupyter support. - self._config["projectRoot"] = Path(os.getcwd(), project_root).as_uri() - - def __repr__(self) -> str: - return "Q# initialized with configuration: " + str(self._config) - - # See https://ipython.readthedocs.io/en/stable/config/integrating.html#rich-display - # See https://ipython.org/ipython-doc/3/notebook/nbformat.html#display-data - # This returns a custom MIME-type representation of the Q# configuration. - # This data will be available in the cell output, but will not be displayed - # to the user, as frontends would not know how to render the custom MIME type. - # Editor services that interact with the notebook frontend - # (i.e. the language service) can read and interpret the data. 
- def _repr_mimebundle_( - self, include: Union[Any, None] = None, exclude: Union[Any, None] = None - ) -> Dict[str, Dict[str, Any]]: - return {"application/x.qsharp-config": self._config} - - def get_target_profile(self) -> str: - """ - Returns the target profile as a string, or "unspecified" if not set. - """ - return self._config.get("targetProfile", "unspecified") - - -class PauliNoise(Tuple[float, float, float]): - """ - The Pauli noise to use in simulation represented - as probabilities of Pauli-X, Pauli-Y, and Pauli-Z errors - """ - - def __new__(cls, x: float, y: float, z: float): - """ - Creates a new :class:`PauliNoise` instance with the given error probabilities. - - :param x: Probability of a Pauli-X (bit flip) error. Must be non-negative. - :type x: float - :param y: Probability of a Pauli-Y error. Must be non-negative. - :type y: float - :param z: Probability of a Pauli-Z (phase flip) error. Must be non-negative. - :type z: float - :return: A new :class:`PauliNoise` tuple ``(x, y, z)``. - :rtype: PauliNoise - :raises ValueError: If any probability is negative or if ``x + y + z > 1``. - """ - if x < 0 or y < 0 or z < 0: - raise ValueError("Pauli noise probabilities must be non-negative.") - if x + y + z > 1: - raise ValueError("The sum of Pauli noise probabilities must be at most 1.") - return super().__new__(cls, (x, y, z)) - - -class DepolarizingNoise(PauliNoise): - """ - The depolarizing noise to use in simulation. - """ - - def __new__(cls, p: float): - """ - Creates a new :class:`DepolarizingNoise` instance. - - The depolarizing channel applies Pauli-X, Pauli-Y, or Pauli-Z errors each with - probability ``p / 3``. - - :param p: Total depolarizing error probability. Must satisfy ``0 ≤ p ≤ 1``. - :type p: float - :return: A new :class:`DepolarizingNoise` with equal X, Y, and Z error probabilities. - :rtype: DepolarizingNoise - :raises ValueError: If ``p`` is negative or ``p > 1``. 
- """ - return super().__new__(cls, p / 3, p / 3, p / 3) - - -class BitFlipNoise(PauliNoise): - """ - The bit flip noise to use in simulation. - """ - - def __new__(cls, p: float): - """ - Creates a new :class:`BitFlipNoise` instance. - - The bit flip channel applies a Pauli-X error with probability ``p``. - - :param p: Probability of a bit flip (Pauli-X) error. Must satisfy ``0 ≤ p ≤ 1``. - :type p: float - :return: A new :class:`BitFlipNoise` with X error probability ``p``. - :rtype: BitFlipNoise - :raises ValueError: If ``p`` is negative or ``p > 1``. - """ - return super().__new__(cls, p, 0, 0) - - -class PhaseFlipNoise(PauliNoise): - """ - The phase flip noise to use in simulation. - """ - - def __new__(cls, p: float): - """ - Creates a new :class:`PhaseFlipNoise` instance. - - The phase flip channel applies a Pauli-Z error with probability ``p``. - - :param p: Probability of a phase flip (Pauli-Z) error. Must satisfy ``0 ≤ p ≤ 1``. - :type p: float - :return: A new :class:`PhaseFlipNoise` with Z error probability ``p``. - :rtype: PhaseFlipNoise - :raises ValueError: If ``p`` is negative or ``p > 1``. - """ - return super().__new__(cls, 0, 0, p) - - -def init( - *, - target_profile: TargetProfile = TargetProfile.Unrestricted, - target_name: Optional[str] = None, - project_root: Optional[str] = None, - language_features: Optional[List[str]] = None, - trace_circuit: Optional[bool] = None, -) -> Config: - """ - Initializes the Q# interpreter. - - :keyword target_profile: Setting the target profile allows the Q# - interpreter to generate programs that are compatible - with a specific target. See :class:`TargetProfile`. - - :keyword target_name: An optional name of the target machine to use for inferring the compatible - target_profile setting. - - :keyword project_root: An optional path to a root directory with a Q# project to include. - It must contain a qsharp.json project manifest. 
- - :keyword language_features: An optional list of language feature flags to enable. - These correspond to experimental or preview Q# language features. - Valid values are: - - - ``"v2-preview-syntax"``: Enables Q# v2 preview syntax. This removes support for - the scoped qubit allocation block form (``use q = Qubit() { ... }``), requiring - the statement form instead (``use q = Qubit();``). It also removes the requirement - to use the ``set`` keyword for mutable variable assignments. - - :keyword trace_circuit: Enables tracing of circuit during execution. - Passing `True` is required for the `dump_circuit` function to return a circuit. - The `circuit` function is *NOT* affected by this parameter will always generate a circuit. - :return: The Q# interpreter configuration. - :rtype: Config - """ - from ._fs import read_file, list_directory, exists, join, resolve - from ._http import fetch_github - - global _interpreter - global _config - - if isinstance(target_name, str): - target = target_name.split(".")[0].lower() - if target == "ionq" or target == "rigetti": - target_profile = TargetProfile.Base - elif target == "quantinuum": - target_profile = TargetProfile.Adaptive_RI - else: - raise QSharpError( - f'target_name "{target_name}" not recognized. Please set target_profile directly.' - ) - - manifest_contents = None - if project_root is not None: - # Normalize the project path (i.e. fix file separators and remove unnecessary '.' and '..') - project_root = resolve(".", project_root) - qsharp_json = join(project_root, "qsharp.json") - if not exists(qsharp_json): - raise QSharpError( - f"{qsharp_json} not found. qsharp.json should exist at the project root and be a valid JSON file." - ) - - try: - (_, manifest_contents) = read_file(qsharp_json) - except Exception as e: - raise QSharpError( - f"Error reading {qsharp_json}. qsharp.json should exist at the project root and be a valid JSON file." 
- ) from e - - # Loop through the environment module and remove any dynamically added attributes that represent - # Q# callables or structs. This is necessary to avoid conflicts with the new interpreter instance. - keys_to_remove = [] - for key, val in code.__dict__.items(): - if ( - hasattr(val, "__global_callable") - or hasattr(val, "__qsharp_class") - or isinstance(val, types.ModuleType) - ): - keys_to_remove.append(key) - for key in keys_to_remove: - code.__delattr__(key) - - # Also remove any namespace modules dynamically added to the system. - keys_to_remove = [] - for key in sys.modules: - if key.startswith("qsharp.code."): - keys_to_remove.append(key) - for key in keys_to_remove: - sys.modules.__delitem__(key) - - _interpreter = Interpreter( - target_profile, - language_features, - project_root, - read_file, - list_directory, - resolve, - fetch_github, - _make_callable, - _make_class, - trace_circuit, - ) - - _config = Config(target_profile, language_features, manifest_contents, project_root) - # Return the configuration information to provide a hint to the - # language service through the cell output. - return _config - - -def get_interpreter() -> Interpreter: - """ - Returns the Q# interpreter. - - :return: The Q# interpreter. - :rtype: Interpreter - """ - global _interpreter - if _interpreter is None: - init() - assert _interpreter is not None, "Failed to initialize the Q# interpreter." - return _interpreter - - -def get_config() -> Config: - """ - Returns the Q# interpreter configuration. - - :return: The Q# interpreter configuration. - :rtype: Config - """ - global _config - if _config is None: - init() - assert _config is not None, "Failed to initialize the Q# interpreter." - return _config - - -class StateDump: - """ - A state dump returned from the Q# interpreter. - """ - - """ - The number of allocated qubits at the time of the dump. 
- """ - qubit_count: int - - __inner: dict - __data: StateDumpData - - def __init__(self, data: StateDumpData): - self.__data = data - self.__inner = data.get_dict() - self.qubit_count = data.qubit_count - - def __getitem__(self, index: int) -> complex: - return self.__inner.__getitem__(index) - - def __iter__(self): - return self.__inner.__iter__() - - def __len__(self) -> int: - return len(self.__inner) - - def __repr__(self) -> str: - return self.__data.__repr__() - - def __str__(self) -> str: - return self.__data.__str__() - - def _repr_markdown_(self) -> str: - return self.__data._repr_markdown_() - - def check_eq( - self, state: Union[Dict[int, complex], List[complex]], tolerance: float = 1e-10 - ) -> bool: - """ - Checks if the state dump is equal to the given state. This is not mathematical equality, - as the check ignores global phase. - - :param state: The state to check against, provided either as a dictionary of state indices to complex amplitudes, - or as a list of real amplitudes. - :param tolerance: The tolerance for the check. Defaults to 1e-10. - :return: ``True`` if the state dump is equal to the given state within the given tolerance, ignoring global phase. - :rtype: bool - """ - phase = None - # Convert a dense list of real amplitudes to a dictionary of state indices to complex amplitudes - if isinstance(state, list): - state = {i: val for i, val in enumerate(state)} - # Filter out zero states from the state dump and the given state based on tolerance - state = {k: v for k, v in state.items() if abs(v) > tolerance} - inner_state = {k: v for k, v in self.__inner.items() if abs(v) > tolerance} - if len(state) != len(inner_state): - return False - for key in state: - if key not in inner_state: - return False - if phase is None: - # Calculate the phase based on the first state pair encountered. - # Every pair of states after this must have the same phase for the states to be equivalent. 
- phase = inner_state[key] / state[key] - elif abs(phase - inner_state[key] / state[key]) > tolerance: - # This pair of states does not have the same phase, - # within tolerance, so the equivalence check fails. - return False - return True - - def as_dense_state(self) -> List[complex]: - """ - Returns the state dump as a dense list of complex amplitudes. This will include zero amplitudes. - - :return: A dense list of complex amplitudes, one per computational basis state. - :rtype: List[complex] - """ - return [self.__inner.get(i, complex(0)) for i in range(2**self.qubit_count)] - - -class ShotResult(TypedDict): - """ - A single result of a shot. - """ - - events: List[Output | StateDump | str] - result: Any - messages: List[str] - matrices: List[Output] - dumps: List[StateDump] - - -def eval( - source: str, - *, - save_events: bool = False, -) -> Any: - """ - Evaluates Q# source code. - - Output is printed to console. - - :param source: The Q# source code to evaluate. - :keyword save_events: If true, all output will be saved and returned. If false, they will be printed. - :return: The value returned by the last statement in the source code, or the saved output if ``save_events`` is true. - :rtype: Any - :raises QSharpError: If there is an error evaluating the source code. 
- """ - ipython_helper() - - results: ShotResult = { - "events": [], - "result": None, - "messages": [], - "matrices": [], - "dumps": [], - } - - def on_save_events(output: Output) -> None: - # Append the output to the last shot's output list - if output.is_matrix(): - results["events"].append(output) - results["matrices"].append(output) - elif output.is_state_dump(): - dump_data = cast(StateDumpData, output.state_dump()) - state_dump = StateDump(dump_data) - results["events"].append(state_dump) - results["dumps"].append(state_dump) - elif output.is_message(): - stringified = str(output) - results["events"].append(stringified) - results["messages"].append(stringified) - - def callback(output: Output) -> None: - if _in_jupyter: - try: - display(output) - return - except: - # If IPython is not available, fall back to printing the output - pass - print(output, flush=True) - - telemetry_events.on_eval() - start_time = monotonic() - - output = get_interpreter().interpret( - source, on_save_events if save_events else callback - ) - results["result"] = qsharp_value_to_python_value(output) - - durationMs = (monotonic() - start_time) * 1000 - telemetry_events.on_eval_end(durationMs) - - if save_events: - return results - else: - return results["result"] - - -# Helper function that knows how to create a function that invokes a callable. This will be -# used by the underlying native code to create functions for callables on the fly that know -# how to get the currently initialized global interpreter instance. -def _make_callable(callable: GlobalCallable, namespace: List[str], callable_name: str): - module = code - # Create a name that will be used to collect the hierarchy of namespace identifiers if they exist and use that - # to register created modules with the system. - accumulated_namespace = "qsharp.code" - accumulated_namespace += "." - for name in namespace: - accumulated_namespace += name - # Use the existing entry, which should already be a module. 
- if hasattr(module, name): - module = module.__getattribute__(name) - if sys.modules.get(accumulated_namespace) is None: - # This is an existing entry that is not yet registered in sys.modules, so add it. - # This can happen if a callable with the same name as this namespace is already - # defined. - sys.modules[accumulated_namespace] = module - else: - # This namespace entry doesn't exist as a module yet, so create it, add it to the environment, and - # add it to sys.modules so it supports import properly. - new_module = types.ModuleType(accumulated_namespace) - module.__setattr__(name, new_module) - sys.modules[accumulated_namespace] = new_module - module = new_module - accumulated_namespace += "." - - def _callable(*args): - ipython_helper() - - def callback(output: Output) -> None: - if _in_jupyter: - try: - display(output) - return - except: - # If IPython is not available, fall back to printing the output - pass - print(output, flush=True) - - args = python_args_to_interpreter_args(args) - - output = get_interpreter().invoke(callable, args, callback) - return qsharp_value_to_python_value(output) - - # Each callable is annotated so that we know it is auto-generated and can be removed on a re-init of the interpreter. - _callable.__global_callable = callable - - # Add the callable to the module. - if module.__dict__.get(callable_name) is None: - module.__setattr__(callable_name, _callable) - else: - # Preserve any existing attributes on the attribute with the matching name, - # since this could be a collision with an existing namespace/module. 
- for key, val in module.__dict__.get(callable_name).__dict__.items(): - if key != "__global_callable": - _callable.__dict__[key] = val - module.__setattr__(callable_name, _callable) - - -def qsharp_value_to_python_value(obj): - # Base case: Primitive types - if isinstance(obj, (bool, int, float, complex, str, Pauli, Result)): - return obj - - # Recursive case: Tuple - if isinstance(obj, tuple): - # Special case Value::UNIT maps to None. - if not obj: - return None - return tuple(qsharp_value_to_python_value(elt) for elt in obj) - - # Recursive case: Array - if isinstance(obj, list): - return [qsharp_value_to_python_value(elt) for elt in obj] - - # Recursive case: Callable or Closure - if isinstance(obj, (GlobalCallable, Closure)): - return obj - - # Recursive case: Udt - if isinstance(obj, UdtValue): - class_name = obj.name - fields = [] - args = [] - for name, value_ir in obj.fields: - val = qsharp_value_to_python_value(value_ir) - ty = type(val) - args.append(val) - fields.append((name, ty)) - return make_dataclass(class_name, fields)(*args) - - -def make_class_rec(qsharp_type: TypeIR) -> type: - class_name = qsharp_type.unwrap_udt().name - fields = {} - for field in qsharp_type.unwrap_udt().fields: - ty = None - kind = field[1].kind() - - if kind == TypeKind.Primitive: - prim_kind = field[1].unwrap_primitive() - if prim_kind == PrimitiveKind.Bool: - ty = bool - elif prim_kind == PrimitiveKind.Int: - ty = int - elif prim_kind == PrimitiveKind.Double: - ty = float - elif prim_kind == PrimitiveKind.Complex: - ty = complex - elif prim_kind == PrimitiveKind.String: - ty = str - elif prim_kind == PrimitiveKind.Pauli: - ty = Pauli - elif prim_kind == PrimitiveKind.Result: - ty = Result - else: - raise QSharpError(f"unknown primitive {prim_kind}") - elif kind == TypeKind.Tuple: - # Special case Value::UNIT maps to None. 
- if not field[1].unwrap_tuple(): - ty = type(None) - else: - ty = tuple - elif kind == TypeKind.Array: - ty = list - elif kind == TypeKind.Udt: - ty = make_class_rec(field[1]) - else: - raise QSharpError(f"unknown type {kind}") - fields[field[0]] = ty - - return make_dataclass( - class_name, - fields, - ) - - -def _make_class(qsharp_type: TypeIR, namespace: List[str], class_name: str): - """ - Helper function to create a python class given a description of it. This will be - used by the underlying native code to create classes on the fly corresponding to - the currently initialized interpreter instance. - """ - - module = code - # Create a name that will be used to collect the hierarchy of namespace identifiers if they exist and use that - # to register created modules with the system. - accumulated_namespace = "qsharp.code" - accumulated_namespace += "." - for name in namespace: - accumulated_namespace += name - # Use the existing entry, which should already be a module. - if hasattr(module, name): - module = module.__getattribute__(name) - else: - # This namespace entry doesn't exist as a module yet, so create it, add it to the environment, and - # add it to sys.modules so it supports import properly. - new_module = types.ModuleType(accumulated_namespace) - module.__setattr__(name, new_module) - sys.modules[accumulated_namespace] = new_module - module = new_module - accumulated_namespace += "." - - QSharpClass = make_class_rec(qsharp_type) - - # Each class is annotated so that we know it is auto-generated and can be removed on a re-init of the interpreter. - QSharpClass.__qsharp_class = True - - # Add the class to the module. 
- module.__setattr__(class_name, QSharpClass) - - -def run( - entry_expr: Union[str, Callable, GlobalCallable, Closure], - shots: int, - *args, - on_result: Optional[Callable[[ShotResult], None]] = None, - save_events: bool = False, - noise: Optional[ - Union[ - Tuple[float, float, float], - PauliNoise, - BitFlipNoise, - PhaseFlipNoise, - DepolarizingNoise, - NoiseConfig, - ] - ] = None, - qubit_loss: Optional[float] = None, - seed: Optional[int] = None, -) -> List[Any]: - """ - Runs the given Q# expression for the given number of shots. - Each shot uses an independent instance of the simulator. - - :param entry_expr: The entry expression. Alternatively, a callable can be provided, - which must be a Q# callable. - :param shots: The number of shots to run. - :param *args: The arguments to pass to the callable, if one is provided. - :param on_result: A callback function that will be called with each result. - :param save_events: If true, the output of each shot will be saved. If false, they will be printed. - :param noise: The noise to use in simulation. - :param qubit_loss: The probability of qubit loss in simulation. - :param seed: The seed to use for the random number generator in simulation, if any. - - :return: A list of results or runtime errors. If ``save_events`` is true, a list of ``ShotResult`` is returned. - :rtype: List[Any] - :raises QSharpError: If there is an error interpreting the input. - :raises ValueError: If the number of shots is less than 1. 
- """ - ipython_helper() - - if shots < 1: - raise ValueError("The number of shots must be greater than 0.") - - telemetry_events.on_run( - shots, - noise=(noise is not None and noise != (0.0, 0.0, 0.0)), - qubit_loss=(qubit_loss is not None and qubit_loss > 0.0), - ) - start_time = monotonic() - - results: List[ShotResult] = [] - - def print_output(output: Output) -> None: - if _in_jupyter: - try: - display(output) - return - except: - # If IPython is not available, fall back to printing the output - pass - print(output, flush=True) - - def on_save_events(output: Output) -> None: - # Append the output to the last shot's output list - results[-1]["events"].append(output) - if output.is_matrix(): - results[-1]["matrices"].append(output) - elif output.is_state_dump(): - dump_data = cast(StateDumpData, output.state_dump()) - results[-1]["dumps"].append(StateDump(dump_data)) - elif output.is_message(): - results[-1]["messages"].append(str(output)) - - callable = None - run_entry_expr = None - if isinstance(entry_expr, Callable) and hasattr(entry_expr, "__global_callable"): - args = python_args_to_interpreter_args(args) - callable = entry_expr.__global_callable - elif isinstance(entry_expr, (GlobalCallable, Closure)): - args = python_args_to_interpreter_args(args) - callable = entry_expr - else: - assert isinstance(entry_expr, str) - run_entry_expr = entry_expr - - noise_config = None - if isinstance(noise, NoiseConfig): - noise_config = noise - noise = None - - shot_seed = seed - for shot in range(shots): - # We also don't want every shot to return the same results, so we update the seed for - # the next shot with the shot number. This keeps the behavior deterministic if a seed - # was provided. 
- if seed is not None: - shot_seed = shot + seed - - results.append( - {"result": None, "events": [], "messages": [], "matrices": [], "dumps": []} - ) - run_results = get_interpreter().run( - run_entry_expr, - on_save_events if save_events else print_output, - noise_config, - noise, - qubit_loss, - callable, - args, - shot_seed, - ) - run_results = qsharp_value_to_python_value(run_results) - results[-1]["result"] = run_results - if on_result: - on_result(results[-1]) - # For every shot after the first, treat the entry expression as None to trigger - # a rerun of the last executed expression without paying the cost for any additional - # compilation. - run_entry_expr = None - - durationMs = (monotonic() - start_time) * 1000 - telemetry_events.on_run_end(durationMs, shots) - - if save_events: - return results - else: - return [shot["result"] for shot in results] - - -# Class that wraps generated QIR, which can be used by -# azure-quantum as input data. -# -# This class must implement the QirRepresentable protocol -# that is defined by the azure-quantum package. -# See: https://github.com/microsoft/qdk-python/blob/fcd63c04aa871e49206703bbaa792329ffed13c4/azure-quantum/azure/quantum/target/target.py#L21 -class QirInputData: - # The name of this variable is defined - # by the protocol and must remain unchanged. - _name: str - - def __init__(self, name: str, ll_str: str): - self._name = name - self._ll_str = ll_str - - # The name of this method is defined - # by the protocol and must remain unchanged. - def _repr_qir_(self, **kwargs) -> bytes: - return self._ll_str.encode("utf-8") - - def __str__(self) -> str: - return self._ll_str - - -def compile( - entry_expr: Union[str, Callable, GlobalCallable, Closure], *args -) -> QirInputData: - """ - Compiles the Q# source code into a program that can be submitted to a target. - Either an entry expression or a callable with arguments must be provided. 
- - :param entry_expr: The Q# expression that will be used as the entrypoint - for the program. Alternatively, a callable can be provided, which must - be a Q# callable. - :param *args: The arguments to pass to the callable, if one is provided. - - :return: The compiled program. Use ``str()`` to get the QIR string. - :rtype: QirInputData - - Example: - - .. code-block:: python - program = qsharp.compile("...") - with open('myfile.ll', 'w') as file: - file.write(str(program)) - """ - ipython_helper() - start = monotonic() - interpreter = get_interpreter() - target_profile = get_config().get_target_profile() - telemetry_events.on_compile(target_profile) - if isinstance(entry_expr, Callable) and hasattr(entry_expr, "__global_callable"): - args = python_args_to_interpreter_args(args) - ll_str = interpreter.qir(callable=entry_expr.__global_callable, args=args) - elif isinstance(entry_expr, (GlobalCallable, Closure)): - args = python_args_to_interpreter_args(args) - ll_str = interpreter.qir(callable=entry_expr, args=args) - else: - assert isinstance(entry_expr, str) - ll_str = interpreter.qir(entry_expr=entry_expr) - res = QirInputData("main", ll_str) - durationMs = (monotonic() - start) * 1000 - telemetry_events.on_compile_end(durationMs, target_profile) - return res - - -def circuit( - entry_expr: Optional[Union[str, Callable, GlobalCallable, Closure]] = None, - *args, - operation: Optional[str] = None, - generation_method: Optional[CircuitGenerationMethod] = None, - max_operations: Optional[int] = None, - source_locations: bool = False, - group_by_scope: bool = True, - prune_classical_qubits: bool = False, -) -> Circuit: - """ - Synthesizes a circuit for a Q# program. Either an entry - expression or an operation must be provided. - - :param entry_expr: An entry expression. Alternatively, a callable can be provided, - which must be a Q# callable. - :type entry_expr: str or Callable - - :param *args: The arguments to pass to the callable, if one is provided. 
- - :keyword operation: The operation to synthesize. This can be a name of - an operation or a lambda expression. The operation must take only - qubits or arrays of qubits as parameters. - :kwtype operation: str - - :keyword generation_method: The method to use for circuit generation. - :attr:`~qsharp.CircuitGenerationMethod.ClassicalEval` evaluates classical - control flow at circuit generation time. - :attr:`~qsharp.CircuitGenerationMethod.Simulate` runs a full simulation to - trace the circuit. - :attr:`~qsharp.CircuitGenerationMethod.Static` uses partial evaluation and - requires a non-``Unrestricted`` target profile. Defaults to ``None`` which - auto-selects the generation method. - :kwtype generation_method: :class:`~qsharp.CircuitGenerationMethod` - - :keyword max_operations: The maximum number of operations to include in the circuit. - Defaults to ``None`` which means no limit. - :kwtype max_operations: int - - :keyword source_locations: If ``True``, annotates each gate with its source location. - :kwtype source_locations: bool - - :keyword group_by_scope: If ``True``, groups operations by their containing scope, such as function declarations or loop blocks. - :kwtype group_by_scope: bool - - :keyword prune_classical_qubits: If ``True``, removes qubits that are never used in a quantum - gate (e.g. qubits only used as classical controls). - :kwtype prune_classical_qubits: bool - - :return: The synthesized circuit. - :rtype: :class:`~qsharp._native.Circuit` - :raises QSharpError: If there is an error synthesizing the circuit. 
- """ - ipython_helper() - start = monotonic() - telemetry_events.on_circuit() - config = CircuitConfig( - max_operations=max_operations, - generation_method=generation_method, - source_locations=source_locations, - group_by_scope=group_by_scope, - prune_classical_qubits=prune_classical_qubits, - ) - - if isinstance(entry_expr, Callable) and hasattr(entry_expr, "__global_callable"): - args = python_args_to_interpreter_args(args) - res = get_interpreter().circuit( - config=config, callable=entry_expr.__global_callable, args=args - ) - elif isinstance(entry_expr, (GlobalCallable, Closure)): - args = python_args_to_interpreter_args(args) - res = get_interpreter().circuit(config=config, callable=entry_expr, args=args) - else: - assert entry_expr is None or isinstance(entry_expr, str) - res = get_interpreter().circuit(config, entry_expr, operation=operation) - - durationMs = (monotonic() - start) * 1000 - telemetry_events.on_circuit_end(durationMs) - - return res - - -def estimate( - entry_expr: Union[str, Callable, GlobalCallable, Closure], - params: Optional[Union[Dict[str, Any], List, EstimatorParams]] = None, - *args, -) -> EstimatorResult: - """ - Estimates resources for Q# source code. - Either an entry expression or a callable with arguments must be provided. - - :param entry_expr: The entry expression. Alternatively, a callable can be provided, - which must be a Q# callable. - :param params: The parameters to configure physical estimation. - - :return: The estimated resources. 
- :rtype: EstimatorResult - """ - - ipython_helper() - - def _coerce_estimator_params( - params: Optional[ - Union[Dict[str, Any], List[Dict[str, Any]], EstimatorParams] - ] = None, - ) -> List[Dict[str, Any]]: - if params is None: - return [{}] - elif isinstance(params, EstimatorParams): - if params.has_items: - return cast(List[Dict[str, Any]], params.as_dict()["items"]) - else: - return [params.as_dict()] - elif isinstance(params, dict): - return [params] - return params - - params = _coerce_estimator_params(params) - param_str = json.dumps(params) - telemetry_events.on_estimate() - start = monotonic() - if isinstance(entry_expr, Callable) and hasattr(entry_expr, "__global_callable"): - args = python_args_to_interpreter_args(args) - res_str = get_interpreter().estimate( - param_str, callable=entry_expr.__global_callable, args=args - ) - elif isinstance(entry_expr, (GlobalCallable, Closure)): - args = python_args_to_interpreter_args(args) - res_str = get_interpreter().estimate(param_str, callable=entry_expr, args=args) - else: - assert isinstance(entry_expr, str) - res_str = get_interpreter().estimate(param_str, entry_expr=entry_expr) - res = json.loads(res_str) - - try: - qubits = res[0]["logicalCounts"]["numQubits"] - except (KeyError, IndexError): - qubits = "unknown" - - durationMs = (monotonic() - start) * 1000 - telemetry_events.on_estimate_end(durationMs, qubits) - return EstimatorResult(res) - - -def logical_counts( - entry_expr: Union[str, Callable, GlobalCallable, Closure], - *args, -) -> LogicalCounts: - """ - Extracts logical resource counts from Q# source code. - Either an entry expression or a callable with arguments must be provided. - - :param entry_expr: The entry expression. Alternatively, a callable can be provided, - which must be a Q# callable. - - :return: Program resources in terms of logical gate counts. 
- :rtype: LogicalCounts - """ - - ipython_helper() - - if isinstance(entry_expr, Callable) and hasattr(entry_expr, "__global_callable"): - args = python_args_to_interpreter_args(args) - res_dict = get_interpreter().logical_counts( - callable=entry_expr.__global_callable, args=args - ) - elif isinstance(entry_expr, (GlobalCallable, Closure)): - args = python_args_to_interpreter_args(args) - res_dict = get_interpreter().logical_counts(callable=entry_expr, args=args) - else: - assert isinstance(entry_expr, str) - res_dict = get_interpreter().logical_counts(entry_expr=entry_expr) - return LogicalCounts(res_dict) - - -def set_quantum_seed(seed: Optional[int]) -> None: - """ - Sets the seed for the random number generator used for quantum measurements. - This applies to all Q# code executed, compiled, or estimated. - - :param seed: The seed to use for the quantum random number generator. - If None, the seed will be generated from entropy. - """ - get_interpreter().set_quantum_seed(seed) - - -def set_classical_seed(seed: Optional[int]) -> None: - """ - Sets the seed for the random number generator used for standard - library classical random number operations. - This applies to all Q# code executed, compiled, or estimated. - - :param seed: The seed to use for the classical random number generator. - If None, the seed will be generated from entropy. - """ - get_interpreter().set_classical_seed(seed) - - -def dump_machine() -> StateDump: - """ - Returns the sparse state vector of the simulator as a StateDump object. - - :return: The state of the simulator. - :rtype: StateDump - """ - ipython_helper() - return StateDump(get_interpreter().dump_machine()) - - -def dump_circuit() -> Circuit: - """ - Dumps a circuit showing the current state of the simulator. - - This circuit will contain the gates that have been applied - in the simulator up to the current point. - - Requires the interpreter to be initialized with `trace_circuit=True`. - - :return: The current circuit trace. 
- :rtype: Circuit - :raises QSharpError: If the interpreter was not initialized with ``trace_circuit=True``. - """ - ipython_helper() - return get_interpreter().dump_circuit() +# Deprecated: use qdk._qsharp instead. +from qdk._qsharp import * # noqa: F401,F403 diff --git a/source/pip/qsharp/_simulation.py b/source/pip/qsharp/_simulation.py index 3d2f8f7399..f024496ccd 100644 --- a/source/pip/qsharp/_simulation.py +++ b/source/pip/qsharp/_simulation.py @@ -1,727 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -from pathlib import Path -import random -from typing import Callable, Literal, List, Optional, Tuple, TypeAlias, Union -import pyqir -from ._native import ( - QirInstructionId, - QirInstruction, - run_clifford, - run_parallel_shots, - run_adaptive_parallel_shots, - run_cpu_full_state, - NoiseConfig, - GpuContext, - try_create_gpu_adapter, -) -from pyqir import ( - Function, - FunctionType, - PointerType, - Type, - Linkage, -) -from ._qsharp import QirInputData, Result -from typing import TYPE_CHECKING -from ._adaptive_pass import AdaptiveProfilePass, OP_RECORD_OUTPUT - -if TYPE_CHECKING: # This is in the pyi file only - from ._native import GpuShotResults - - -class AggregateGatesPass(pyqir.QirModuleVisitor): - def __init__(self): - super().__init__() - self.gates: List[QirInstruction | Tuple] = [] - self.required_num_qubits = None - self.required_num_results = None - - def _get_value_as_string(self, value: pyqir.Value) -> str: - value = pyqir.extract_byte_string(value) - if value is None: - return "" - value = value.decode("utf-8") - return value - - def run(self, mod: pyqir.Module) -> Tuple[List[QirInstruction | Tuple], int, int]: - errors = mod.verify() - if errors is not None: - raise ValueError(f"Module verification failed: {errors}") - - # verify that the module is base profile - func = next(filter(pyqir.is_entry_point, mod.functions)) - self.required_num_qubits = pyqir.required_num_qubits(func) - 
self.required_num_results = pyqir.required_num_results(func) - - super().run(mod) - return (self.gates, self.required_num_qubits, self.required_num_results) - - def _on_block(self, block): - if ( - block.terminator - and block.terminator.opcode == pyqir.Opcode.BR - and len(block.terminator.operands) > 1 - ): - raise ValueError( - "simulation of programs with branching control flow is not supported" - ) - super()._on_block(block) - - def _on_call_instr(self, call: pyqir.Call) -> None: - callee_name = call.callee.name - if callee_name == "__quantum__qis__ccx__body": - self.gates.append( - ( - QirInstructionId.CCX, - pyqir.ptr_id(call.args[0]), - pyqir.ptr_id(call.args[1]), - pyqir.ptr_id(call.args[2]), - ) - ) - elif callee_name == "__quantum__qis__cx__body": - self.gates.append( - ( - QirInstructionId.CX, - pyqir.ptr_id(call.args[0]), - pyqir.ptr_id(call.args[1]), - ) - ) - elif callee_name == "__quantum__qis__cy__body": - self.gates.append( - ( - QirInstructionId.CY, - pyqir.ptr_id(call.args[0]), - pyqir.ptr_id(call.args[1]), - ) - ) - elif callee_name == "__quantum__qis__cz__body": - self.gates.append( - ( - QirInstructionId.CZ, - pyqir.ptr_id(call.args[0]), - pyqir.ptr_id(call.args[1]), - ) - ) - elif callee_name == "__quantum__qis__swap__body": - self.gates.append( - ( - QirInstructionId.SWAP, - pyqir.ptr_id(call.args[0]), - pyqir.ptr_id(call.args[1]), - ) - ) - elif callee_name == "__quantum__qis__rx__body": - self.gates.append( - ( - QirInstructionId.RX, - call.args[0].value, - pyqir.ptr_id(call.args[1]), - ) - ) - elif callee_name == "__quantum__qis__rxx__body": - self.gates.append( - ( - QirInstructionId.RXX, - call.args[0].value, - pyqir.ptr_id(call.args[1]), - pyqir.ptr_id(call.args[2]), - ) - ) - elif callee_name == "__quantum__qis__ry__body": - self.gates.append( - ( - QirInstructionId.RY, - call.args[0].value, - pyqir.ptr_id(call.args[1]), - ) - ) - elif callee_name == "__quantum__qis__ryy__body": - self.gates.append( - ( - QirInstructionId.RYY, - 
call.args[0].value, - pyqir.ptr_id(call.args[1]), - pyqir.ptr_id(call.args[2]), - ) - ) - elif callee_name == "__quantum__qis__rz__body": - self.gates.append( - ( - QirInstructionId.RZ, - call.args[0].value, - pyqir.ptr_id(call.args[1]), - ) - ) - elif callee_name == "__quantum__qis__rzz__body": - self.gates.append( - ( - QirInstructionId.RZZ, - call.args[0].value, - pyqir.ptr_id(call.args[1]), - pyqir.ptr_id(call.args[2]), - ) - ) - elif callee_name == "__quantum__qis__h__body": - self.gates.append((QirInstructionId.H, pyqir.ptr_id(call.args[0]))) - elif callee_name == "__quantum__qis__s__body": - self.gates.append((QirInstructionId.S, pyqir.ptr_id(call.args[0]))) - elif callee_name == "__quantum__qis__s__adj": - self.gates.append((QirInstructionId.SAdj, pyqir.ptr_id(call.args[0]))) - elif callee_name == "__quantum__qis__sx__body": - self.gates.append((QirInstructionId.SX, pyqir.ptr_id(call.args[0]))) - elif callee_name == "__quantum__qis__t__body": - self.gates.append((QirInstructionId.T, pyqir.ptr_id(call.args[0]))) - elif callee_name == "__quantum__qis__t__adj": - self.gates.append((QirInstructionId.TAdj, pyqir.ptr_id(call.args[0]))) - elif callee_name == "__quantum__qis__x__body": - self.gates.append((QirInstructionId.X, pyqir.ptr_id(call.args[0]))) - elif callee_name == "__quantum__qis__y__body": - self.gates.append((QirInstructionId.Y, pyqir.ptr_id(call.args[0]))) - elif callee_name == "__quantum__qis__z__body": - self.gates.append((QirInstructionId.Z, pyqir.ptr_id(call.args[0]))) - elif callee_name == "__quantum__qis__m__body": - self.gates.append( - ( - QirInstructionId.M, - pyqir.ptr_id(call.args[0]), - pyqir.ptr_id(call.args[1]), - ) - ) - elif callee_name == "__quantum__qis__mz__body": - self.gates.append( - ( - QirInstructionId.MZ, - pyqir.ptr_id(call.args[0]), - pyqir.ptr_id(call.args[1]), - ) - ) - elif callee_name == "__quantum__qis__mresetz__body": - self.gates.append( - ( - QirInstructionId.MResetZ, - pyqir.ptr_id(call.args[0]), - 
pyqir.ptr_id(call.args[1]), - ) - ) - elif callee_name == "__quantum__qis__reset__body": - self.gates.append((QirInstructionId.RESET, pyqir.ptr_id(call.args[0]))) - elif callee_name == "__quantum__qis__move__body": - self.gates.append( - ( - QirInstructionId.Move, - pyqir.ptr_id(call.args[0]), - ) - ) - elif callee_name == "__quantum__rt__result_record_output": - tag = self._get_value_as_string(call.args[1]) - self.gates.append( - ( - QirInstructionId.ResultRecordOutput, - str(pyqir.ptr_id(call.args[0])), - tag, - ) - ) - elif callee_name == "__quantum__rt__tuple_record_output": - tag = self._get_value_as_string(call.args[1]) - self.gates.append( - (QirInstructionId.TupleRecordOutput, str(call.args[0].value), tag) - ) - elif callee_name == "__quantum__rt__array_record_output": - tag = self._get_value_as_string(call.args[1]) - self.gates.append( - (QirInstructionId.ArrayRecordOutput, str(call.args[0].value), tag) - ) - elif ( - callee_name == "__quantum__rt__initialize" - or callee_name == "__quantum__rt__begin_parallel" - or callee_name == "__quantum__rt__end_parallel" - or callee_name == "__quantum__qis__barrier__body" - # We only hit this during noiseless simulations - or "qdk_noise" in call.callee.attributes.func - ): - pass - else: - raise ValueError(f"Unsupported call instruction: {callee_name}") - - -class CorrelatedNoisePass(AggregateGatesPass): - """ - This pass replaces the QIR intrinsics that are in the provided NoiseConfig - by correlated noise instructions that the simulator understands. 
- """ - - def __init__(self, noise_config: NoiseConfig): - super().__init__() - self.noise_intrinsics_table = noise_config.intrinsics - - def _on_call_instr(self, call: pyqir.Call) -> None: - callee_name = call.callee.name - if callee_name in self.noise_intrinsics_table: - self.gates.append( - ( - QirInstructionId.CorrelatedNoise, - self.noise_intrinsics_table.get_intrinsic_id(callee_name), - [pyqir.ptr_id(arg) for arg in call.args], - ) - ) - elif "qdk_noise" in call.callee.attributes.func: - # If we are running a noisy simulation, we treat - # missing noise intrinsics as an error. - raise ValueError(f"Missing noise intrinsic: {callee_name}") - else: - super()._on_call_instr(call) - - -class GpuCorrelatedNoisePass(AggregateGatesPass): - """ - A special case of the CorrelatedNoisePass that uses data loaded - directly from rust instead of a NoiseConfig object to detect the - correlated noise intrinsics. - """ - - def __init__(self, noise_table: List[Tuple[int, str, int]]): - super().__init__() - self.noise_table = dict() - for table_id, name, _count in noise_table: - self.noise_table[name] = table_id - - def _on_call_instr(self, call: pyqir.Call) -> None: - callee_name = call.callee.name - if callee_name in self.noise_table: - self.gates.append( - ( - QirInstructionId.CorrelatedNoise, - int(self.noise_table[callee_name]), # Noise table ID - [pyqir.ptr_id(qubit) for qubit in call.args], # qubit args - ) - ) - elif "qdk_noise" in call.callee.attributes.func: - # If we are running a noisy simulation, we treat - # missing noise intrinsics as an error. 
- raise ValueError(f"Missing noise intrinsic: {callee_name}") - else: - super()._on_call_instr(call) - - -class OutputRecordingPass(pyqir.QirModuleVisitor): - _output_str = "" - _closers = [] - _counters = [] - - def process_output(self, bitstring: str): - return eval( - self._output_str, - { - "o": [ - Result.Zero if x == "0" else Result.One if x == "1" else Result.Loss - for x in bitstring - ] - }, - ) - - def _on_function(self, function): - if pyqir.is_entry_point(function): - super()._on_function(function) - while len(self._closers) > 0: - self._output_str += self._closers.pop() - self._counters.pop() - - def _on_rt_result_record_output(self, call, result, target): - self._output_str += f"o[{pyqir.ptr_id(result)}]" - while len(self._counters) > 0: - self._output_str += "," - self._counters[-1] -= 1 - if self._counters[-1] == 0: - self._output_str += self._closers[-1] - self._closers.pop() - self._counters.pop() - else: - break - - def _on_rt_array_record_output(self, call, value, target): - self._output_str += "[" - self._closers.append("]") - # if len(self._counters) > 0: - # self._counters[-1] -= 1 - self._counters.append(value.value) - - def _on_rt_tuple_record_output(self, call, value, target): - self._output_str += "(" - self._closers.append(")") - # if len(self._counters) > 0: - # self._counters[-1] -= 1 - self._counters.append(value.value) - - -class DecomposeCcxPass(pyqir.QirModuleVisitor): - - h_func: Function - t_func: Function - tadj_func: Function - cz_func: Function - - def __init__(self): - super().__init__() - - def _on_module(self, module): - void = Type.void(module.context) - qubit_ty = PointerType(Type.void(module.context)) - - # Find or create all the needed functions. 
- for func in module.functions: - match func.name: - case "__quantum__qis__h__body": - self.h_func = func - case "__quantum__qis__t__body": - self.t_func = func - case "__quantum__qis__t__adj": - self.tadj_func = func - case "__quantum__qis__cz__body": - self.cz_func = func - if not hasattr(self, "h_func"): - self.h_func = Function( - FunctionType(void, [qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__h__body", - module, - ) - if not hasattr(self, "t_func"): - self.t_func = Function( - FunctionType(void, [qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__t__body", - module, - ) - if not hasattr(self, "tadj_func"): - self.tadj_func = Function( - FunctionType(void, [qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__t__adj", - module, - ) - if not hasattr(self, "cz_func"): - self.cz_func = Function( - FunctionType(void, [qubit_ty, qubit_ty]), - Linkage.EXTERNAL, - "__quantum__qis__cz__body", - module, - ) - super()._on_module(module) - - def _on_qis_ccx(self, call, ctrl1, ctrl2, target): - self.builder.insert_before(call) - self.builder.call(self.h_func, [target]) - self.builder.call(self.tadj_func, [ctrl1]) - self.builder.call(self.tadj_func, [ctrl2]) - self.builder.call(self.h_func, [ctrl1]) - self.builder.call(self.cz_func, [target, ctrl1]) - self.builder.call(self.h_func, [ctrl1]) - self.builder.call(self.t_func, [ctrl1]) - self.builder.call(self.h_func, [target]) - self.builder.call(self.cz_func, [ctrl2, target]) - self.builder.call(self.h_func, [target]) - self.builder.call(self.h_func, [ctrl1]) - self.builder.call(self.cz_func, [ctrl2, ctrl1]) - self.builder.call(self.h_func, [ctrl1]) - self.builder.call(self.t_func, [target]) - self.builder.call(self.tadj_func, [ctrl1]) - self.builder.call(self.h_func, [target]) - self.builder.call(self.cz_func, [ctrl2, target]) - self.builder.call(self.h_func, [target]) - self.builder.call(self.h_func, [ctrl1]) - self.builder.call(self.cz_func, [target, ctrl1]) - self.builder.call(self.h_func, [ctrl1]) - 
self.builder.call(self.tadj_func, [target]) - self.builder.call(self.t_func, [ctrl1]) - self.builder.call(self.h_func, [ctrl1]) - self.builder.call(self.cz_func, [ctrl2, ctrl1]) - self.builder.call(self.h_func, [ctrl1]) - self.builder.call(self.h_func, [target]) - call.erase() - - -Simulator: TypeAlias = Callable[ - [List[QirInstruction], int, int, int, NoiseConfig, int], str -] - - -def preprocess_simulation_input( - input: Union[QirInputData, str, bytes], - shots: Optional[int] = 1, - noise: Optional[NoiseConfig] = None, - seed: Optional[int] = None, -) -> tuple[pyqir.Module, int, Optional[NoiseConfig], int]: - if shots is None: - shots = 1 - # If no seed specified, generate a random u32 to use - if seed is None: - seed = random.randint(0, 2**32 - 1) - if isinstance(noise, tuple): - raise ValueError( - "Specifying Pauli noise via a tuple is not supported. Use a NoiseConfig instead." - ) - - context = pyqir.Context() - if isinstance(input, QirInputData): - mod = pyqir.Module.from_ir(context, str(input)) - elif isinstance(input, str): - mod = pyqir.Module.from_ir(context, input) - else: - mod = pyqir.Module.from_bitcode(context, input) - - return (mod, shots, noise, seed) - - -def is_adaptive(mod: pyqir.Module) -> bool: - """Check if the QIR module uses the Adaptive Profile.""" - entry = next(filter(pyqir.is_entry_point, mod.functions), None) - if entry is None: - return False - func_attrs = entry.attributes.func - if "qir_profiles" not in func_attrs: - return False - return func_attrs["qir_profiles"].string_value == "adaptive_profile" - - -def run_qir_clifford( - input: Union[QirInputData, str, bytes], - shots: Optional[int] = 1, - noise: Optional[NoiseConfig] = None, - seed: Optional[int] = None, -) -> List: - (mod, shots, noise, seed) = preprocess_simulation_input(input, shots, noise, seed) - if noise is None: - (gates, num_qubits, num_results) = AggregateGatesPass().run(mod) - else: - (gates, num_qubits, num_results) = CorrelatedNoisePass(noise).run(mod) - 
recorder = OutputRecordingPass() - recorder.run(mod) - - return list( - map( - recorder.process_output, - run_clifford(gates, num_qubits, num_results, shots, noise, seed), - ) - ) - - -def run_qir_cpu( - input: Union[QirInputData, str, bytes], - shots: Optional[int] = 1, - noise: Optional[NoiseConfig] = None, - seed: Optional[int] = None, -) -> List: - (mod, shots, noise, seed) = preprocess_simulation_input(input, shots, noise, seed) - DecomposeCcxPass().run(mod) - if noise is None: - (gates, num_qubits, num_results) = AggregateGatesPass().run(mod) - else: - (gates, num_qubits, num_results) = CorrelatedNoisePass(noise).run(mod) - recorder = OutputRecordingPass() - recorder.run(mod) - - return list( - map( - recorder.process_output, - run_cpu_full_state(gates, num_qubits, num_results, shots, noise, seed), - ) - ) - - -def str_to_result(result: str): - match result: - case "0": - return Result.Zero - case "1": - return Result.One - case "L": - return Result.Loss - case _: - raise ValueError(f"Invalid result {result}") - - -def run_qir_gpu( - input: Union[QirInputData, str, bytes], - shots: Optional[int] = 1, - noise: Optional[NoiseConfig] = None, - seed: Optional[int] = None, -) -> List: - (mod, shots, noise, seed) = preprocess_simulation_input(input, shots, noise, seed) - # Ccx is not support in the GPU simulator, decompose it - DecomposeCcxPass().run(mod) - if is_adaptive(mod): - program = AdaptiveProfilePass().run(mod, noise) - results = run_adaptive_parallel_shots(program.as_dict(), shots, noise, seed) - - # Extract recorded output result indices from the bytecode. - # OP_RECORD_OUTPUT with aux1=0 is result_record_output where - # src0 is the result index in the results buffer. 
- recorded_result_indices = [] - for ins in program.instructions: - if (ins.opcode & 0xFF) == OP_RECORD_OUTPUT and ins.aux1 == 0: - recorded_result_indices.append(ins.src0) - # Filter shot_results to only include recorded output indices - filtered = [] - for s in results: - filtered.append([str_to_result(s[i]) for i in recorded_result_indices]) - return filtered - else: - if noise is None: - (gates, num_qubits, num_results) = AggregateGatesPass().run(mod) - else: - (gates, num_qubits, num_results) = CorrelatedNoisePass(noise).run(mod) - recorder = OutputRecordingPass() - recorder.run(mod) - return list( - map( - recorder.process_output, - run_parallel_shots(gates, shots, num_qubits, num_results, noise, seed), - ) - ) - - -def prepare_qir_with_correlated_noise( - input: Union[QirInputData, str, bytes], - noise_tables: List[Tuple[int, str, int]], -) -> Tuple[List[QirInstruction], int, int]: - # Turn the input into a QIR module - (mod, _, _, _) = preprocess_simulation_input(input, None, None, None) - - # Ccx is not support in the GPU simulator, decompose it - DecomposeCcxPass().run(mod) - - # Extract the gates including correlated noise instructions - (gates, required_num_qubits, required_num_results) = GpuCorrelatedNoisePass( - noise_tables - ).run(mod) - - return (gates, required_num_qubits, required_num_results) - - -class GpuSimulator: - """ - Represents a GPU-based QIR simulator. This is a 'full state' simulator that can simulate - quantum programs, including non-Clifford gates, up to a limit of 27 qubits. - """ - - def __init__(self): - self.gpu_context = GpuContext() - self._is_adaptive = False - self._recorded_result_indices = [] - self.tables = None - - def load_noise_tables( - self, - noise_dir: str, - ): - """ - Loads noise tables from the specified directory path. For each .csv file found in the directory, - the noise table is loaded and associated with a unique identifier. 
The name of the file (without the .csv extension) - is used as the label for the noise table, which should match the QIR instruction that will apply noise using this table. - - If testing various noise models, you may load new noise models at any time by calling this method again - with a different directory path. Previously loaded noise tables will be replaced. The program currently loaded - into the simulator (if any) will remain loaded, but any subsequent calls to `run_shots` will use the newly loaded noise tables. - - Each line of the table should be of the format: "IXYZ,1.345e-4" where IXYZ is a string of Pauli operators - representing the error on each qubit (Z applying to the first qubit argument, Y to the second, etc.), and the second value - is the corresponding error probability for that specific Pauli string. - - Blank lines, lines starting with #, or lines that start with the string "pauli" (i.e., a column header) are ignored. - """ - self.tables = self.gpu_context.load_noise_tables(noise_dir) - - def set_program(self, input: Union[QirInputData, str, bytes]): - """ - Load the QIR program into the GPU simulator, preparing it for execution. You may load and run - multiple programs sequentially by calling this method multiple times before calling `run_shots` - without needing to create a new simulator instance or reloading noise tables. - """ - # Parse the QIR module to detect profile - (mod, _, _, _) = preprocess_simulation_input(input, None, None, None) - if is_adaptive(mod): - self._is_adaptive = True - # Build noise_intrinsics dict from loaded noise tables (if any) - noise_intrinsics = None - if self.tables is not None: - noise_intrinsics = {name: table_id for table_id, name, _ in self.tables} - program = AdaptiveProfilePass().run(mod, noise_intrinsics=noise_intrinsics) - self.gpu_context.set_adaptive_program(program.as_dict()) - - # Extract recorded output result indices from the bytecode. 
- # OP_RECORD_OUTPUT with aux1=0 is result_record_output where - # src0 is the result index in the results buffer. - self._recorded_result_indices = [] - for instr in program.instructions: - if instr.opcode & 0xFF == OP_RECORD_OUTPUT and instr.aux1 == 0: - self._recorded_result_indices.append(instr.src0) - else: - (self.gates, self.required_num_qubits, self.required_num_results) = ( - prepare_qir_with_correlated_noise( - input, self.tables if not self.tables is None else [] - ) - ) - self.gpu_context.set_program( - self.gates, self.required_num_qubits, self.required_num_results - ) - - def run_shots(self, shots: int, seed: Optional[int] = None) -> "GpuShotResults": - """ - Run the loaded QIR program for the specified number of shots, using an optional seed for reproducibility. - If noise is to be applied, ensure that noise has been loaded prior to running shots. - """ - seed = seed if seed is not None else random.randint(0, 2**32 - 1) - if self._is_adaptive: - results = self.gpu_context.run_adaptive_shots(shots, seed=seed) - # Filter shot_results to only include recorded output indices - if self._recorded_result_indices: - indices = self._recorded_result_indices - filtered = [] - for s in results["shot_results"]: - filtered.append("".join(s[i] for i in indices)) - results["shot_results"] = filtered - return results - return self.gpu_context.run_shots(shots, seed=seed) - - -def run_qir( - input: Union[QirInputData, str, bytes], - shots: Optional[int] = 1, - noise: Optional[NoiseConfig] = None, - seed: Optional[int] = None, - type: Optional[Literal["clifford", "cpu", "gpu"]] = None, -) -> List: - """ - Simulate the given QIR source. - - :param input: The QIR source to simulate. - :param type: The type of simulator to use. - Use ``"clifford"`` if your QIR only contains Clifford gates and measurements. - Use ``"gpu"`` if you have a GPU available in your system. - Use ``"cpu"`` as a fallback option if you don't have a GPU in your system. 
- If ``None`` (default), the GPU simulator will be tried first, falling back to - CPU if a suitable GPU device could not be located. - :param shots: The number of shots to run. - :param noise: A noise model to use in the simulation. - :param seed: A seed for reproducibility. - :return: A list of measurement results, in the order they happened during the simulation. - :rtype: List - """ - if type is None: - try: - try_create_gpu_adapter() - type = "gpu" - except OSError: - type = "cpu" - - match type: - case "clifford": - return run_qir_clifford(input, shots, noise, seed) - case "cpu": - return run_qir_cpu(input, shots, noise, seed) - case "gpu": - return run_qir_gpu(input, shots, noise, seed) - case _: - raise ValueError(f"Invalid simulator type: {type}") +# Deprecated: use qdk._simulation instead. +from qdk._simulation import * # noqa: F401,F403 diff --git a/source/pip/qsharp/applications/__init__.py b/source/pip/qsharp/applications/__init__.py index 59e481eb93..bbae5f019b 100644 --- a/source/pip/qsharp/applications/__init__.py +++ b/source/pip/qsharp/applications/__init__.py @@ -1,2 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. + +# Deprecated: use qdk.applications instead. +from qdk.applications import * # noqa: F401,F403 diff --git a/source/pip/qsharp/applications/magnets/__init__.py b/source/pip/qsharp/applications/magnets/__init__.py deleted file mode 100644 index 56c536659e..0000000000 --- a/source/pip/qsharp/applications/magnets/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -# flake8: noqa F403 -# pyright: ignore[reportWildcardImportFromLibrary] - -"""Magnets application module. 
- -Re-exports from the submodules.""" - -from .geometry import * -from .models import * -from .trotter import * -from .utilities import * diff --git a/source/pip/qsharp/applications/magnets/geometry/__init__.py b/source/pip/qsharp/applications/magnets/geometry/__init__.py deleted file mode 100644 index 4a7a380f86..0000000000 --- a/source/pip/qsharp/applications/magnets/geometry/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -"""Geometry module for representing quantum system topologies. - -This module provides hypergraph data structures for representing the -geometric structure of quantum systems, including lattice topologies -and interaction graphs. -""" - -from .complete import CompleteBipartiteGraph, CompleteGraph -from .lattice1d import Chain1D, Ring1D -from .lattice2d import Patch2D, Torus2D - -__all__ = [ - "CompleteBipartiteGraph", - "CompleteGraph", - "Chain1D", - "Ring1D", - "Patch2D", - "Torus2D", -] diff --git a/source/pip/qsharp/applications/magnets/geometry/complete.py b/source/pip/qsharp/applications/magnets/geometry/complete.py deleted file mode 100644 index 6c1ce66176..0000000000 --- a/source/pip/qsharp/applications/magnets/geometry/complete.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -"""Complete graph geometries for quantum simulations. - -This module provides classes for representing complete graphs and complete -bipartite graphs as hypergraphs. These structures are useful for quantum -systems with all-to-all or bipartite all-to-all interactions. -""" - -from ..utilities import ( - Hyperedge, - Hypergraph, - HypergraphEdgeColoring, -) - - -class CompleteGraph(Hypergraph): - """A complete graph where every vertex is connected to every other vertex. - - In a complete graph K_n, there are n vertices and n(n-1)/2 edges, - with each pair of distinct vertices connected by exactly one edge. 
- - Attributes: - n: Number of vertices in the graph. - - Example: - - .. code-block:: python - >>> graph = CompleteGraph(4) - >>> graph.nvertices - 4 - >>> graph.nedges - 6 - """ - - def __init__(self, n: int, self_loops: bool = False) -> None: - """Initialize a complete graph. - - Args: - n: Number of vertices in the graph. - self_loops: If True, include self-loop edges on each vertex - for single-site terms. - """ - if self_loops: - _edges = [Hyperedge([i]) for i in range(n)] - else: - _edges = [] - - # Add all pairs of vertices - for i in range(n): - for j in range(i + 1, n): - _edges.append(Hyperedge([i, j])) - super().__init__(_edges) - - self.n = n - - def edge_coloring(self) -> HypergraphEdgeColoring: - """Compute edge coloring for this complete graph.""" - coloring = HypergraphEdgeColoring(self) - for edge in self.edges(): - if len(edge.vertices) == 1: - coloring.add_edge(edge, -1) - else: - if self.n % 2 == 0: - i, j = edge.vertices - m = self.n - 1 - if j == m: - coloring.add_edge(edge, i) - elif (j - i) % 2 == 0: - coloring.add_edge(edge, (j - i) // 2) - else: - coloring.add_edge(edge, (j - i + m) // 2) - else: - m = self.n - i, j = edge.vertices - if (j - i) % 2 == 0: - coloring.add_edge(edge, (j - i) // 2) - else: - coloring.add_edge(edge, (j - i + m) // 2) - return coloring - - -class CompleteBipartiteGraph(Hypergraph): - """A complete bipartite graph with two vertex sets. - - In a complete bipartite graph K_{m,n} (m <= n), there are m + n - vertices partitioned into two sets of sizes m and n. Every vertex - in the first set is connected to every vertex in the second set, - giving m * n edges total. - - Vertices 0 to m-1 form the first set, and vertices m to m+n-1 - form the second set. - - Attributes: - m: Number of vertices in the first set. - n: Number of vertices in the second set. - - Requires: - m <= n - - Example: - - .. 
code-block:: python - >>> graph = CompleteBipartiteGraph(2, 3) - >>> graph.nvertices - 5 - >>> graph.nedges - 6 - """ - - def __init__(self, m: int, n: int, self_loops: bool = False) -> None: - """Initialize a complete bipartite graph. - - Args: - m: Number of vertices in the first set (vertices 0 to m-1). - n: Number of vertices in the second set (vertices m to m+n-1). - self_loops: If True, include self-loop edges on each vertex - for single-site terms. - """ - assert m <= n, "Require m <= n for CompleteBipartiteGraph." - total_vertices = m + n - - if self_loops: - _edges = [Hyperedge([i]) for i in range(total_vertices)] - - else: - _edges = [] - - # Connect every vertex in first set to every vertex in second set - for i in range(m): - for j in range(m, m + n): - _edges.append(Hyperedge([i, j])) - super().__init__(_edges) - - self.m = m - self.n = n - - def edge_coloring(self) -> HypergraphEdgeColoring: - """Compute edge coloring for this complete bipartite graph.""" - coloring = HypergraphEdgeColoring(self) - m = self.m - n = self.n - for edge in self.edges(): - if len(edge.vertices) == 1: - coloring.add_edge(edge, -1) - else: - i, j = edge.vertices - coloring.add_edge(edge, (i + j - m) % n) - return coloring diff --git a/source/pip/qsharp/applications/magnets/geometry/lattice1d.py b/source/pip/qsharp/applications/magnets/geometry/lattice1d.py deleted file mode 100644 index c7ea60fb60..0000000000 --- a/source/pip/qsharp/applications/magnets/geometry/lattice1d.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -"""One-dimensional lattice geometries for quantum simulations. - -This module provides classes for representing 1D lattice structures as -hypergraphs. These lattices are commonly used in quantum spin chain -simulations and other one-dimensional quantum systems. 
-""" - -from ..utilities import ( - Hyperedge, - Hypergraph, - HypergraphEdgeColoring, -) - - -class Chain1D(Hypergraph): - """A one-dimensional open chain lattice. - - Represents a linear chain of vertices with nearest-neighbor edges. - The chain has open boundary conditions, meaning the first and last - vertices are not connected. - - Attributes: - length: Number of vertices in the chain. - - Example: - - .. code-block:: python - >>> chain = Chain1D(4) - >>> chain.nvertices - 4 - >>> chain.nedges - 3 - """ - - def __init__(self, length: int, self_loops: bool = False) -> None: - """Initialize a 1D chain lattice. - - Args: - length: Number of vertices in the chain. - self_loops: If True, include self-loop edges on each vertex - for single-site terms. - """ - if self_loops: - _edges = [Hyperedge([i]) for i in range(length)] - - else: - _edges = [] - - for i in range(length - 1): - _edges.append(Hyperedge([i, i + 1])) - - super().__init__(_edges) - self.length = length - - def edge_coloring(self) -> HypergraphEdgeColoring: - """Compute a valid edge coloring for this chain.""" - coloring = HypergraphEdgeColoring(self) - for edge in self.edges(): - if len(edge.vertices) == 1: - coloring.add_edge(edge, -1) - else: - i, j = edge.vertices - color = min(i, j) % 2 - coloring.add_edge(edge, color) - return coloring - - -class Ring1D(Hypergraph): - """A one-dimensional ring (periodic chain) lattice. - - Represents a circular chain of vertices with nearest-neighbor edges. - The ring has periodic boundary conditions, meaning the first and last - vertices are connected. - - Attributes: - length: Number of vertices in the ring. - - Example: - - .. code-block:: python - >>> ring = Ring1D(4) - >>> ring.nvertices - 4 - >>> ring.nedges - 4 - """ - - def __init__(self, length: int, self_loops: bool = False) -> None: - """Initialize a 1D ring lattice. - - Args: - length: Number of vertices in the ring. 
- self_loops: If True, include self-loop edges on each vertex - for single-site terms. - """ - if self_loops: - _edges = [Hyperedge([i]) for i in range(length)] - else: - _edges = [] - - for i in range(length): - _edges.append(Hyperedge([i, (i + 1) % length])) - super().__init__(_edges) - - self.length = length - - def edge_coloring(self) -> HypergraphEdgeColoring: - """Compute a valid edge coloring for this ring.""" - coloring = HypergraphEdgeColoring(self) - for edge in self.edges(): - if len(edge.vertices) == 1: - coloring.add_edge(edge, -1) - else: - i, j = edge.vertices - if {i, j} == {0, self.length - 1}: - color = (self.length % 2) + 1 - else: - color = min(i, j) % 2 - coloring.add_edge(edge, color) - return coloring diff --git a/source/pip/qsharp/applications/magnets/geometry/lattice2d.py b/source/pip/qsharp/applications/magnets/geometry/lattice2d.py deleted file mode 100644 index 6c75d12c66..0000000000 --- a/source/pip/qsharp/applications/magnets/geometry/lattice2d.py +++ /dev/null @@ -1,187 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -"""Two-dimensional lattice geometries for quantum simulations. - -This module provides classes for representing 2D lattice structures as -hypergraphs. These lattices are commonly used in quantum spin system -simulations and other two-dimensional quantum systems. -""" - -from ..utilities import ( - Hyperedge, - Hypergraph, - HypergraphEdgeColoring, -) - - -class Patch2D(Hypergraph): - """A two-dimensional open rectangular lattice. - - Represents a rectangular grid of vertices with nearest-neighbor edges. - The patch has open boundary conditions, meaning edges do not wrap around. - - Vertices are indexed in row-major order: vertex (x, y) has index y * width + x. - - Attributes: - width: Number of vertices in the horizontal direction. - height: Number of vertices in the vertical direction. - - Example: - - .. 
code-block:: python - >>> patch = Patch2D(3, 2) - >>> str(patch) - '3x2 lattice patch with 6 vertices and 7 edges' - """ - - def __init__(self, width: int, height: int, self_loops: bool = False) -> None: - """Initialize a 2D patch lattice. - - Args: - width: Number of vertices in the horizontal direction. - height: Number of vertices in the vertical direction. - self_loops: If True, include self-loop edges on each vertex - for single-site terms. - """ - self.width = width - self.height = height - - if self_loops: - _edges = [Hyperedge([i]) for i in range(width * height)] - else: - _edges = [] - - # Horizontal edges (connecting (x, y) to (x+1, y)) - for y in range(height): - for x in range(width - 1): - _edges.append(Hyperedge([self._index(x, y), self._index(x + 1, y)])) - - # Vertical edges (connecting (x, y) to (x, y+1)) - for y in range(height - 1): - for x in range(width): - _edges.append(Hyperedge([self._index(x, y), self._index(x, y + 1)])) - super().__init__(_edges) - - def _index(self, x: int, y: int) -> int: - """Convert (x, y) coordinates to vertex index.""" - return y * self.width + x - - def __str__(self) -> str: - """Return the summary string ``"{width}x{height} lattice patch with {nvertices} vertices and {nedges} edges"``.""" - return f"{self.width}x{self.height} lattice patch with {self.nvertices} vertices and {self.nedges} edges" - - def __repr__(self) -> str: - """Return a string representation of the Patch2D geometry.""" - return f"Patch2D(width={self.width}, height={self.height})" - - def edge_coloring(self) -> HypergraphEdgeColoring: - """Compute edge coloring for this 2D patch.""" - coloring = HypergraphEdgeColoring(self) - for edge in self.edges(): - if len(edge.vertices) == 1: - coloring.add_edge(edge, -1) - continue - - u, v = edge.vertices - x_u, y_u = u % self.width, u // self.width - x_v, y_v = v % self.width, v // self.width - - if y_u == y_v: - color = 0 if min(x_u, x_v) % 2 == 0 else 1 - else: - color = 2 if min(y_u, y_v) % 2 == 0 else 
3 - coloring.add_edge(edge, color) - return coloring - - -class Torus2D(Hypergraph): - """A two-dimensional toroidal (periodic) lattice. - - Represents a rectangular grid of vertices with nearest-neighbor edges - and periodic boundary conditions in both directions. The topology is - that of a torus. - - Vertices are indexed in row-major order: vertex (x, y) has index y * width + x. - - Attributes: - width: Number of vertices in the horizontal direction. - height: Number of vertices in the vertical direction. - - Example: - - .. code-block:: python - >>> torus = Torus2D(3, 2) - >>> str(torus) - '3x2 lattice torus with 6 vertices and 12 edges' - """ - - def __init__(self, width: int, height: int, self_loops: bool = False) -> None: - """Initialize a 2D torus lattice. - - Args: - width: Number of vertices in the horizontal direction. - height: Number of vertices in the vertical direction. - self_loops: If True, include self-loop edges on each vertex - for single-site terms. - """ - self.width = width - self.height = height - - if self_loops: - _edges = [Hyperedge([i]) for i in range(width * height)] - else: - _edges = [] - - # Horizontal edges (connecting (x, y) to ((x+1) % width, y)) - for y in range(height): - for x in range(width): - _edges.append( - Hyperedge([self._index(x, y), self._index((x + 1) % width, y)]) - ) - - # Vertical edges (connecting (x, y) to (x, (y+1) % height)) - for y in range(height): - for x in range(width): - _edges.append( - Hyperedge([self._index(x, y), self._index(x, (y + 1) % height)]) - ) - - super().__init__(_edges) - - def _index(self, x: int, y: int) -> int: - """Convert (x, y) coordinates to vertex index.""" - return y * self.width + x - - def __str__(self) -> str: - """Return the summary string ``"{width}x{height} lattice torus with {nvertices} vertices and {nedges} edges"``.""" - return f"{self.width}x{self.height} lattice torus with {self.nvertices} vertices and {self.nedges} edges" - - def __repr__(self) -> str: - """Return a 
string representation of the Torus2D geometry.""" - return f"Torus2D(width={self.width}, height={self.height})" - - def edge_coloring(self) -> HypergraphEdgeColoring: - """Compute edge coloring for this 2D torus.""" - coloring = HypergraphEdgeColoring(self) - for edge in self.edges(): - if len(edge.vertices) == 1: - coloring.add_edge(edge, -1) - continue - - u, v = edge.vertices - x_u, y_u = u % self.width, u // self.width - x_v, y_v = v % self.width, v // self.width - - if y_u == y_v: - if {x_u, x_v} == {0, self.width - 1}: - color = 1 if self.width % 2 == 0 else 4 - else: - color = 0 if min(x_u, x_v) % 2 == 0 else 1 - else: - if {y_u, y_v} == {0, self.height - 1}: - color = 3 if self.height % 2 == 0 else 5 - else: - color = 2 if min(y_u, y_v) % 2 == 0 else 3 - coloring.add_edge(edge, color) - return coloring diff --git a/source/pip/qsharp/applications/magnets/models/__init__.py b/source/pip/qsharp/applications/magnets/models/__init__.py deleted file mode 100644 index 67c7f569ef..0000000000 --- a/source/pip/qsharp/applications/magnets/models/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -"""Models module for quantum spin models. - -This module provides classes for representing quantum spin models -as Hamiltonians built from Pauli operators. -""" - -from .model import IsingModel, Model, HeisenbergModel - -__all__ = ["Model", "IsingModel", "HeisenbergModel"] diff --git a/source/pip/qsharp/applications/magnets/models/model.py b/source/pip/qsharp/applications/magnets/models/model.py deleted file mode 100755 index 754d6741c1..0000000000 --- a/source/pip/qsharp/applications/magnets/models/model.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -# pyright: reportPrivateImportUsage=false - -from collections.abc import Sequence -from typing import Optional - -from ..utilities import ( - Hyperedge, - Hypergraph, - HypergraphEdgeColoring, - PauliString, -) - -"""Base Model class for quantum spin models. - -This module provides the base class for representing quantum spin models -as Hamiltonians. The Model class integrates with hypergraph geometries -to define interaction topologies and stores coefficients for each edge. -""" - - -class Model: - """Base class for quantum spin models. - - This class represents a quantum spin Hamiltonian defined on a hypergraph - geometry. The Hamiltonian is characterized by: - - - Ops: A list of PauliStrings (one entry per interaction term) - - Terms: Groupings of operator indices for Trotterization or parallel execution - - The model is built on a hypergraph geometry that defines which qubits - interact with each other. - - Attributes: - geometry: The Hypergraph defining the interaction topology. - - Example: - - .. code-block:: python - >>> from qsharp.magnets.geometry import Chain1D - >>> geometry = Chain1D(4) - >>> model = Model(geometry) - >>> model.set_coefficient((0, 1), 1.5) - >>> model.set_pauli_string((0, 1), PauliString.from_qubits((0, 1), "ZZ")) - >>> model.get_coefficient((0, 1)) - 1.5 - """ - - def __init__(self, geometry: Hypergraph): - """Initialize the Model. - - Creates a quantum spin model on the given geometry. - - The model stores operators lazily in ``_ops`` as interaction operators - are defined. Noncommuting collections of operators are collected in - ``_terms`` that stores the indices of its interaction operators. This - list of arrays seperate terms into parallizable groups by color. It is - initialized as one empty term group. - - Args: - geometry: Hypergraph defining the interaction topology. The number - of vertices determines the number of qubits in the model. 
- """ - self.geometry: Hypergraph = geometry - self._qubits: set[int] = set() - self._ops: list[PauliString] = [] - for edge in geometry.edges(): - self._qubits.update(edge.vertices) - self._terms: dict[int, dict[int, list[int]]] = {} - - def add_interaction( - self, - edge: Hyperedge, - pauli_string: Sequence[int | str] | str, - coefficient: complex = 1.0, - term: Optional[int] = None, - color: int = 0, - ) -> None: - """Add an interaction term to the model. - - Args: - edge: The Hyperedge representing the qubits involved in the interaction. - pauli_string: The PauliString operator for this interaction. - coefficient: The complex coefficient multiplying this term (default 1.0). - """ - if edge not in self.geometry.edges(): - raise ValueError("Edge is not part of the model geometry.") - s = PauliString.from_qubits(edge.vertices, pauli_string, coefficient) - self._ops.append(s) - if term is not None: - if term not in self._terms: - self._terms[term] = {} - if color not in self._terms[term]: - self._terms[term][color] = [] - self._terms[term][color].append(len(self._ops) - 1) - - @property - def nqubits(self) -> int: - """Return the number of qubits in the model.""" - return len(self._qubits) - - @property - def nterms(self) -> int: - """Return the number of term groups in the model.""" - return len(self._terms) - - @property - def terms(self) -> list[int]: - """Get the list of term indices in the model.""" - return list(self._terms.keys()) - - def ncolors(self, term: int) -> int: - """Return the number of colors in a given term.""" - if term not in self._terms: - raise ValueError(f"Term {term} does not exist in the model.") - return len(self._terms[term]) - - def colors(self, term: int) -> list[int]: - """Return the list of colors in a given term.""" - if term not in self._terms: - raise ValueError(f"Term {term} does not exist in the model.") - return list(self._terms[term].keys()) - - def nops(self, term: int, color: int) -> int: - """Return the number of operators 
in a given term and color.""" - if term not in self._terms: - raise ValueError(f"Term {term} does not exist in the model.") - if color not in self._terms[term]: - raise ValueError(f"Color {color} does not exist in term {term}.") - return len(self._terms[term][color]) - - def ops(self, term: int, color: int) -> list[PauliString]: - """Return the list of operators in a given term and color.""" - if term not in self._terms: - raise ValueError(f"Term {term} does not exist in the model.") - if color not in self._terms[term]: - raise ValueError(f"Color {color} does not exist in term {term}.") - return [self._ops[i] for i in self._terms[term][color]] - - def __str__(self) -> str: - """String representation of the model.""" - return "Generic model with {} terms on {} qubits.".format( - len(self._terms), len(self._qubits) - ) - - def __repr__(self) -> str: - """String representation of the model.""" - return self.__str__() - - -class IsingModel(Model): - """Translation-invariant Ising model on a hypergraph geometry. - - The Hamiltonian is: - H = -J * Σ_{} Z_i Z_j - h * Σ_i X_i - - - Single-vertex edges define X-field terms with coefficient ``-h``. - - Two-vertex edges define ZZ-coupling terms with coefficient ``-J``. - - Terms are grouped into two groups: ``0`` for field terms and ``1`` for - coupling terms. 
- """ - - def __init__(self, geometry: Hypergraph, h: float, J: float): - super().__init__(geometry) - self.h = h - self.J = J - self._terms = {0: {}, 1: {}} - - coloring: HypergraphEdgeColoring = geometry.edge_coloring() - for edge in geometry.edges(): - vertices = edge.vertices - if len(vertices) == 1: - self.add_interaction(edge, "X", -h, term=0, color=0) - elif len(vertices) == 2: - color = coloring.color(edge.vertices) - if color is None: - raise ValueError("Geometry edge coloring failed to assign a color.") - self.add_interaction(edge, "ZZ", -J, term=1, color=color) - - def __str__(self) -> str: - return ( - f"Ising model with {self.nterms} terms on {self.nqubits} qubits " - f"(h={self.h}, J={self.J})." - ) - - def __repr__(self) -> str: - return ( - f"IsingModel(nqubits={self.nqubits}, nterms={self.nterms}, " - f"h={self.h}, J={self.J})" - ) - - -class HeisenbergModel(Model): - """Translation-invariant Heisenberg model on a hypergraph geometry. - - The Hamiltonian is: - H = -J * Σ_{} (X_i X_j + Y_i Y_j + Z_i Z_j) - - - Two-vertex edges define XX, YY, and ZZ coupling terms with coefficient ``-J``. - - Terms are grouped into three parts: ``0`` for XX, ``1`` for YY, and ``2`` for ZZ. - """ - - def __init__(self, geometry: Hypergraph, J: float): - super().__init__(geometry) - self.J = J - self.coloring: HypergraphEdgeColoring = geometry.edge_coloring() - self._terms = {0: {}, 1: {}, 2: {}} - for edge in geometry.edges(): - vertices = edge.vertices - if len(vertices) == 2: - color = self.coloring.color(edge.vertices) - if color is None: - raise ValueError("Geometry edge coloring failed to assign a color.") - self.add_interaction(edge, "XX", -J, term=0, color=color) - self.add_interaction(edge, "YY", -J, term=1, color=color) - self.add_interaction(edge, "ZZ", -J, term=2, color=color) - - def __str__(self) -> str: - return ( - f"Heisenberg model with {self.nterms} terms on {self.nqubits} qubits " - f"(J={self.J})." 
- ) - - def __repr__(self) -> str: - return ( - f"HeisenbergModel(nqubits={self.nqubits}, nterms={self.nterms}, " - f"J={self.J})" - ) diff --git a/source/pip/qsharp/applications/magnets/trotter/__init__.py b/source/pip/qsharp/applications/magnets/trotter/__init__.py deleted file mode 100644 index d4beaa68c5..0000000000 --- a/source/pip/qsharp/applications/magnets/trotter/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -"""Trotter-Suzuki methods for time evolution.""" - -from .trotter import ( - TrotterStep, - TrotterExpansion, - strang_splitting, - suzuki_recursion, - yoshida_recursion, - fourth_order_trotter_suzuki, -) - -__all__ = [ - "TrotterStep", - "TrotterExpansion", - "strang_splitting", - "suzuki_recursion", - "yoshida_recursion", - "fourth_order_trotter_suzuki", -] diff --git a/source/pip/qsharp/applications/magnets/trotter/trotter.py b/source/pip/qsharp/applications/magnets/trotter/trotter.py deleted file mode 100644 index 4caaea5d01..0000000000 --- a/source/pip/qsharp/applications/magnets/trotter/trotter.py +++ /dev/null @@ -1,372 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -"""Trotter schedule utilities for magnet models. - -This module provides: - -- ``TrotterStep``: a schedule of ``(time, term_index)`` entries, -- recursion helpers (Suzuki and Yoshida) that raise the order by 2, -- factory helpers such as Strang splitting, and -- ``TrotterExpansion`` to apply a step repeatedly to a concrete model. -""" - -from collections.abc import Callable -from typing import Iterator, Optional -from ..models import Model -from ..utilities import PauliString - -import math - -try: - import cirq -except Exception as ex: - raise ImportError( - "qsharp.magnets.models requires the cirq extras. Install with 'pip install \"qsharp[cirq]\"'." - ) from ex - - -class TrotterStep: - """Schedule of Hamiltonian-term applications for one Trotter step. 
- - A ``TrotterStep`` stores an ordered list of ``(time, term_index)`` tuples. - Each tuple indicates that term group ``term_index`` should be applied for - evolution time ``time``. - - The constructor builds a first-order step over the provided term indices: - - .. math:: - - e^{-i H t} \\approx \\prod_k e^{-i H_k t}, \\quad H = \\sum_k H_k. - - where each supplied term index appears once with duration ``time_step``. - """ - - def __init__(self, terms: list[int] = [], time_step: float = 0.0): - """Initialize a Trotter step from explicit term indices. - - Args: - terms: Ordered term indices to include in this step. - time_step: Duration associated with each listed term. - - Notes: - If ``terms`` is empty, the step is initialized as order 0. - Otherwise, it is initialized as order 1. - """ - self._nterms = len(terms) - self._time_step = time_step - self._order = 1 if self._nterms > 0 else 0 - self._repr_string: Optional[str] = None - self.terms: list[tuple[float, int]] = [(time_step, j) for j in terms] - - @property - def order(self) -> int: - """Get the order of the Trotter decomposition.""" - return self._order - - @property - def nterms(self) -> int: - """Get the number of term entries used to build this schedule.""" - return self._nterms - - @property - def time_step(self) -> float: - """Get the base time step metadata stored on this step.""" - return self._time_step - - def reduce(self) -> None: - """ - Reduce the Trotter step in place by combining consecutive terms that are the same. - - This can be useful for optimizing the Trotter sequence by merging adjacent - applications of the same term into a single application with a longer time step. 
- - Example: - >>> trotter = TrotterStep() - >>> trotter.terms = [(0.5, 0), (0.5, 0), (0.5, 1)] - >>> trotter.reduce() - >>> list(trotter.step()) - [(1.0, 0), (0.5, 1)] - """ - if len(self.terms) > 1: - reduced_terms: list[tuple[float, int]] = [] - current_time, current_term = self.terms[0] - - for time, term in self.terms[1:]: - if term == current_term: - current_time += time - else: - reduced_terms.append((current_time, current_term)) - current_time, current_term = time, term - - reduced_terms.append((current_time, current_term)) - self.terms = reduced_terms - - def step(self) -> Iterator[tuple[float, int]]: - """Iterate over ``(time, term_index)`` entries for this step.""" - return iter(self.terms) - - def cirq(self, model: Model) -> cirq.Circuit: - """Build a Cirq circuit for one application of this Trotter step. - - Args: - model: Model that maps each term index to grouped Pauli operators. - - Returns: - A ``cirq.Circuit`` containing ``cirq.PauliStringPhasor`` operations - in the same order as ``self.step()``. 
- """ - _INT_TO_CIRQ = (cirq.I, cirq.X, cirq.Z, cirq.Y) - circuit = cirq.Circuit() - for time, term_index in self.step(): - for color in model.colors(term_index): - for op in model.ops(term_index, color): - pauli = cirq.PauliString( - { - cirq.LineQubit(p.qubit): _INT_TO_CIRQ[p.op] - for p in op._paulis - }, - ) - oper = cirq.PauliStringPhasor(pauli, exponent_neg=time / math.pi) - circuit.append(oper) - return circuit - - def __str__(self) -> str: - """String representation of the Trotter decomposition.""" - return f"Trotter expansion of order {self._order}: time_step={self._time_step}, num_terms={self._nterms}" - - def __repr__(self) -> str: - """String representation of the Trotter decomposition.""" - if self._repr_string is not None: - return self._repr_string - else: - return f"TrotterStep(num_terms={self._nterms}, time_step={self._time_step})" - - -def suzuki_recursion(trotter: TrotterStep) -> TrotterStep: - """ - Apply one level of Suzuki recursion to double the order of a Trotter step. - - Given a k-th order Trotter step S_k(t), this function constructs a (k+2)-nd order - step using the Suzuki fractal decomposition: - - S_{k+2}(t) = S_{k}(p t) S_{k}(p t) S_{k}((1 - 4p) t) S_{k}(p t) S_{k}(p t) - - where p = 1 / (4 - 4^{1/(2k+1)}). - - The resulting step has improved accuracy: the error scales as O(t^{k+3}) instead - of O(t^{k+1}), at the cost of 5x more exponential applications per step. - - Args: - trotter: A TrotterStep of order k to be promoted to order k+2. - - Returns: - A new TrotterStep of order k+2 constructed via Suzuki recursion. - - References: - M. Suzuki, Phys. Lett. A 146, 319 (1990). 
- """ - - suzuki = TrotterStep() - suzuki._nterms = trotter._nterms - suzuki._time_step = trotter._time_step - suzuki._order = trotter._order + 2 - suzuki._repr_string = f"SuzukiRecursion(order={suzuki._order}, time_step={suzuki._time_step}, num_terms={suzuki._nterms})" - - p = 1 / (4 - 4 ** (1 / (2 * trotter.order + 1))) - - suzuki.terms = [(p * time, term_index) for time, term_index in trotter.step()] - suzuki.terms += [(p * time, term_index) for time, term_index in trotter.step()] - suzuki.terms += [ - ((1 - 4 * p) * time, term_index) for time, term_index in trotter.step() - ] - suzuki.terms += [(p * time, term_index) for time, term_index in trotter.step()] - suzuki.terms += [(p * time, term_index) for time, term_index in trotter.step()] - suzuki.reduce() # Combine consecutive terms that are the same - - return suzuki - - -def yoshida_recursion(trotter: TrotterStep) -> TrotterStep: - """ - Apply one level of Yoshida recursion to increase the order of a Trotter step by 2. - - Given a k-th order Trotter step S_k(t), this function constructs a (k+2)-nd order - step using Yoshida's symmetric triple-jump composition: - - S_{k+2}(t) = S_{k}(w_1 t) S_{k}(w_0 t) S_{k}(w_1 t) - - where: - w_1 = 1 / (2 - 2^{1/(2k+1)}) - w_0 = -2^{1/(2k+1)} / (2 - 2^{1/(2k+1)}) = 1 - 2 w_1 - - The resulting step has improved accuracy: the error scales as O(t^{k+3}) instead - of O(t^{k+1}), at the cost of 3x more exponential applications per step. - - Args: - trotter: A TrotterStep of order k to be promoted to order k+2. - - Returns: - A new TrotterStep of order k+2 constructed via Yoshida recursion. - - References: - H. Yoshida, Phys. Lett. A 150, 262 (1990). 
- """ - - yoshida = TrotterStep() - yoshida._nterms = trotter._nterms - yoshida._time_step = trotter._time_step - yoshida._order = trotter._order + 2 - yoshida._repr_string = f"YoshidaRecursion(order={yoshida._order}, time_step={yoshida._time_step}, num_terms={yoshida._nterms})" - - cube_root_2 = 2 ** (1 / (2 * trotter.order + 1)) - w1 = 1 / (2 - cube_root_2) - w0 = 1 - 2 * w1 # equivalent to -cube_root_2 / (2 - cube_root_2) - - yoshida.terms = [(w1 * time, term_index) for time, term_index in trotter.step()] - yoshida.terms += [(w0 * time, term_index) for time, term_index in trotter.step()] - yoshida.terms += [(w1 * time, term_index) for time, term_index in trotter.step()] - yoshida.reduce() # Combine consecutive terms that are the same - - return yoshida - - -def strang_splitting(terms: list[int], time: float) -> TrotterStep: - """ - Create a second-order Strang splitting schedule for explicit term indices. - - The second-order Trotter formula uses symmetric splitting: - - e^{-i H t} \\approx \\prod_{k=1}^{n-1} e^{-i H_k t/2} \\, e^{-i H_n t} \\, \\prod_{k=n-1}^{1} e^{-i H_k t/2} - - This provides second-order accuracy in the time step, compared to - first-order for the basic Trotter decomposition. - - Example: - - .. code-block:: python - >>> strang = strang_splitting(terms=[0, 1, 2], time=0.5) - >>> list(strang.step()) - [(0.25, 0), (0.25, 1), (0.5, 2), (0.25, 1), (0.25, 0)] - - Args: - terms: Ordered term indices for a single symmetric step. Must be non-empty. - time: Total evolution time assigned to this second-order step. - - Returns: - A second-order ``TrotterStep``. - - References: - G. Strang, SIAM J. Numer. Anal. 5, 506 (1968). 
- """ - strang = TrotterStep() - strang._nterms = len(terms) - strang._time_step = time - strang._order = 2 - strang._repr_string = f"StrangSplitting(time_step={time}, num_terms={len(terms)})" - strang.terms = [] - for i in range(len(terms) - 1): - strang.terms.append((time / 2, terms[i])) - strang.terms.append((time, terms[-1])) - for i in reversed(range(len(terms) - 1)): - strang.terms.append((time / 2, terms[i])) - return strang - - -def fourth_order_trotter_suzuki(terms: list[int], time: float) -> TrotterStep: - """ - Factory function for creating a fourth-order Trotter-Suzuki decomposition - using Suzuki recursion. - - This is obtained by applying one level of Suzuki recursion to the second-order - Strang splitting. The resulting fourth-order decomposition has improved accuracy - compared to the second-order Strang splitting, at the cost of more exponential - applications per step. - - Example: - - .. code-block:: python - >>> fourth_order = fourth_order_trotter_suzuki(terms=[0, 1, 2], time=0.5) - >>> list(fourth_order.step()) - [(0.1767766952966369, 0), (0.1767766952966369, 1), (0.1767766952966369, 2), (0.3535533905932738, 1), (0.3535533905932738, 0), (0.1767766952966369, 1), (0.1767766952966369, 2), (0.1767766952966369, 1), (0.1767766952966369, 0)] - """ - return suzuki_recursion(strang_splitting(terms, time)) - - -class TrotterExpansion: - """Repeated application of a Trotter method on a concrete model. - - ``TrotterExpansion`` builds one step with ``trotter_method(model.terms, dt)`` - where ``dt = time / num_steps`` and then repeats it ``num_steps`` times. - - Iteration via :meth:`step` yields ``PauliString`` operators already scaled by - the per-entry schedule time. - """ - - def __init__( - self, - trotter_method: Callable[[list[int], float], TrotterStep], - model: Model, - time: float, - num_steps: int, - ): - """Initialize a repeated-step Trotter expansion. - - Args: - trotter_method: Callable mapping ``(terms, dt)`` to a ``TrotterStep``. 
- model: Model that defines term groups and per-term operators. - time: Total evolution time. - num_steps: Number of repeated Trotter steps. - """ - self._model = model - self._num_steps = num_steps - self._trotter_step = trotter_method(model.terms, time / num_steps) - - @property - def order(self) -> int: - """Get the order of the underlying Trotter step.""" - return self._trotter_step.order - - @property - def nterms(self) -> int: - """Get the number of Hamiltonian terms.""" - return self._model.nterms - - @property - def nsteps(self) -> int: - """Get the number of Trotter steps.""" - return self._num_steps - - @property - def total_time(self) -> float: - """Get the total evolution time (time_step * num_steps).""" - return self._trotter_step.time_step * self._num_steps - - def step(self) -> Iterator[PauliString]: - """Iterate over scaled operators for the full expansion. - - Yields: - ``PauliString`` operators with coefficients scaled by schedule time, - in execution order across all repeated steps. 
- """ - for _ in range(self._num_steps): - for s, i in self._trotter_step.step(): - for c in self._model.colors(i): - for op in self._model.ops(i, c): - yield (op * s) - - def cirq(self) -> cirq.CircuitOperation: - """Get a repeated Cirq circuit operation for this expansion.""" - circuit = self._trotter_step.cirq(self._model).freeze() - return cirq.CircuitOperation(circuit, repetitions=self._num_steps) - - def __str__(self) -> str: - """String representation of the Trotter expansion.""" - return ( - f"TrotterExpansion(order={self.order}, num_steps={self._num_steps}, " - f"total_time={self.total_time}, num_terms={self.nterms})" - ) - - def __repr__(self) -> str: - """Repr representation of the Trotter expansion.""" - return f"TrotterExpansion({self._trotter_step!r}, num_steps={self._num_steps})" diff --git a/source/pip/qsharp/applications/magnets/utilities/__init__.py b/source/pip/qsharp/applications/magnets/utilities/__init__.py deleted file mode 100644 index b350f7da40..0000000000 --- a/source/pip/qsharp/applications/magnets/utilities/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -"""Utilities module for magnets package. - -This module provides utility data structures and algorithms used across -the magnets package, including hypergraph representations. -""" - -from .hypergraph import ( - Hyperedge, - Hypergraph, - HypergraphEdgeColoring, -) -from .pauli import Pauli, PauliString, PauliX, PauliY, PauliZ - -__all__ = [ - "Hyperedge", - "Hypergraph", - "HypergraphEdgeColoring", - "Pauli", - "PauliString", - "PauliX", - "PauliY", - "PauliZ", -] diff --git a/source/pip/qsharp/applications/magnets/utilities/hypergraph.py b/source/pip/qsharp/applications/magnets/utilities/hypergraph.py deleted file mode 100644 index b7caffbd99..0000000000 --- a/source/pip/qsharp/applications/magnets/utilities/hypergraph.py +++ /dev/null @@ -1,317 +0,0 @@ -# Copyright (c) Microsoft Corporation. 
-# Licensed under the MIT License. - -"""Hypergraph data structures for representing quantum system geometries. - -This module provides classes for representing hypergraphs, which generalize -graphs by allowing edges (hyperedges) to connect any number of vertices. -Hypergraphs are useful for representing interaction terms in quantum -Hamiltonians, where multi-body interactions can involve more than two sites. -""" - -import random -from typing import Iterator, Optional - - -class Hyperedge: - """A hyperedge connecting one or more vertices in a hypergraph. - - A hyperedge generalizes the concept of an edge in a graph. While a - traditional edge connects exactly two vertices, a hyperedge can connect - any number of vertices. This is useful for representing: - - Single-site terms (self-loops): 1 vertex - - Two-body interactions: 2 vertices - - Multi-body interactions: 3+ vertices - Each hyperedge is defined by a set of unique vertex indices, which are - stored as a sorted tuple for consistency and hashability. - - Attributes: - vertices: Sorted tuple of vertex indices connected by this hyperedge. - - Example: - - .. code-block:: python - >>> edge = Hyperedge([2, 0, 1]) - >>> edge.vertices - (0, 1, 2) - """ - - def __init__(self, vertices: list[int]) -> None: - """Initialize a hyperedge with the given vertices. - - Args: - vertices: List of vertex indices. Will be sorted internally. - """ - self.vertices: tuple[int, ...] = tuple(sorted(set(vertices))) - - def __str__(self) -> str: - return str(self.vertices) - - def __repr__(self) -> str: - return f"Hyperedge({list(self.vertices)})" - - -class Hypergraph: - """A hypergraph consisting of vertices connected by hyperedges. - - A hypergraph is a generalization of a graph where edges (hyperedges) can - connect any number of vertices. This class serves as the base class for - various lattice geometries used in quantum simulations. - - Attributes: - _edge_set: Set of hyperedges in the hypergraph. 
- _vertex_set: Set of all unique vertex indices in the hypergraph. - - Note: - Edge colors are managed separately by :class:`HypergraphEdgeColoring`. - Use :meth:`edge_coloring` to generate a coloring for this hypergraph. - - Example: - - .. code-block:: python - >>> edges = [Hyperedge([0, 1]), Hyperedge([1, 2]), Hyperedge([0, 2])] - >>> graph = Hypergraph(edges) - >>> graph.nvertices - 3 - >>> graph.nedges - 3 - """ - - def __init__(self, edges: list[Hyperedge]) -> None: - """Initialize a hypergraph with the given edges. - - Args: - edges: List of hyperedges defining the hypergraph structure. - """ - self._vertex_set = set() - self._edge_set = set(edges) - for edge in edges: - self._vertex_set.update(edge.vertices) - - @property - def nvertices(self) -> int: - """Return the number of vertices in the hypergraph.""" - return len(self._vertex_set) - - def vertices(self) -> Iterator[int]: - """Iterate over all vertex indices in the hypergraph. - - Returns: - Iterator of vertex indices in ascending order. - """ - return iter(sorted(self._vertex_set)) - - @property - def nedges(self) -> int: - """Return the number of hyperedges in the hypergraph.""" - return len(self._edge_set) - - def edges(self) -> Iterator[Hyperedge]: - """Iterate over all hyperedges in the hypergraph. - - Returns: - Iterator of all hyperedges in the hypergraph. - """ - return iter(self._edge_set) - - def add_edge(self, edge: Hyperedge) -> None: - """Add a hyperedge to the hypergraph. - - Args: - edge: The Hyperedge instance to add. - """ - self._edge_set.add(edge) - self._vertex_set.update(edge.vertices) - - def edge_coloring( - self, seed: Optional[int] = 0, trials: int = 1 - ) -> "HypergraphEdgeColoring": - """Compute a (nondeterministic) greedy edge coloring of this hypergraph. - - Args: - seed: Optional random seed for reproducibility. - trials: Number of randomized trials to attempt. The best coloring - (fewest colors) is returned. 
- - Returns: - A :class:`HypergraphEdgeColoring` for this hypergraph. - """ - all_edges = sorted(self.edges(), key=lambda edge: edge.vertices) - - if not all_edges: - return HypergraphEdgeColoring(self) - - num_trials = max(trials, 1) - best_coloring: Optional[HypergraphEdgeColoring] = None - least_colors: Optional[int] = None - - for trial in range(num_trials): - trial_seed = None if seed is None else seed + trial - rng = random.Random(trial_seed) - - edge_order = list(all_edges) - rng.shuffle(edge_order) - - coloring = HypergraphEdgeColoring(self) - num_colors = 0 - - for edge in edge_order: - if len(edge.vertices) == 1: - coloring.add_edge(edge, -1) - continue - - assigned = False - for color in range(num_colors): - used_vertices = set().union( - *( - candidate.vertices - for candidate in coloring.edges_of_color(color) - ) - ) - if not any(vertex in used_vertices for vertex in edge.vertices): - coloring.add_edge(edge, color) - assigned = True - break - - if not assigned: - coloring.add_edge(edge, num_colors) - num_colors += 1 - - if least_colors is None or coloring.ncolors < least_colors: - least_colors = coloring.ncolors - best_coloring = coloring - - assert best_coloring is not None - return best_coloring - - def __str__(self) -> str: - return f"Hypergraph with {self.nvertices} vertices and {self.nedges} edges." - - def __repr__(self) -> str: - return f"Hypergraph({list(self._edge_set)})" - - -class HypergraphEdgeColoring: - """Edge-color assignment for a :class:`Hypergraph`. - - This class stores colors separately from :class:`Hypergraph` and enforces - the rule that multi-vertex edges sharing a color do not share any vertices. - - Conventions: - - - Colors for nontrivial edges must be nonnegative integers. - - Single-vertex edges may use a special color (for example ``-1``). - - Only nonnegative colors contribute to :attr:`ncolors`. - - Note: - Colors are keyed by edge vertex tuples (``edge.vertices``), not by - ``Hyperedge`` object identity. 
As a result, :meth:`color` accepts edge - vertex tuples directly, while :meth:`add_edge` still requires an edge - instance that belongs to :attr:`hypergraph`. - - Attributes: - hypergraph: The supporting :class:`Hypergraph` whose edges can be - colored by this instance. - """ - - def __init__(self, hypergraph: Hypergraph) -> None: - self.hypergraph = hypergraph - self._colors: dict[tuple[int, ...], int] = {} # Vertices-to-color mapping - self._used_vertices: dict[int, set[int]] = ( - {} - ) # Set of vertices used by each color - - @property - def ncolors(self) -> int: - """Return the number of distinct nonnegative colors in the coloring.""" - return len(self._used_vertices) - - def color(self, vertices: tuple[int, ...]) -> Optional[int]: - """Return the color assigned to edge vertices. - - Args: - vertices: Canonical vertex tuple for the edge to query (typically - ``edge.vertices``). - - Returns: - The color assigned to ``vertices``, or ``None`` if the edge has - not been added to this coloring. - """ - if not isinstance(vertices, tuple) or not all( - isinstance(vertex, int) for vertex in vertices - ): - raise TypeError("vertices must be tuple[int, ...]") - return self._colors.get(vertices) - - def colors(self) -> Iterator[int]: - """Iterate over distinct nonnegative colors present in the coloring. - - Returns: - Iterator of distinct nonnegative color indices. - """ - return iter(self._used_vertices.keys()) - - def add_edge(self, edge: Hyperedge, color: int) -> None: - """Add ``edge`` to this coloring with the specified ``color``. - - For multi-vertex edges, this enforces that no previously added edge - with the same color shares a vertex with ``edge``. - - Args: - edge: The Hyperedge instance to add. This must be an edge present - in :attr:`hypergraph` (typically one returned by - ``hypergraph.edges()``). - color: Color index for the edge. - - Raises: - TypeError: If ``edge`` is not a :class:`Hyperedge`. - ValueError: If ``edge`` is not part of :attr:`hypergraph`. 
- ValueError: If ``color`` is negative for a nontrivial edge. - RuntimeError: If adding ``edge`` would create a same-color vertex - conflict. - """ - if not isinstance(edge, Hyperedge): - raise TypeError(f"edge must be Hyperedge, got {type(edge).__name__}") - - if edge not in self.hypergraph.edges(): - raise ValueError("edge must belong to the supporting Hypergraph") - - vertices = edge.vertices - - if len(vertices) == 1: - # Single-vertex edges can be colored with a special color (e.g., -1) - self._colors[vertices] = color - else: - if color < 0: - raise ValueError( - "Color index must be nonnegative for multi-vertex edges." - ) - if color not in self._used_vertices: - self._colors[vertices] = color - self._used_vertices[color] = set(vertices) - else: - if any(v in self._used_vertices[color] for v in vertices): - raise RuntimeError( - "Edge conflicts with existing edge of same color." - ) - self._colors[vertices] = color - self._used_vertices[color].update(vertices) - - self._colors[vertices] = color - - def edges_of_color(self, color: int) -> Iterator[Hyperedge]: - """Iterate over hyperedges with a specific color. - - Args: - color: Color index for filtering edges. - - Returns: - Iterator of edges currently assigned to ``color``. - """ - return iter( - [ - edge - for edge in self.hypergraph.edges() - if self._colors.get(edge.vertices) == color - ] - ) diff --git a/source/pip/qsharp/applications/magnets/utilities/pauli.py b/source/pip/qsharp/applications/magnets/utilities/pauli.py deleted file mode 100644 index 4eb7b92e5b..0000000000 --- a/source/pip/qsharp/applications/magnets/utilities/pauli.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -"""Pauli operator representation for quantum spin systems.""" - -from collections.abc import Sequence - -try: - import cirq -except Exception as ex: - raise ImportError( - "qsharp.magnets.models requires the cirq extras. 
Install with 'pip install \"qsharp[cirq]\"'." - ) from ex - - -class Pauli: - """Single-qubit Pauli term tied to an explicit qubit index. - - ``Pauli`` stores a Pauli identifier and the qubit it acts on. The Pauli - identifier can be provided either as an integer code or a label: - - - ``0`` / ``"I"`` - - ``1`` / ``"X"`` - - ``2`` / ``"Z"`` - - ``3`` / ``"Y"`` - - Note: - The integer mapping follows the internal QDK convention where ``2`` is - ``Z`` and ``3`` is ``Y``. - - Example: - - .. code-block:: python - >>> p = Pauli("Y", qubit=2) - >>> p.op - 3 - >>> p.qubit - 2 - """ - - _VALID_INTS = {0, 1, 2, 3} - _STR_TO_INT = {"I": 0, "X": 1, "Z": 2, "Y": 3} - - def __init__(self, value: int | str, qubit: int = 0) -> None: - """Initialize a Pauli operator. - - Args: - value: An integer 0-3 or one of 'I', 'X', 'Y', 'Z' (case-insensitive). - qubit: The index of the qubit this operator acts on. Defaults to 0. - - Raises: - ValueError: If ``value`` is not a valid integer/string Pauli identifier. - """ - if isinstance(value, int): - if value not in self._VALID_INTS: - raise ValueError(f"Integer value must be 0-3, got {value}.") - self._op = value - elif isinstance(value, str): - key = value.upper() - if key not in self._STR_TO_INT: - raise ValueError( - f"String value must be one of 'I', 'X', 'Y', 'Z', got '{value}'." - ) - self._op = self._STR_TO_INT[key] - else: - raise ValueError(f"Expected int or str, got {type(value).__name__}.") - self.qubit: int = qubit - - @property - def op(self) -> int: - """Integer encoding of this Pauli term. - - Returns: - ``0`` for ``I``, ``1`` for ``X``, ``2`` for ``Z``, ``3`` for ``Y``. 
- """ - return self._op - - def __str__(self) -> str: - labels = {0: "I", 1: "X", 2: "Z", 3: "Y"} - return f"{labels[self._op]}({self.qubit})" - - def __repr__(self) -> str: - labels = {0: "I", 1: "X", 2: "Z", 3: "Y"} - return f"Pauli('{labels[self._op]}', qubit={self.qubit})" - - def __eq__(self, other: object) -> bool: - if not isinstance(other, Pauli): - return NotImplemented - return self._op == other._op and self.qubit == other.qubit - - def __hash__(self) -> int: - return hash((self._op, self.qubit)) - - @property - def cirq(self): - """Return this Pauli term as a Cirq gate operation on ``LineQubit``. - - Returns: - A Cirq operation equivalent to - ``cirq.{I|X|Z|Y}.on(cirq.LineQubit(self.qubit))``. - """ - _INT_TO_CIRQ = (cirq.I, cirq.X, cirq.Z, cirq.Y) - return _INT_TO_CIRQ[self._op].on(cirq.LineQubit(self.qubit)) - - -def PauliX(qubit: int) -> Pauli: - """Create a Pauli-X operator on the given qubit.""" - return Pauli("X", qubit) - - -def PauliY(qubit: int) -> Pauli: - """Create a Pauli-Y operator on the given qubit.""" - return Pauli("Y", qubit) - - -def PauliZ(qubit: int) -> Pauli: - """Create a Pauli-Z operator on the given qubit.""" - return Pauli("Z", qubit) - - -class PauliString: - """Ordered tensor product of single-qubit ``Pauli`` terms with a coefficient. - - ``PauliString`` stores: - - - an ordered tuple of :class:`Pauli` objects (including each term's qubit), and - - a complex scalar coefficient. - - Construction options: - - - pass a sequence of :class:`Pauli` objects to ``PauliString(...)`` - - use :meth:`from_qubits` to pair qubit indices with Pauli labels/codes - - Example: - - .. code-block:: python - >>> ps = PauliString([PauliX(0), PauliZ(1)], coefficient=-1j) - >>> ps.qubits - (0, 1) - >>> ps2 = PauliString.from_qubits((0, 1), "XZ", coefficient=-1j) - >>> ps == ps2 - True - """ - - def __init__(self, paulis: Sequence[Pauli], coefficient: complex = 1.0) -> None: - """Initialize a PauliString from a sequence of Pauli operators. 
- - Args: - paulis: A sequence of :class:`Pauli` instances, each with its - own qubit index. - coefficient: Complex coefficient multiplying the Pauli string (default 1.0). - - Raises: - TypeError: If any element is not a Pauli instance. - """ - for p in paulis: - if not isinstance(p, Pauli): - raise TypeError( - f"Expected Pauli instance, got {type(p).__name__}. " - "Use PauliString.from_qubits() for int/str values." - ) - self._paulis: tuple[Pauli, ...] = tuple(paulis) - self._coefficient: complex = coefficient - - @classmethod - def from_qubits( - cls, - qubits: tuple[int, ...], - values: Sequence[int | str] | str, - coefficient: complex = 1.0, - ) -> "PauliString": - """Create a PauliString from qubit indices and Pauli labels. - - Args: - qubits: Tuple of qubit indices. - values: Sequence of Pauli identifiers (integers 0-3 or strings - 'I', 'X', 'Y', 'Z'). A plain string like ``"XZI"`` is also - accepted and treated as individual characters. - coefficient: Complex coefficient multiplying the Pauli string. - - Returns: - A new PauliString instance. - - Raises: - ValueError: If qubits and values have different lengths, or if - any value is not a valid Pauli identifier. - """ - if len(qubits) != len(values): - raise ValueError( - f"Length mismatch: {len(qubits)} qubits vs {len(values)} values." - ) - paulis = [Pauli(v, q) for q, v in zip(qubits, values)] - return cls(paulis, coefficient=coefficient) - - @property - def qubits(self) -> tuple[int, ...]: - """Tuple of qubit indices in the same order as the stored Pauli terms. - - Returns: - Tuple of qubit indices, one per Pauli operator. - """ - return tuple(p.qubit for p in self._paulis) - - @property - def coefficient(self) -> complex: - """Complex coefficient multiplying this Pauli string.""" - return self._coefficient - - @property - def paulis(self) -> str: - """String of Pauli labels in the same order as the stored Pauli terms. - - Returns: - String of Pauli labels ('I', 'X', 'Z', 'Y'), one per Pauli operator. 
- """ - labels = {0: "I", 1: "X", 2: "Z", 3: "Y"} - return "".join(labels[p.op] for p in self._paulis) - - def __iter__(self): - """Iterate over Pauli terms in stored order. - - Yields: - :class:`Pauli` instances in order. - """ - return iter(self._paulis) - - def __len__(self) -> int: - return len(self._paulis) - - def __getitem__(self, index: int) -> Pauli: - return self._paulis[index] - - def __mul__(self, scalar: complex) -> "PauliString": - """Scale the coefficient of this PauliString by a complex scalar.""" - return PauliString(self._paulis, coefficient=self._coefficient * scalar) - - def __str__(self) -> str: - labels = {0: "I", 1: "X", 2: "Z", 3: "Y"} - s = "".join(map(str, self._paulis)) - return f"{self._coefficient} * {s}" - - def __repr__(self) -> str: - labels = {0: "I", 1: "X", 2: "Z", 3: "Y"} - s = "".join(labels[p.op] for p in self._paulis) - return f"PauliString(qubits={self.qubits}, ops='{s}', coefficient={self._coefficient})" - - def __eq__(self, other: object) -> bool: - if not isinstance(other, PauliString): - return NotImplemented - return self._paulis == other._paulis and self._coefficient == other._coefficient - - def __hash__(self) -> int: - return hash((self._paulis, self._coefficient)) - - @property - def cirq(self): - """Return the corresponding Cirq ``PauliString``. - - Constructs a ``cirq.PauliString`` by applying each single-qubit - Pauli to its corresponding ``cirq.LineQubit``. - - Returns: - A ``cirq.PauliString`` on ``cirq.LineQubit`` instances with - ``self._coefficient`` as its coefficient. 
- """ - _INT_TO_CIRQ = (cirq.I, cirq.X, cirq.Z, cirq.Y) - return cirq.PauliString( - {cirq.LineQubit(p.qubit): _INT_TO_CIRQ[p.op] for p in self._paulis}, - coefficient=self._coefficient, - ) diff --git a/source/pip/qsharp/code/__init__.py b/source/pip/qsharp/code/__init__.py index 695b54fb63..4850ce0a7e 100644 --- a/source/pip/qsharp/code/__init__.py +++ b/source/pip/qsharp/code/__init__.py @@ -1,6 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -""" -Code module that receives any user-defined Q# callables as Python functions. -""" +# Deprecated: use qdk.code instead. +from qdk.code import * # noqa: F401,F403 diff --git a/source/pip/qsharp/code/__init__.pyi b/source/pip/qsharp/code/__init__.pyi deleted file mode 100644 index 50d3523caa..0000000000 --- a/source/pip/qsharp/code/__init__.pyi +++ /dev/null @@ -1,4 +0,0 @@ -from typing import Any - -# This helps Pyright understand that this module may have dynamic attributes. -def __getattr__(name: str) -> Any: ... diff --git a/source/pip/qsharp/estimator/__init__.py b/source/pip/qsharp/estimator/__init__.py index ef870f3dad..54564d8c75 100644 --- a/source/pip/qsharp/estimator/__init__.py +++ b/source/pip/qsharp/estimator/__init__.py @@ -1,36 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
-from ._estimator import ( - EstimatorError, - LogicalCounts, - EstimatorResult, - QubitParams, - QECScheme, - MeasurementErrorRate, - EstimatorQubitParams, - EstimatorQecScheme, - ProtocolSpecificDistillationUnitSpecification, - DistillationUnitSpecification, - ErrorBudgetPartition, - EstimatorConstraints, - EstimatorInputParamsItem, - EstimatorParams, -) - -__all__ = [ - "EstimatorError", - "LogicalCounts", - "EstimatorResult", - "QubitParams", - "QECScheme", - "MeasurementErrorRate", - "EstimatorQubitParams", - "EstimatorQecScheme", - "ProtocolSpecificDistillationUnitSpecification", - "DistillationUnitSpecification", - "ErrorBudgetPartition", - "EstimatorConstraints", - "EstimatorInputParamsItem", - "EstimatorParams", -] +# Deprecated: use qdk.estimator instead. +from qdk.estimator import * # noqa: F401,F403 diff --git a/source/pip/qsharp/estimator/_estimator.py b/source/pip/qsharp/estimator/_estimator.py deleted file mode 100644 index 2349862420..0000000000 --- a/source/pip/qsharp/estimator/_estimator.py +++ /dev/null @@ -1,1180 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -import re -from typing import Any, Dict, List, Optional, Union -from dataclasses import dataclass, field -from .._native import physical_estimates - -import json - -try: - # Both markdown and mdx_math (from python-markdown-math) must be present for our markdown - # rendering logic to work. If either is missing, we'll fall back to plain text. - import markdown - import mdx_math - - has_markdown = True -except ImportError: - has_markdown = False - - -class EstimatorError(BaseException): - """ - An error returned from the resource estimation. - """ - - def __init__(self, code: str, message: str): - self.message = f"Error estimating resources ({code}):\n{message}" - self.code = code - - def __str__(self): - return self.message - - -@dataclass -class AutoValidatingParams: - """ - A helper class for target parameters. 
- - It has a function as_dict that automatically extracts a dictionary from - the class' fields. They are added to the result dictionary if their value - is not None, the key is automatically transformed from Python snake case - to camel case, and if validate is True and if the field has a validation - function, the field is validated beforehand. - """ - - def as_dict(self, validate=True): - result = {} - - for name, field in self.__dataclass_fields__.items(): - field_value = self.__getattribute__(name) - if field_value is not None: - # validate field? - if validate and "validate" in field.metadata: - func = field.metadata["validate"] - # check for indirect call (like in @staticmethod) - if hasattr(func, "__func__"): - func = func.__func__ - func(name, field_value) - - # translate field name to camel case - s = re.sub(r"(_|-)+", " ", name).title().replace(" ", "") - attribute = "".join([s[0].lower(), s[1:]]) - result[attribute] = field_value - - if validate: - self.post_validation(result) - - return result - - def post_validation(self, result): - """ - A function that is called after all individual fields have been - validated, but before the result is returned. - - Here result is the current dictionary. - """ - pass - - -def validating_field(validation_func, default=None): - """ - A helper method to declare field for an AutoValidatingParams data class. - """ - return field(default=default, metadata={"validate": validation_func}) - - -class QubitParams: - """ - Predefined qubit model name constants for use with :class:`EstimatorQubitParams`. - - Pass one of these string constants as the ``name`` field to select a built-in - qubit model for resource estimation. 
- """ - - GATE_US_E3 = "qubit_gate_us_e3" - GATE_US_E4 = "qubit_gate_us_e4" - GATE_NS_E3 = "qubit_gate_ns_e3" - GATE_NS_E4 = "qubit_gate_ns_e4" - MAJ_NS_E4 = "qubit_maj_ns_e4" - MAJ_NS_E6 = "qubit_maj_ns_e6" - - -class QECScheme: - """ - Predefined quantum error correction scheme name constants for use with - :class:`EstimatorQecScheme`. - - Pass one of these string constants as the ``name`` field to select a - built-in QEC scheme for resource estimation. - """ - - SURFACE_CODE = "surface_code" - FLOQUET_CODE = "floquet_code" - - -def _check_error_rate(name, value): - if value <= 0.0 or value >= 1.0: - raise ValueError(f"{name} must be between 0 and 1") - - -def _check_error_rate_or_process_and_readout(name, value): - if value is None: - return - - if isinstance(value, float): - _check_error_rate(name, value) - return - - if not isinstance(value, MeasurementErrorRate): - raise ValueError( - f"{name} must be either a float or " - "MeasurementErrorRate with two fields: 'process' and 'readout'" - ) - - -def check_time(name, value): - pat = r"^(\+?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)\s*(s|ms|μs|µs|us|ns)$" - if re.match(pat, value) is None: - raise ValueError( - f"{name} is not a valid time string; use a " "suffix s, ms, us, or ns" - ) - - -@dataclass -class MeasurementErrorRate(AutoValidatingParams): - """ - Measurement error rate specified separately for process and readout errors. - - Used as the value of ``one_qubit_measurement_error_rate`` or - ``two_qubit_joint_measurement_error_rate`` in :class:`EstimatorQubitParams` - when process and readout error rates differ. - - :param process: Error rate during the measurement process. Must be in ``(0, 1)``. - :param readout: Error rate during readout. Must be in ``(0, 1)``. 
- """ - - process: float = field(metadata={"validate": _check_error_rate}) - readout: float = field(metadata={"validate": _check_error_rate}) - - -@dataclass -class EstimatorQubitParams(AutoValidatingParams): - """ - Physical qubit parameters for resource estimation. - - Specify a built-in qubit model by setting ``name`` to one of the - :class:`QubitParams` constants, or fully define a custom model by setting - ``instruction_set`` and all relevant timing and error-rate fields. - """ - - @staticmethod - def check_instruction_set(name, value): - if value not in [ - "gate-based", - "gate_based", - "GateBased", - "gateBased", - "Majorana", - "majorana", - ]: - raise ValueError(f"{name} must be GateBased or Majorana") - - name: Optional[str] = None - instruction_set: Optional[str] = validating_field(check_instruction_set) - one_qubit_measurement_time: Optional[str] = validating_field(check_time) - two_qubit_joint_measurement_time: Optional[str] = validating_field(check_time) - one_qubit_gate_time: Optional[str] = validating_field(check_time) - two_qubit_gate_time: Optional[str] = validating_field(check_time) - t_gate_time: Optional[str] = validating_field(check_time) - one_qubit_measurement_error_rate: Union[None, float, MeasurementErrorRate] = ( - validating_field(_check_error_rate_or_process_and_readout) - ) - two_qubit_joint_measurement_error_rate: Union[None, float, MeasurementErrorRate] = ( - validating_field(_check_error_rate_or_process_and_readout) - ) - one_qubit_gate_error_rate: Optional[float] = validating_field(_check_error_rate) - two_qubit_gate_error_rate: Optional[float] = validating_field(_check_error_rate) - t_gate_error_rate: Optional[float] = validating_field(_check_error_rate) - idle_error_rate: Optional[float] = validating_field(_check_error_rate) - - _default_models = [ - QubitParams.GATE_US_E3, - QubitParams.GATE_US_E4, - QubitParams.GATE_NS_E3, - QubitParams.GATE_NS_E4, - QubitParams.MAJ_NS_E4, - QubitParams.MAJ_NS_E6, - ] - _gate_based = 
["gate-based", "gate_based", "GateBased", "gateBased"] - _maj_based = ["Majorana", "majorana"] - - def post_validation(self, result): - # check whether all fields have been specified in case a custom qubit - # model is specified - custom = result != {} and ( - self.name is None or self.name not in self._default_models - ) - - # no further validation needed for non-custom models - if not custom: - return - - # instruction set must be set - if self.instruction_set is None: - raise LookupError( - "instruction_set must be set for custom qubit " "parameters" - ) - - # NOTE at this point, we know that instruction set must have valid - # value - if self.one_qubit_measurement_time is None: - raise LookupError("one_qubit_measurement_time must be set") - if self.one_qubit_measurement_error_rate is None: - raise LookupError("one_qubit_measurement_error_rate must be set") - - # this only needs to be checked for gate based qubits - if self.instruction_set in self._gate_based: - if self.one_qubit_gate_time is None: - raise LookupError("one_qubit_gate_time must be set") - - def as_dict(self, validate=True) -> Dict[str, Any]: - qubit_params = super().as_dict(validate) - if len(qubit_params) != 0: - if isinstance(self.one_qubit_measurement_error_rate, MeasurementErrorRate): - qubit_params["oneQubitMeasurementErrorRate"] = ( - self.one_qubit_measurement_error_rate.as_dict(validate) - ) - - if isinstance( - self.two_qubit_joint_measurement_error_rate, MeasurementErrorRate - ): - qubit_params["twoQubitJointMeasurementErrorRate"] = ( - self.two_qubit_joint_measurement_error_rate.as_dict(validate) - ) - - return qubit_params - - -@dataclass -class EstimatorQecScheme(AutoValidatingParams): - """ - Quantum error correction scheme parameters for resource estimation. - - Specify a built-in scheme by setting ``name`` to one of the - :class:`QECScheme` constants, or define a custom scheme by setting - the threshold and code-distance parameters directly. 
- """ - - name: Optional[str] = None - error_correction_threshold: Optional[float] = validating_field(_check_error_rate) - crossing_prefactor: Optional[float] = None - distance_coefficient_power: Optional[int] = None - logical_cycle_time: Optional[str] = None - physical_qubits_per_logical_qubit: Optional[str] = None - max_code_distance: Optional[int] = None - - -@dataclass -class ProtocolSpecificDistillationUnitSpecification(AutoValidatingParams): - """ - Protocol-specific specification for a magic-state distillation unit. - - Defines the number of physical qubits and the duration (in logical cycle - time units) for one round of distillation under a specific QEC code. - """ - - num_unit_qubits: Optional[int] = None - duration_in_qubit_cycle_time: Optional[int] = None - - def post_validation(self, result): - if self.num_unit_qubits is None: - raise LookupError("num_unit_qubits must be set") - - if self.duration_in_qubit_cycle_time is None: - raise LookupError("duration_in_qubit_cycle_time must be set") - - -@dataclass -class DistillationUnitSpecification(AutoValidatingParams): - """ - Specification for a magic-state distillation unit. - - Either select a built-in unit by setting ``name``, or define a custom unit - by providing ``num_input_ts``, ``num_output_ts``, ``failure_probability_formula``, - ``output_error_rate_formula``, and optionally physical and logical qubit - specifications. 
- """ - - name: Optional[str] = None - display_name: Optional[str] = None - num_input_ts: Optional[int] = None - num_output_ts: Optional[int] = None - failure_probability_formula: Optional[str] = None - output_error_rate_formula: Optional[str] = None - physical_qubit_specification: Optional[ - ProtocolSpecificDistillationUnitSpecification - ] = None - logical_qubit_specification: Optional[ - ProtocolSpecificDistillationUnitSpecification - ] = None - logical_qubit_specification_first_round_override: Optional[ - ProtocolSpecificDistillationUnitSpecification - ] = None - - def has_custom_specification(self): - return ( - self.display_name is not None - or self.num_input_ts is not None - or self.num_output_ts is not None - or self.failure_probability_formula is not None - or self.output_error_rate_formula is not None - or self.physical_qubit_specification is not None - or self.logical_qubit_specification is not None - or self.logical_qubit_specification_first_round_override is not None - ) - - def has_predefined_name(self): - return self.name is not None - - def post_validation(self, result): - if not self.has_custom_specification() and not self.has_predefined_name(): - raise LookupError( - "name must be set or custom specification must be provided" - ) - - if self.has_custom_specification() and self.has_predefined_name(): - raise LookupError( - "If predefined name is provided, " - "custom specification is not allowed. 
" - "Either remove name or remove all other " - "specification of the distillation unit" - ) - - if self.has_predefined_name(): - return # all other validation is on the server side - - if self.num_input_ts is None: - raise LookupError("num_input_ts must be set") - - if self.num_output_ts is None: - raise LookupError("num_output_ts must be set") - - if self.failure_probability_formula is None: - raise LookupError("failure_probability_formula must be set") - - if self.output_error_rate_formula is None: - raise LookupError("output_error_rate_formula must be set") - - if self.physical_qubit_specification is not None: - self.physical_qubit_specification.post_validation(result) - - if self.logical_qubit_specification is not None: - self.logical_qubit_specification.post_validation(result) - - if self.logical_qubit_specification_first_round_override is not None: - self.logical_qubit_specification_first_round_override.post_validation( - result - ) - - def as_dict(self, validate=True) -> Dict[str, Any]: - specification_dict = super().as_dict(validate) - if len(specification_dict) != 0: - if self.physical_qubit_specification is not None: - physical_qubit_specification_dict = ( - self.physical_qubit_specification.as_dict(validate) - ) - if len(physical_qubit_specification_dict) != 0: - specification_dict["physicalQubitSpecification"] = ( - physical_qubit_specification_dict - ) - - if self.logical_qubit_specification is not None: - logical_qubit_specification_dict = ( - self.logical_qubit_specification.as_dict(validate) - ) - if len(logical_qubit_specification_dict) != 0: - specification_dict["logicalQubitSpecification"] = ( - logical_qubit_specification_dict - ) - - if self.logical_qubit_specification_first_round_override is not None: - logical_qubit_specification_first_round_override_dict = ( - self.logical_qubit_specification_first_round_override.as_dict( - validate - ) - ) - if len(logical_qubit_specification_first_round_override_dict) != 0: - specification_dict[ - 
"logicalQubitSpecificationFirstRoundOverride" - ] = logical_qubit_specification_first_round_override_dict - - return specification_dict - - -@dataclass -class ErrorBudgetPartition(AutoValidatingParams): - """ - Partition of the total error budget across algorithm components. - - The three fields must sum to the overall error budget. Defaults to equal - thirds of ``0.001`` (i.e. each component gets ``~3.33e-4``). - - :param logical: Budget allocated to logical errors in the algorithm. - :param t_states: Budget allocated to T-state distillation errors. - :param rotations: Budget allocated to rotation synthesis errors. - """ - - logical: float = 0.001 / 3 - t_states: float = 0.001 / 3 - rotations: float = 0.001 / 3 - - -@dataclass -class EstimatorConstraints(AutoValidatingParams): - """ - Optional runtime and resource constraints for resource estimation. - - At most one of ``max_duration`` or ``max_physical_qubits`` may be set - simultaneously. - """ - - @staticmethod - def at_least_one(name, value): - if value < 1: - raise ValueError(f"{name} must be at least 1") - - logical_depth_factor: Optional[float] = validating_field(at_least_one) - max_t_factories: Optional[int] = validating_field(at_least_one) - max_duration: Optional[int] = validating_field(check_time) - max_physical_qubits: Optional[int] = validating_field(at_least_one) - - def post_validation(self, result): - if self.max_duration is not None and self.max_physical_qubits is not None: - raise LookupError( - "Both duration and number of physical qubits constraints are provided, but only one is allowed at a time." - ) - - -class EstimatorInputParamsItem: - """ - Input parameters for a single resource estimation job. - - Contains qubit model, QEC scheme, distillation unit specifications, constraints, - and error budget settings. Used directly for single-point estimation or as a - base class for batching via :class:`EstimatorParams`. 
- """ - - def __init__(self): - super().__init__() - - self.qubit_params: EstimatorQubitParams = EstimatorQubitParams() - self.qec_scheme: EstimatorQecScheme = EstimatorQecScheme() - self.distillation_unit_specifications = ( - [] - ) # type: List[DistillationUnitSpecification] - self.constraints: EstimatorConstraints = EstimatorConstraints() - self.error_budget: Optional[Union[float, ErrorBudgetPartition]] = None - self.estimate_type: Optional[str] = None - - def as_dict(self, validate=True, additional_params=None) -> Dict[str, Any]: - result = {} - - qubit_params = self.qubit_params.as_dict(validate) - if len(qubit_params) != 0: - result["qubitParams"] = qubit_params - elif hasattr(additional_params, "qubit_params"): - qubit_params = additional_params.qubit_params.as_dict(validate) - if len(qubit_params) != 0: - result["qubitParams"] = qubit_params - - qec_scheme = self.qec_scheme.as_dict(validate) - if len(qec_scheme) != 0: - result["qecScheme"] = qec_scheme - elif hasattr(additional_params, "qec_scheme"): - qec_scheme = additional_params.qec_scheme.as_dict(validate) - if len(qec_scheme) != 0: - result["qecScheme"] = qec_scheme - - for specification in self.distillation_unit_specifications: - specification_dict = specification.as_dict(validate) - if len(specification_dict) != 0: - if result.get("distillationUnitSpecifications") is None: - result["distillationUnitSpecifications"] = [] - - result["distillationUnitSpecifications"].append(specification_dict) - if result.get("distillationUnitSpecifications") is not None and hasattr( - additional_params, "distillation_unit_specifications" - ): - for specification in additional_params.distillation_unit_specifications: - specification_dict = specification.as_dict(validate) - if len(specification_dict) != 0: - if result.get("distillationUnitSpecifications") is None: - result["distillationUnitSpecifications"] = [] - - result["distillationUnitSpecifications"].append(specification_dict) - - constraints = 
self.constraints.as_dict(validate) - if len(constraints) != 0: - result["constraints"] = constraints - elif hasattr(additional_params, "constraints"): - constraints = additional_params.constraints.as_dict(validate) - if len(constraints) != 0: - result["constraints"] = constraints - - if self.error_budget is not None: - if isinstance(self.error_budget, float) or isinstance( - self.error_budget, int - ): - if validate and (self.error_budget <= 0 or self.error_budget >= 1): - message = "error_budget must be value between 0 and 1" - raise ValueError(message) - result["errorBudget"] = self.error_budget - elif isinstance(self.error_budget, ErrorBudgetPartition): - result["errorBudget"] = self.error_budget.as_dict(validate) - elif hasattr(additional_params, "error_budget"): - if isinstance(additional_params.error_budget, float) or isinstance( - additional_params.error_budget, int - ): - if validate and ( - additional_params.error_budget <= 0 - or additional_params.error_budget >= 1 - ): - message = "error_budget must be value between 0 and 1" - raise ValueError(message) - result["errorBudget"] = additional_params.error_budget - elif isinstance(additional_params.error_budget, ErrorBudgetPartition): - result["errorBudget"] = additional_params.error_budget.as_dict(validate) - - if self.estimate_type is not None: - if self.estimate_type not in ["frontier", "singlePoint"]: - raise ValueError( - "estimate_type must be either 'frontier' or 'singlePoint'" - ) - result["estimateType"] = self.estimate_type - - return result - - -class EstimatorParams(EstimatorInputParamsItem): - """ - Top-level input parameters for the Microsoft Resource Estimator. - - Extends :class:`~qsharp.estimator.EstimatorInputParamsItem` with support for batching: pass - ``num_items`` to create a batching job where each item can override the - top-level parameters. - - :param num_items: Number of batching items. If ``None``, creates a - single-point estimation job. 
- :type num_items: int - """ - - MAX_NUM_ITEMS: int = 1000 - - def __init__(self, num_items: Optional[int] = None): - EstimatorInputParamsItem.__init__(self) - - if num_items is not None: - self.has_items = True - if num_items <= 0 or num_items > self.MAX_NUM_ITEMS: - raise ValueError( - "num_items must be a positive value less or equal to " - f"{self.MAX_NUM_ITEMS}" - ) - self._items = [EstimatorInputParamsItem() for _ in range(num_items)] - else: - self.has_items = False - - @property - def items(self) -> List: - if self.has_items: - return self._items - else: - raise Exception( - "Cannot access items in a non-batching job, call " - "make_params with num_items parameter" - ) - - def as_dict(self, validate=True) -> Dict[str, Any]: - """ - Constructs a dictionary from the input params. - - For batching jobs, top-level entries are merged into item entries. - Item entries have priority in case they are specified. - """ - - # initialize result and set type hint - result: Dict[str, Any] = EstimatorInputParamsItem.as_dict(self, validate) - - if self.has_items: - result["items"] = [item.as_dict(validate, self) for item in self._items] - # In case of batching, no need to stop if failing an item - result["resumeAfterFailedItem"] = True - - return result - - -class HTMLWrapper: - """ - Simple HTML wrapper to expose _repr_html_ for Jupyter clients. - """ - - def __init__(self, content: str): - self.content = content - - def _repr_html_(self): - return self.content - - -class EstimatorResult(dict): - """ - Microsoft Resource Estimator result. - - The class represents simple resource estimation results as well as batching - resource estimation results. The latter can be indexed by an integer index to - access an individual result from the batching result. 
- """ - - MAX_DEFAULT_ITEMS_IN_TABLE = 5 - - def __init__(self, data: Union[Dict, List]): - self._error = None - - if isinstance(data, list) and len(data) == 1: - data = data[0] - if not EstimatorResult._is_succeeded(data): - raise EstimatorError(data["code"], data["message"]) - - if isinstance(data, dict): - self._data = data - super().__init__(data) - - self._is_simple = True - if EstimatorResult._is_succeeded(self): - self._repr = self._item_result_table() - self.summary = HTMLWrapper(self._item_result_summary_table()) - self.diagram = EstimatorResultDiagram(self.data().copy()) - else: - self._error = EstimatorError(data["code"], data["message"]) - - elif isinstance(data, list): - super().__init__( - {idx: EstimatorResult(item_data) for idx, item_data in enumerate(data)} - ) - - self._data = data - self._is_simple = False - num_items = len(data) - self._repr = "" - if num_items > self.MAX_DEFAULT_ITEMS_IN_TABLE: - self._repr += ( - "

Info: The overview table is " - "cut off after " - f"{self.MAX_DEFAULT_ITEMS_IN_TABLE} items. If " - "you want to see all items, suffix the result " - "variable with [:]

" - ) - num_items = self.MAX_DEFAULT_ITEMS_IN_TABLE - self._repr += self._batch_result_table(range(num_items)) - - # Add plot function for batching jobs - self.plot = self._plot - self.summary_data_frame = self._summary_data_frame - - @staticmethod - def _is_succeeded(data): - return "status" in data and data["status"] == "success" - - def data(self, idx: Optional[int] = None) -> Any: - """ - Returns raw data of the result object. - - In case of a batching job, you can pass an index to access a specific - item. - """ - if idx is None: - return self._data - elif not self._is_simple: - return self._data[idx] - else: - msg = "Cannot pass parameter 'idx' to 'data' for non-batching job" - raise ValueError(msg) - - @property - def error(self) -> Optional[EstimatorError]: - """ - Returns the error object if the result is an error. - """ - return self._error - - @property - def logical_counts(self): - """ - Returns the logical counts of the result. - """ - if self._is_simple: - return LogicalCounts(self.data()["logicalCounts"]) - else: - return LogicalCounts(self.data(0)["logicalCounts"]) - - def _repr_html_(self): - """ - HTML table representation of the result. - """ - if self._error: - raise self._error - return self._repr - - def __getitem__(self, key): - """ - If the result represents a batching job and key is a slice, a - side-by-side table comparison is shown for the indexes represented by - the slice. - - Otherwise, the key is used to access the raw data directly. 
- """ - if isinstance(key, slice): - if self._is_simple: - msg = "Cannot pass slice to '__getitem__' for non-batching job" - raise ValueError(msg) - return HTMLWrapper(self._batch_result_table(range(len(self))[key])) - else: - if super().__contains__(key): - return super().__getitem__(key) - elif super().__contains__("frontierEntries"): - return super().__getitem__("frontierEntries")[0].__getitem__(key) - else: - raise KeyError(key) - - def _plot(self, **kwargs): - """ - Plots all result items in a space time plot, where the x-axis shows - total runtime, and the y-axis shows total number of physical qubits. - Both axes are in log-scale. - - :param **kwargs: Common options: - - - ``labels`` (list): List of labels for the legend. Defaults to ``[]``. - """ - try: - import matplotlib.pyplot as plt - except ImportError: - raise ImportError( - "Missing optional 'matplotlib' dependency. To install run: " - "pip install matplotlib" - ) - - labels = kwargs.pop("labels", []) - - [xs, ys] = zip( - *[ - ( - self.data(i)["physicalCounts"]["runtime"], - self.data(i)["physicalCounts"]["physicalQubits"], - ) - for i in range(len(self)) - ] - ) - - _ = plt.figure(figsize=(15, 8)) - - plt.ylabel("Physical qubits") - plt.xlabel("Runtime") - plt.loglog() - for i, (x, y) in enumerate(zip(xs, ys)): - if isinstance(labels, list) and i < len(labels): - label = labels[i] - else: - label = str(i) - plt.scatter(x=[x], y=[y], label=label, marker="os+x"[i % 4]) - - nsec = 1 - usec = 1e3 * nsec - msec = 1e3 * usec - sec = 1e3 * msec - min = 60 * sec - hour = 60 * min - day = 24 * hour - week = 7 * day - month = 31 * day - year = 365 * month - decade = 10 * year - century = 10 * decade - - time_units = [ - nsec, - usec, - msec, - sec, - min, - hour, - day, - week, - month, - year, - decade, - century, - ] - time_labels = [ - "1 ns", - "1 µs", - "1 ms", - "1 s", - "1 min", - "1 hour", - "1 day", - "1 week", - "1 month", - "1 year", - "1 decade", - "1 century", - ] - - cutoff = ( - next( - (i for 
i, x in enumerate(time_units) if x > max(xs)), - len(time_units) - 1, - ) - + 1 - ) - - plt.xticks(time_units[0:cutoff], time_labels[0:cutoff], rotation=90) - plt.legend(loc="upper left") - plt.show() - - @property - def json(self): - """ - Returns a JSON representation of the resource estimation result data. - """ - if not hasattr(self, "_json"): - import json - - self._json = json.dumps(self._data) - - return self._json - - def _summary_data_frame(self, **kwargs): - try: - import pandas as pd - except ImportError: - raise ImportError( - "Missing optional 'pandas' dependency. To install run: " - "pip install pandas" - ) - - # get labels or use default value, then extend with missing elements, - # and truncate extra elements - labels = kwargs.pop("labels", []) - labels.extend(range(len(labels), len(self))) - labels = labels[: len(self)] - - def get_row(result): - if EstimatorResult._is_succeeded(result): - formatted = result["physicalCountsFormatted"] - - return ( - formatted["algorithmicLogicalQubits"], - formatted["logicalDepth"], - formatted["numTstates"], - result["logicalQubit"]["codeDistance"], - formatted["numTfactories"], - formatted["physicalQubitsForTfactoriesPercentage"], - formatted["physicalQubits"], - formatted["rqops"], - formatted["runtime"], - ) - else: - return ["No solution found"] * 9 - - data = [get_row(self.data(index)) for index in range(len(self))] - columns = [ - "Logical qubits", - "Logical depth", - "T states", - "Code distance", - "T factories", - "T factory fraction", - "Physical qubits", - "rQOPS", - "Physical runtime", - ] - return pd.DataFrame(data, columns=columns, index=labels) - - def _item_result_table(self): - html = "" - - if has_markdown: - md = markdown.Markdown(extensions=["mdx_math"]) - for group in self["reportData"]["groups"]: - html += f""" -
- - {group['title']} - - """ - for entry in group["entries"]: - val = self - for key in entry["path"].split("/"): - if key not in val and "frontierEntries" in val: - val = val["frontierEntries"][0] - val = val[key] - if has_markdown: - explanation = md.convert(entry["explanation"]) - else: - explanation = entry["explanation"] - html += f""" - - - - - - """ - html += "
{entry['label']}{val} - {entry["description"]} -
- {explanation} -
" - - html += f'
Assumptions
    ' - if has_markdown: - for assumption in self["reportData"]["assumptions"]: - html += f"
  • {md.convert(assumption)}
  • " - html += "
" - - return html - - def _item_result_summary_table(self): - html = """ - """ - - if has_markdown: - md = markdown.Markdown(extensions=["mdx_math"]) - for group in self["reportData"]["groups"]: - html += f""" -
- - {group['title']} - - """ - for entry in group["entries"]: - val = self - for key in entry["path"].split("/"): - val = val[key] - if has_markdown: - explanation = md.convert(entry["explanation"]) - else: - explanation = entry["explanation"] - html += f""" - - - - - - """ - html += "
{explanation}{entry['label']}{val}{entry["description"]}
" - - html += f"
Assumptions
    " - if has_markdown: - for assumption in self["reportData"]["assumptions"]: - html += f"
  • {md.convert(assumption)}
  • " - html += "
" - - return html - - def _batch_result_table(self, indices): - succeeded_item_indices = [ - i for i in indices if EstimatorResult._is_succeeded(self[i]) - ] - if len(succeeded_item_indices) == 0: - print("None of the jobs succeeded") - return "" - - first_succeeded_item_index = succeeded_item_indices[0] - - html = "" - - if has_markdown: - md = markdown.Markdown(extensions=["mdx_math"]) - - item_headers = "".join(f"{i}" for i in indices) - - for group_index, group in enumerate( - self[first_succeeded_item_index]["reportData"]["groups"] - ): - html += f""" -
- - {group['title']} - - - {item_headers}""" - - visited_entries = set() - - for entry in [ - entry - for index in succeeded_item_indices - for entry in self[index]["reportData"]["groups"][group_index]["entries"] - ]: - label = entry["label"] - if label in visited_entries: - continue - visited_entries.add(label) - - html += f""" - - - """ - - for index in indices: - val = self[index] - if index in succeeded_item_indices: - for key in entry["path"].split("/"): - if key in val: - val = val[key] - else: - val = "N/A" - break - else: - val = "N/A" - html += f""" - - """ - - html += """ - - """ - html += "
Item
{label}{val}
" - - html += f'
Assumptions
    ' - if has_markdown: - for assumption in self[0]["reportData"]["assumptions"]: - html += f"
  • {md.convert(assumption)}
  • " - html += "
" - - return html - - @staticmethod - def _is_succeeded(obj): - return "status" in obj and obj["status"] == "success" - - -class EstimatorResultDiagram: - def __init__(self, data): - data.pop("reportData") - self.data_json = json.dumps(data).replace(" ", "") - self.vis_lib = "https://cdn-aquavisualization-prod.azureedge.net/resource-estimation/index.js" - self.space = HTMLWrapper(self._space_diagram()) - self.time = HTMLWrapper(self._time_diagram()) - - def _space_diagram(self): - html = f""" - - """ - return html - - def _time_diagram(self): - html = f""" - - """ - return html - - -class LogicalCounts(dict): - """ - Microsoft Resource Estimator Logical Counts. - - The class represents logical counts that can be used as input to physical estimation of resources - in the Microsoft Resource Estimator. - """ - - def __init__(self, data: Dict): - self._data = {} - self._data["numQubits"] = data.get("numQubits", 0) - self._data["tCount"] = data.get("tCount", 0) - self._data["rotationCount"] = data.get("rotationCount", 0) - self._data["rotationDepth"] = data.get("rotationDepth", 0) - self._data["cczCount"] = data.get("cczCount", 0) - self._data["ccixCount"] = data.get("ccixCount", 0) - self._data["measurementCount"] = data.get("measurementCount", 0) - if "numComputeQubits" in data: - self._data["numComputeQubits"] = data["numComputeQubits"] - if "readFromMemoryCount" in data: - self._data["readFromMemoryCount"] = data["readFromMemoryCount"] - if "writeToMemoryCount" in data: - self._data["writeToMemoryCount"] = data["writeToMemoryCount"] - super().__init__(self._data) - - @property - def json(self): - """ - Returns a JSON representation of the logical counts. 
- """ - if not hasattr(self, "_json"): - import json - - self._json = json.dumps(self._data) - - return self._json - - def estimate( - self, params: Union[dict, List, EstimatorParams] = None - ) -> EstimatorResult: - """ - Estimates resources for the current logical counts, using the - Parallel Synthesis Sequential Pauli Computation (PSSPC) layout method. - - :param params: The parameters to configure physical estimation. - :return: The estimated resources. - :rtype: EstimatorResult - """ - if params is None: - params = [{}] - elif isinstance(params, EstimatorParams): - if params.has_items: - params = params.as_dict()["items"] - else: - params = [params.as_dict()] - elif isinstance(params, dict): - params = [params] - return EstimatorResult( - json.loads(physical_estimates(self.json, json.dumps(params))) - ) diff --git a/source/pip/qsharp/interop/__init__.py b/source/pip/qsharp/interop/__init__.py index 379559deb5..2e8d11cac6 100644 --- a/source/pip/qsharp/interop/__init__.py +++ b/source/pip/qsharp/interop/__init__.py @@ -1,4 +1,4 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -"""Interoperability modules for the Q# ecosystem.""" +# Deprecated: use qdk.interop (qdk.qiskit / qdk.cirq) instead. diff --git a/source/pip/qsharp/interop/cirq/__init__.py b/source/pip/qsharp/interop/cirq/__init__.py index 8a484fc8ab..b814dfd25b 100644 --- a/source/pip/qsharp/interop/cirq/__init__.py +++ b/source/pip/qsharp/interop/cirq/__init__.py @@ -1,33 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -"""Cirq interoperability for the Q# ecosystem. - -This module provides a :class:`~qsharp.interop.cirq.NeutralAtomSampler` — a standard -``cirq.Sampler`` that runs Cirq circuits on the local NeutralAtomDevice -simulator. 
- -Usage: - - import cirq - from qsharp.interop.cirq import NeutralAtomSampler - - q0, q1 = cirq.LineQubit.range(2) - circuit = cirq.Circuit([ - cirq.H(q0), - cirq.CNOT(q0, q1), - cirq.measure(q0, q1, key="m"), - ]) - - sampler = NeutralAtomSampler(seed=42) - result = sampler.run(circuit, repetitions=1000) - print(result.histogram(key="m")) -""" - -from ._neutral_atom import NeutralAtomSampler -from ._result import NeutralAtomCirqResult - -__all__ = [ - "NeutralAtomSampler", - "NeutralAtomCirqResult", -] +# Deprecated: use qdk.cirq instead. +from qdk.cirq import * # noqa: F401,F403 diff --git a/source/pip/qsharp/interop/cirq/_neutral_atom.py b/source/pip/qsharp/interop/cirq/_neutral_atom.py deleted file mode 100644 index 8829e44b9d..0000000000 --- a/source/pip/qsharp/interop/cirq/_neutral_atom.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -"""NeutralAtomSampler — a cirq.Sampler backed by the local NeutralAtomDevice.""" - -from __future__ import annotations - -from typing import List, Optional, TYPE_CHECKING - -import cirq - -from ._result import NeutralAtomCirqResult, measurement_dict, to_cirq_result - -if TYPE_CHECKING: - from qsharp._simulation import NoiseConfig - from qsharp._device._atom import NeutralAtomDevice - - -class NeutralAtomSampler(cirq.Sampler): - """A ``cirq.Sampler`` that runs Cirq circuits on the local NeutralAtomDevice simulator. - - This sampler integrates with the standard Cirq sampler protocol, so it can - be used anywhere a ``cirq.Sampler`` is expected. - - Pipeline for each ``run()`` call: - - 1. ``cirq.Circuit.to_qasm(version="3.0")`` → OpenQASM 3.0 - 2. OpenQASM 3.0 → QIR (base profile, via the Q# compiler) - 3. QIR → ``NeutralAtomDevice.simulate()`` (decompose, schedule, simulate) - 4. 
Raw shots → :class:`NeutralAtomCirqResult` - - Example:: - - import cirq - from qsharp.interop.cirq import NeutralAtomSampler - from qsharp._simulation import NoiseConfig - - q0, q1 = cirq.LineQubit.range(2) - circuit = cirq.Circuit([ - cirq.H(q0), - cirq.CNOT(q0, q1), - cirq.measure(q0, q1, key="m"), - ]) - - # Noiseless simulation - sampler = NeutralAtomSampler(seed=42) - result = sampler.run(circuit, repetitions=1000) - print(result.histogram(key="m")) - - # Noisy simulation — 1% loss on Rz (native gate) - noise = NoiseConfig() - noise.rz.loss = 0.01 - sampler = NeutralAtomSampler(noise=noise, seed=42) - result = sampler.run(circuit, repetitions=1000) - print(f"Accepted: {len(result.measurements['m'])} / {len(result.raw_shots)}") - - :keyword noise: Optional :class:`~qsharp._simulation.NoiseConfig` describing - per-gate noise. The device decomposes gates to the native set - ``{Rz, SX, CZ, MResetZ}``; configure noise on those native gates. - For example, a Cirq ``X`` gate arriving via QASM 2.0 is decomposed - to ``SX·SX``, so ``noise.sx`` is the relevant field. Defaults to - ``None`` (noiseless). - :kwtype noise: NoiseConfig - :keyword simulator_type: Force a particular simulator backend. - ``"clifford"`` — Clifford-only, fast. Requires a Clifford circuit. - ``"cpu"`` — Full state-vector on CPU. - ``"gpu"`` — Full state-vector on GPU. - ``None`` (default) — GPU if available, CPU otherwise. - :kwtype simulator_type: str - :keyword seed: Optional integer seed for reproducibility. Defaults to ``None``. - :kwtype seed: int - :keyword device: An existing :class:`~qsharp._device._atom.NeutralAtomDevice` - instance to reuse across calls. A default-configured device is - created lazily on the first call when not provided. 
- :kwtype device: NeutralAtomDevice - """ - - def __init__( - self, - *, - noise: Optional["NoiseConfig"] = None, - simulator_type: Optional[str] = None, - seed: Optional[int] = None, - device: Optional["NeutralAtomDevice"] = None, - ) -> None: - self._noise = noise - self._simulator_type = simulator_type - self._seed = seed - self._device = device - - def _get_device(self) -> "NeutralAtomDevice": - """Return the NeutralAtomDevice, creating a default one on first access.""" - if self._device is None: - from qsharp._device._atom import NeutralAtomDevice - - self._device = NeutralAtomDevice() - return self._device - - def run_sweep( - self, - program: cirq.AbstractCircuit, - params: cirq.Sweepable, - repetitions: int = 1, - ) -> List[NeutralAtomCirqResult]: - """Run the circuit for each parameter resolver in the sweep. - - :param program: The Cirq circuit to simulate. - :param params: A ``cirq.Sweepable`` defining the parameter resolvers - to sweep over. Each resolver produces one result. - :param repetitions: Number of shots per parameter resolver. - :type repetitions: int - :return: A list of :class:`NeutralAtomCirqResult` objects, one per resolver. 
- :rtype: List[NeutralAtomCirqResult] - """ - resolvers = ( - list(cirq.to_sweep(params)) - if params is not None - else [cirq.ParamResolver()] - ) - return [ - self._run_once(program, resolver, repetitions) for resolver in resolvers - ] - - def _run_once( - self, - circuit: cirq.AbstractCircuit, - param_resolver: cirq.ParamResolver, - repetitions: int, - ) -> NeutralAtomCirqResult: - from qsharp._native import compile_qasm_program_to_qir - from qsharp._fs import read_file, list_directory, resolve - from qsharp._http import fetch_github - from qsharp._qsharp import TargetProfile - - # Resolve parameters - resolved_circuit = cirq.resolve_parameters(circuit, param_resolver) - - # Step 1: Cirq circuit → QASM 3.0 - try: - qasm = resolved_circuit.to_qasm(version="3.0") - except Exception as exc: - raise ValueError( - "Failed to convert the Cirq circuit to QASM 3.0. " - "Ensure every gate in the circuit supports QASM serialization " - f"(see cirq.Circuit.to_qasm). Original error: {exc}" - ) from exc - - # Step 2: QASM 3.0 → QIR (base profile) - qir = compile_qasm_program_to_qir( - qasm, - read_file, - list_directory, - resolve, - fetch_github, - name="cirq_circuit", - target_profile=TargetProfile.Base, - search_path=".", - ) - - # Step 3: QIR → NeutralAtomDevice simulation - device = self._get_device() - raw_shots = device.simulate( - qir, - shots=repetitions, - noise=self._noise, - type=self._simulator_type, - seed=self._seed, - ) - - # Step 4: Build NeutralAtomCirqResult - meas_dict = measurement_dict(resolved_circuit) - return to_cirq_result(raw_shots, meas_dict, param_resolver) diff --git a/source/pip/qsharp/interop/cirq/_result.py b/source/pip/qsharp/interop/cirq/_result.py deleted file mode 100644 index d76bf8ecc3..0000000000 --- a/source/pip/qsharp/interop/cirq/_result.py +++ /dev/null @@ -1,310 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -"""Result types and conversion utilities for the Cirq–NeutralAtomDevice integration.""" - -from __future__ import annotations - -import ast -import re -from typing import Any, Dict, List, Optional, Sequence - -import cirq -import numpy as np - - -# --------------------------------------------------------------------------- -# Result type -# --------------------------------------------------------------------------- - - -class NeutralAtomCirqResult(cirq.ResultDict): - """A ``cirq.ResultDict`` that also carries raw (loss-inclusive) shot data. - - The inherited ``measurements`` field contains only *accepted* shots - those - where every measured qubit returned a clean ``{0, 1}`` outcome. Shots in - which one or more qubits were lost during the simulation are excluded from - ``measurements`` but are preserved in ``raw_shots``. - - The ``raw_shots`` attribute holds the full list of simulation results, one - entry per shot, in the native simulator output format (tuple, list, or - scalar). This includes shots that contain qubit-loss markers. - - Use :meth:`raw_measurements` to retrieve the full per-shot data (including - loss markers) in the same ``{key: 2D-array (shots x bits)}`` format as - ``measurements``, but with Unicode string dtype so that non-binary markers - are preserved. - """ - - __slots__ = ("raw_shots", "_measurement_dict_data", "_raw_measurements_cache") - - def __init__( - self, - *, - params: cirq.ParamResolver, - measurements: Dict[str, np.ndarray], - raw_shots: List[Any], - measurement_dict: Dict[str, Sequence[int]], - ) -> None: - super().__init__(params=params, measurements=measurements) - self.raw_shots = raw_shots - self._measurement_dict_data = measurement_dict - self._raw_measurements_cache: Optional[Dict[str, Any]] = None - - def raw_measurements(self) -> Dict[str, Any]: - """Return unfiltered per-shot measurement symbols including loss markers. 
- - The structure mirrors ``measurements``: ``{key: 2D array (shots x bits)}``, - but the array dtype is ``" width: - chars = chars[:width] - rows_by_key[key].append(chars) - - try: - raw_meas: Dict[str, Any] = { - k: np.asarray(v, dtype=" Dict[str, List[int]]: - """Extract ``{measurement_key: [global_qubit_indices]}`` from a Cirq circuit. - - Qubit indices are determined by ``sorted(circuit.all_qubits())``, matching - the ordering that Cirq's ``to_qasm()`` uses when it numbers the qubits. - - :param circuit: The Cirq circuit to introspect. - :return: An ordered dict mapping each measurement key to the list of global qubit - indices that key covers, in the order they are measured. - :rtype: Dict[str, List[int]] - """ - ordered_qubits = sorted(circuit.all_qubits()) - index_by_qubit = {q: i for i, q in enumerate(ordered_qubits)} - - keys_in_order: List[str] = [] - key_to_qubits: Dict[str, List[int]] = {} - - for op in circuit.all_operations(): - if isinstance(op.gate, cirq.MeasurementGate): - key = op.gate.key - if key not in key_to_qubits: - keys_in_order.append(key) - key_to_qubits[key] = [] - key_to_qubits[key].extend(index_by_qubit[q] for q in op.qubits) - - return {k: key_to_qubits[k] for k in keys_in_order} - - -# --------------------------------------------------------------------------- -# Bit-string parsing utilities -# --------------------------------------------------------------------------- - - -def _qir_display_to_bitstring(obj: Any) -> str: - """Convert a raw QIR simulation result value to a flat bitstring. 
- - Handles the various formats the NeutralAtomDevice simulator may emit: - - ``qsharp.Result`` enum values (``Result.One`` -> ``"1"``, ``Result.Zero`` -> ``"0"``) - - ``tuple`` - multiple classical registers, joined with spaces - - ``list`` - single register bits, each element processed recursively - - ``str`` - already a representation, parsed with ``ast.literal_eval`` if needed - - other - converted to string with ``str()`` - """ - # Handle qsharp.Result enum values produced by the local simulator. - try: - from qsharp import Result as _Result - - if obj == _Result.One: - return "1" - if obj == _Result.Zero: - return "0" - if obj == _Result.Loss: - return "-" - except ImportError: - pass - - if isinstance(obj, str) and not re.match(r"[\d\s\-]+$", obj): - try: - obj = ast.literal_eval(obj) - except Exception: - return str(obj) - - if isinstance(obj, tuple): - return " ".join(_qir_display_to_bitstring(t) for t in obj) - if isinstance(obj, list): - # Recurse per element so Result.One/Zero inside lists are handled correctly. - return "".join(_qir_display_to_bitstring(bit) for bit in obj) - return str(obj) - - -def _split_registers(bitstring: str, key_lengths: List[int]) -> List[str]: - """Split a flat or space-delimited bitstring into per-register chunks. - - :param bitstring: The raw bitstring, possibly containing spaces between registers. - :param key_lengths: The expected width of each register, in order. - :return: A list of register strings, one per key. 
- :rtype: List[str] - """ - raw = str(bitstring).strip() - - if " " in raw: - return raw.split(" ") - - if not key_lengths: - return [raw] - - total_len = sum(key_lengths) - if total_len == len(raw): - regs: List[str] = [] - start = 0 - for length in key_lengths: - regs.append(raw[start : start + length]) - start += length - return regs - - return [raw] - - -# --------------------------------------------------------------------------- -# Loss-filtering shot conversion -# --------------------------------------------------------------------------- - - -def _shots_to_rows( - shots: Sequence[Any], - measurement_dict_data: Optional[Dict[str, Sequence[int]]] = None, -) -> Dict[str, List[List[int]]]: - """Convert raw simulation shots to ``{key: [[bit_per_shot]]}`` filtering loss. - - Shots where any qubit returned a non-binary value (loss marker) are silently - dropped. Only ``{0, 1}`` shots contribute to the returned arrays. - - :param shots: Raw simulation output, one entry per shot. - :param measurement_dict_data: ``{key: [qubit_indices]}`` — the measurement - register layout. Defaults to a single key ``"m"`` with no qubits. - :return: ``{key: list_of_rows}`` where each row is a list of 0/1 integers. 
- :rtype: Dict[str, List[List[int]]] - """ - if measurement_dict_data is None: - measurement_dict_data = {"m": []} - - measurement_keys = list(measurement_dict_data.keys()) - key_lengths = [len(measurement_dict_data[k]) for k in measurement_keys] - - shots_by_key: Dict[str, List[List[int]]] = {k: [] for k in measurement_keys} - - for shot in shots: - bitstring = _qir_display_to_bitstring(shot) - registers = _split_registers(bitstring, key_lengths) - - if len(registers) == len(measurement_keys): - parts = registers - else: - flattened = "".join(registers) - parts = _split_registers(flattened, key_lengths) - - per_key_rows: Dict[str, List[int]] = {} - is_valid_shot = True - - for key, bits in zip(measurement_keys, parts): - bit_chars = list(str(bits).strip()) - if not all(ch in "01" for ch in bit_chars): - is_valid_shot = False - break - per_key_rows[key] = [1 if ch == "1" else 0 for ch in bit_chars] - - if not is_valid_shot: - continue - - for key in measurement_keys: - shots_by_key[key].append(per_key_rows.get(key, [])) - - return shots_by_key - - -# --------------------------------------------------------------------------- -# Result construction -# --------------------------------------------------------------------------- - - -def to_cirq_result( - raw_shots: List[Any], - meas_dict: Dict[str, List[int]], - param_resolver: Optional[cirq.ParamResolverOrSimilarType] = None, -) -> NeutralAtomCirqResult: - """Build a :class:`NeutralAtomCirqResult` from raw simulation output. - - :param raw_shots: The raw per-shot results from ``NeutralAtomDevice.simulate()``. - :param meas_dict: ``{key: [qubit_indices]}`` as returned by :func:`measurement_dict`. - :param param_resolver: Cirq parameter resolver for the circuit. Defaults to the - empty resolver. - :return: A :class:`NeutralAtomCirqResult` whose ``measurements`` field contains only - loss-free shots, and whose ``raw_shots`` / ``raw_measurements()`` retain - all shots including those with loss markers. 
- :rtype: NeutralAtomCirqResult - """ - if param_resolver is None: - param_resolver = cirq.ParamResolver({}) - - normalized = meas_dict or {"m": []} - shots_by_key = _shots_to_rows(raw_shots, normalized) - measurement_keys = list(normalized.keys()) - - measurements: Dict[str, np.ndarray] = {} - for key in measurement_keys: - rows = shots_by_key.get(key, []) - if not rows: - measurements[key] = np.zeros((0, 0), dtype=np.int8) - else: - measurements[key] = np.asarray(rows, dtype=np.int8) - - return NeutralAtomCirqResult( - params=param_resolver, - measurements=measurements, - raw_shots=raw_shots, - measurement_dict=normalized, - ) diff --git a/source/pip/qsharp/interop/qiskit/__init__.py b/source/pip/qsharp/interop/qiskit/__init__.py index 47efd0ed95..3718bfc99b 100644 --- a/source/pip/qsharp/interop/qiskit/__init__.py +++ b/source/pip/qsharp/interop/qiskit/__init__.py @@ -1,108 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -"""Qiskit interoperability for the Q# ecosystem. - -This module provides Qiskit backends backed by the local Q# simulator and -NeutralAtomDevice, allowing Qiskit circuits to be run locally without any -cloud connection. - -Available backends: - -- :class:`~qsharp.interop.qiskit.QSharpBackend` - Runs any Qiskit ``QuantumCircuit`` using the Q# simulator. Supports - noise-free simulation via QASM export and QIR compilation. - -- :class:`~qsharp.interop.qiskit.NeutralAtomBackend` - Runs Qiskit circuits on the local NeutralAtomDevice simulator. Decomposes - gates to the native ``{Rz, SX, CZ}`` gate set and optionally models - per-gate noise (including qubit loss). Loss shots are exposed separately - from accepted shots in the job result. - -- :class:`~qsharp.interop.qiskit.ResourceEstimatorBackend` - Estimates quantum resources (qubits, T-gates, etc.) for a Qiskit circuit - without running a full simulation. 
- -- :func:`~qsharp.interop.qiskit.estimate` - Convenience function that runs resource estimation on a Qiskit circuit - and returns an :class:`~qsharp.estimator.EstimatorResult` directly, without - needing to construct a backend or job manually. - -Usage: - - from qiskit import QuantumCircuit - from qsharp.interop.qiskit import NeutralAtomBackend - from qsharp._simulation import NoiseConfig - - circuit = QuantumCircuit(2, 2) - circuit.h(0) - circuit.cx(0, 1) - circuit.measure([0, 1], [0, 1]) - - noise = NoiseConfig() - noise.rz.loss = 0.05 # 5% qubit loss per Rz gate - - backend = NeutralAtomBackend() - job = backend.run(circuit, shots=1000, noise=noise, seed=42) - result = job.result() - print(result.results[0].data.counts) # accepted shots only - print(result.results[0].data.raw_counts) # includes loss shots -""" -from typing import Any, Dict, List, Optional, Union - -from ...estimator import EstimatorParams, EstimatorResult -from ..._native import OutputSemantics, ProgramType, QasmError -from .backends import ( - NeutralAtomBackend, - QSharpBackend, - ResourceEstimatorBackend, - QirTarget, -) -from .jobs import QsJob, QsSimJob, ReJob, QsJobSet -from .execution import DetaultExecutor -from qiskit import QuantumCircuit - - -def estimate( - circuit: QuantumCircuit, - params: Optional[Union[Dict[str, Any], List, EstimatorParams]] = None, - **options, -) -> EstimatorResult: - """ - Estimates resources for Qiskit QuantumCircuit. - - :param circuit: The input Qiskit QuantumCircuit object. - :param params: The parameters to configure physical estimation. - :type params: EstimatorParams or dict or list - :param **options: Additional options for the transpiler, exporter, or Qiskit passes - configuration. Defaults to backend config values. Common options: - - - ``optimization_level`` (int): Transpiler optimization level. - - ``basis_gates`` (list): Basis gates for transpilation. - - ``includes`` (list): Include paths for QASM resolution. 
- - ``search_path`` (str): Search path for resolving file references. - :raises QasmError: If there is an error generating or parsing QASM. - :return: The estimated resources. - :rtype: EstimatorResult - """ - from ..._qsharp import ipython_helper - - ipython_helper() - backend = ResourceEstimatorBackend() - job = backend.run(circuit, params=params, **options) - return job.result() - - -__all__ = [ - "NeutralAtomBackend", - "QSharpBackend", - "ResourceEstimatorBackend", - "QirTarget", - "QsJob", - "QsSimJob", - "ReJob", - "QsJobSet", - "estimate", - "EstimatorParams", - "EstimatorResult", -] +# Deprecated: use qdk.qiskit instead. +from qdk.qiskit import * # noqa: F401,F403 diff --git a/source/pip/qsharp/interop/qiskit/backends/__init__.py b/source/pip/qsharp/interop/qiskit/backends/__init__.py deleted file mode 100644 index 9e23c5bd40..0000000000 --- a/source/pip/qsharp/interop/qiskit/backends/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from .compilation import Compilation -from .errors import Errors -from .qirtarget import QirTarget -from .neutral_atom_target import NeutralAtomTarget -from .neutral_atom_backend import NeutralAtomBackend -from .qsharp_backend import QSharpBackend -from .re_backend import ResourceEstimatorBackend diff --git a/source/pip/qsharp/interop/qiskit/backends/backend_base.py b/source/pip/qsharp/interop/qiskit/backends/backend_base.py deleted file mode 100644 index 8d24a45499..0000000000 --- a/source/pip/qsharp/interop/qiskit/backends/backend_base.py +++ /dev/null @@ -1,614 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from abc import ABC, abstractmethod -import datetime -import logging -from time import monotonic -from typing import Dict, Any, List, Optional, Union -from warnings import warn - -from qiskit import transpile -from qiskit.circuit import ( - QuantumCircuit, -) -from qiskit.version import get_version_info - -from qiskit.qasm3.exporter import Exporter -from qiskit.providers import BackendV2, Options -from qiskit.result import Result -from qiskit.transpiler import PassManager -from qiskit.transpiler.passes import RemoveBarriers, RemoveResetInZeroState -from qiskit.transpiler.target import Target - -from .compilation import Compilation -from .errors import Errors -from .qirtarget import QirTarget -from ..execution import DetaultExecutor -from ..jobs import QsJob, QsSimJob, QsJobSet -from ..passes import RemoveDelays -from .... import TargetProfile - -logger = logging.getLogger(__name__) - -_QISKIT_NON_GATE_INSTRUCTIONS = [ - "control_flow", - "if_else", - "switch_case", - "while_loop", - "break", - "continue", - "store", - "for_loop", - "measure", - "reset", -] - -_QISKIT_STDGATES = [ - "p", - "x", - "y", - "z", - "h", - "s", - "sdg", - "t", - "tdg", - "sx", - "rx", - "ry", - "rz", - "cx", - "cy", - "cz", - "cp", - "crx", - "cry", - "crz", - "ch", - "swap", - "ccx", - "cswap", - "cu", - "CX", - "phase", - "cphase", - "id", - "u1", - "u2", - "u3", - "U", -] - - -def filter_kwargs(func, **kwargs) -> Dict[str, Any]: - import inspect - - sig = inspect.signature(func) - supported_args = set(sig.parameters.keys()) - extracted_kwargs = { - k: kwargs.get(k) for k in list(kwargs.keys()) if k in supported_args - } - return extracted_kwargs - - -def get_transpile_options(**kwargs) -> Dict[str, Any]: - args = filter_kwargs(transpile, **kwargs) - return args - - -def get_exporter_options(**kwargs) -> Dict[str, Any]: - return filter_kwargs(Exporter.__init__, **kwargs) - - -class BackendBase(BackendV2, ABC): - """ - A virtual backend for transpiling to a Q# ecosystem compatible 
format. - """ - - def __init__( - self, - target: Optional[Target] = None, - qiskit_pass_options: Optional[Dict[str, Any]] = None, - transpile_options: Optional[Dict[str, Any]] = None, - qasm_export_options: Optional[Dict[str, Any]] = None, - skip_transpilation: bool = False, - **options, - ): - """ - :param target: The target to use for the backend. - :param qiskit_pass_options: Options for the Qiskit passes. - :param transpile_options: Options for the transpiler. - :param qasm_export_options: Options for the QASM3 exporter. - :param **options: Additional keyword arguments passed to subclasses. - """ - super().__init__( - name="QSharpBackend", - description="A virtual BackendV2 for transpiling to a Q# compatible format.", - backend_version="0.0.1", - ) - - if options is not None: - # we need to rename the seed_simulator to seed. This - # is a convenience for aer users. - # if the user passes in seed_simulator, we will rename it to seed - # but only if the seed field is defined in the backend options. - if "seed_simulator" in options and "seed" in self._options.data: - warn("seed_simulator passed, but field is called seed.") - options["seed"] = options.pop("seed_simulator") - - # updates the options with the fields passed in, if the backend - # doesn't have the field, it will raise an error. 
- self.set_options(**options) - - self._qiskit_pass_options = Options( - supports_barrier=False, - supports_delay=False, - remove_reset_in_zero_state=True, - ) - self._skip_transpilation = skip_transpilation - - # we need to set the target after the options are set - # so that the target_profile can be used to determine - # which gates/instructions are available - if target is not None: - # update the properties so that we are internally consistent - self._qiskit_pass_options.update_options( - **{ - "supports_barrier": target.instruction_supported("barrier"), - "supports_delay": target.instruction_supported("delay"), - "remove_reset_in_zero_state": True, - } - ) - - self._target = target - else: - self._target = self._build_target() - - self._transpile_options = {} - - basis_gates = None - if qasm_export_options is not None and "basis_gates" in qasm_export_options: - basis_gates = qasm_export_options.pop("basis_gates") - else: - # here we get the gates that are in the target but not in qasm's - # stdgates so that we can build the basis gates list for the exporter. - # A user can override this list by passing in a basis_gates list - # We also remove any non-gate instructions from the list. 
- target_gates = set(self.target.operation_names) - target_gates -= set(_QISKIT_NON_GATE_INSTRUCTIONS) - target_gates -= set(_QISKIT_STDGATES) - basis_gates = list(target_gates) - - # set the default options for the exporter - self._qasm_export_options = { - "includes": ("stdgates.inc",), - "alias_classical_registers": False, - "allow_aliasing": False, - "disable_constants": True, - "basis_gates": basis_gates, - } - - if qiskit_pass_options is not None: - self._qiskit_pass_options.update_options(**qiskit_pass_options) - if transpile_options is not None: - self._transpile_options.update(**transpile_options) - if qasm_export_options is not None: - self._qasm_export_options.update(**qasm_export_options) - - def _build_target(self) -> Target: - supports_barrier = self._qiskit_pass_options["supports_barrier"] - supports_delay = self._qiskit_pass_options["supports_delay"] - - # explicitly set ``num_qubits`` to ``None`` to indicate a :class:`Target` representing a - # simulator or other abstract machine that imposes no limits on the number of qubits. - return QirTarget.build_target( - num_qubits=None, - target_profile=self._options["target_profile"], - supports_barrier=supports_barrier, - supports_delay=supports_delay, - ) - - @property - def target(self) -> Target: - """Returns the target of the Backend object.""" - return self._target - - @property - def max_circuits(self): - """ - Returns the maximum number of circuits that can be executed simultaneously. - """ - return None - - @abstractmethod - def _execute(self, programs: List[Compilation], **input_params) -> Dict[str, Any]: - """Execute circuits on the backend. - - :param programs: Simulator input circuits. - :type programs: List[Compilation] - :param **input_params: Configuration for simulation/compilation. - :return: A dictionary of results. 
- :rtype: dict - """ - - @abstractmethod - def run( - self, - run_input: Union[QuantumCircuit, List[QuantumCircuit]], - **options, - ) -> QsJob: - pass - - def _run( - self, - run_input: List[QuantumCircuit], - **options, - ) -> QsJob: - if "name" not in options and len(run_input) == 1: - options["name"] = run_input[0].name - - # Get out default options - # Look at all of the kwargs and see if they match any of the options - # If they do, set the option to the value of the kwarg as an override - # We only to remove the options that are in the backend options for - # the run so that other options can be passed to other calls. - input_params: Dict[str, Any] = vars(self.options).copy() - input_params.update(options) - - return self._submit_job(run_input, **input_params) - - def run_job( - self, run_input: List[QuantumCircuit], job_id: str, **options - ) -> Result: - start = monotonic() - - compilations = self._compile(run_input, **options) - - output = self._execute(compilations, **options) - - if not isinstance(output, dict): - logger.error("%s: run failed.", self.name) - if output: - logger.error("Output: %s", output) - from .... 
import QSharpError - - raise QSharpError(str(Errors.RUN_TERMINATED_WITHOUT_OUTPUT)) - - output["job_id"] = job_id - output["date"] = str(datetime.datetime.now().isoformat()) - output["status"] = "COMPLETED" - output["backend_name"] = self.name - output["backend_version"] = self.backend_version - - duration = monotonic() - start - output["time_taken"] = str(duration) - output["config"] = { - "qasm_export_options": str(self._build_qasm_export_options(**options)), - "qiskit_pass_options": str(self._build_qiskit_pass_options(**options)), - "transpile_options": str(self._build_transpile_options(**options)), - } - output["header"] = {} - return self._create_results(output) - - def _validate_quantum_circuits( - self, run_input: Union[QuantumCircuit, List[QuantumCircuit]] - ) -> List[QuantumCircuit]: - """Normalize and validate run_input to a list of QuantumCircuits. - - Wraps a bare ``QuantumCircuit`` in a list and raises ``ValueError`` - if any element is not a ``QuantumCircuit``. - """ - if not isinstance(run_input, list): - run_input = [run_input] - for circuit in run_input: - if not isinstance(circuit, QuantumCircuit): - raise ValueError(str(Errors.INPUT_MUST_BE_QC)) - return run_input - - def _submit_job(self, run_input: List[QuantumCircuit], **options) -> QsJob: - """Default implementation for simulation backends. - - Submits a ``QsSimJob`` for a single circuit or a ``QsJobSet`` for - multiple circuits. Override for backends with different job types - (e.g. ``ResourceEstimatorBackend`` uses ``ReJob``). 
- """ - from uuid import uuid4 - - job_id = str(uuid4()) - executor = options.pop("executor", DetaultExecutor()) - if len(run_input) == 1: - job = QsSimJob(self, job_id, self.run_job, run_input, options, executor) - else: - job = QsJobSet(self, job_id, self.run_job, run_input, options, executor) - job.submit() - return job - - def _compile(self, run_input: List[QuantumCircuit], **options) -> List[Compilation]: - # for each run input, convert to qasm - compilations = [] - for circuit in run_input: - args = options.copy() - assert isinstance( - circuit, QuantumCircuit - ), "Input must be a QuantumCircuit." - start = monotonic() - qasm = self._qasm(circuit, **args) - end = monotonic() - - time_taken = end - start - compilation = Compilation(circuit, qasm, time_taken) - compilations.append(compilation) - return compilations - - def _create_results(self, output: Dict[str, Any]) -> Any: - """Default implementation: build a Qiskit ``Result`` from the output dict. - - Override for backends that return a different result type - (e.g. ``ResourceEstimatorBackend`` returns ``EstimatorResult``). - """ - return Result.from_dict(output) - - def _map_result_bit(self, v) -> str: - """Map a single QIR result value to a bit character. - - Override in subclasses to customize the mapping — for example, - to emit a loss marker instead of the default string fallback for - unknown values. - """ - from .... import Result as QSharpResult - - if v == QSharpResult.One: - return "1" - if v == QSharpResult.Zero: - return "0" - return str(v) - - def _shot_to_bitstring(self, value) -> str: - """Recursively convert a QIR shot result to a Qiskit-style bitstring. 
- - - ``tuple`` → space-joined register parts (multiple classical registers) - - ``list`` → concatenated bits via `_map_result_bit` - - anything else → ``str(value)`` - """ - if isinstance(value, tuple): - return " ".join(self._shot_to_bitstring(p) for p in value) - elif isinstance(value, list): - return "".join(self._map_result_bit(v) for v in value) - else: - return str(value) - - def _transpile(self, circuit: QuantumCircuit, **options) -> QuantumCircuit: - if options.get("skip_transpilation", self._skip_transpilation): - return circuit - - circuit = self.run_qiskit_passes(circuit, options) - - transpile_options = self._build_transpile_options(**options) - backend = transpile_options.pop("backend", self) - target = transpile_options.pop("target", self.target) - if get_version_info().startswith("1.2"): - # The older Qiskit version does not support the `qubits_initially_zero` option - transpiled_circuit = transpile( - circuit, - backend=backend, - target=target, - **transpile_options, - ) - else: - transpiled_circuit = transpile( - circuit, - backend=backend, - target=target, - qubits_initially_zero=True, - **transpile_options, - ) - return transpiled_circuit - - def run_qiskit_passes(self, circuit, options): - pass_options = self._build_qiskit_pass_options(**options) - - pass_manager = PassManager() - if not pass_options["supports_barrier"]: - pass_manager.append(RemoveBarriers()) - if not pass_options["supports_delay"]: - pass_manager.append(RemoveDelays()) - if pass_options["remove_reset_in_zero_state"]: - # when doing state initialization, qiskit will reset all qubits to 0 - # As our semantics are different, we can remove these resets - # as it will double the number of qubits if we have to reset them - # before using them when using the base profile. 
- pass_manager.append(RemoveResetInZeroState()) - - circuit = pass_manager.run(circuit) - return circuit - - def _build_qiskit_pass_options(self, **kwargs) -> Dict[str, Any]: - params: Dict[str, Any] = vars(self._qiskit_pass_options).copy() - for opt in params.copy(): - if opt in kwargs: - params[opt] = kwargs.pop(opt) - if "supports_barrier" not in params: - params["supports_barrier"] = False - if "supports_delay" not in params: - params["supports_delay"] = False - if "remove_reset_in_zero_state" not in params: - params["remove_reset_in_zero_state"] = True - - return params - - def _build_transpile_options(self, **kwargs) -> Dict[str, Any]: - # create the default options from the backend - args = self._transpile_options.copy() - # gather any remaining options that are not in the default list - transpile_args = get_transpile_options(**kwargs) - args.update(transpile_args) - return args - - def _build_qasm_export_options(self, **kwargs) -> Dict[str, Any]: - # Disable aliasing until we decide want to support it - # The exporter defaults to only having the U gate. - # When it sees the stdgates.inc in the default includes list, it adds - # bodyless symbols for that fixed gate set. - # We set the basis gates for any gates that we want that wouldn't - # be defined when stdgates.inc is included. - - # any gates that are not in the stdgates.inc file need to be defined - # in the basis gates list passed to the exporter. The exporter doesn't - # know about the gates defined in the backend's target. - # Anything in the basis_gates gets added to the qasm builder's global - # namespace as an opaque gate. All parameter information comes from the - # gate object itself in the circuit. 
- - # create the default options from the backend - args = self._qasm_export_options.copy() - # gather any remaining options that are not in the default list - exporter_args = get_exporter_options(**kwargs) - args.update(exporter_args) - return args - - def transpile(self, circuit: QuantumCircuit, **options) -> QuantumCircuit: - transpiled_circuit = self._transpile(circuit, **options) - return transpiled_circuit - - def _qasm(self, circuit: QuantumCircuit, **options) -> str: - """Converts a Qiskit QuantumCircuit to QASM 3 for the current backend. - - :param circuit: The QuantumCircuit to be executed. - :param **options: Additional options for the transpiler, exporter, or Qiskit passes. - Common values include: ``optimization_level``, ``basis_gates``, ``includes``, - ``search_path``. Defaults to backend config values. - :return: The converted QASM code as a string. Any supplied includes - are emitted as ``include`` statements at the top of the program. - :rtype: str - :raises QasmError: If there is an error generating or parsing QASM. - """ - transpiled_circuit = self.transpile(circuit, **options) - try: - export_options = self._build_qasm_export_options(**options) - exporter = Exporter(**export_options) - qasm3_source = exporter.dumps(transpiled_circuit) - # Qiskit QASM exporter doesn't handle experimental features correctly and always emits - # OPENQASM 3.0; even though switch case is not supported in QASM 3.0, so we bump - # the version to 3.1 for now. - qasm3_source = qasm3_source.replace("OPENQASM 3.0", "OPENQASM 3.1") - return qasm3_source - except Exception as ex: - from .. import QasmError - - raise QasmError(str(Errors.FAILED_TO_EXPORT_QASM)) from ex - - def _qsharp(self, circuit: QuantumCircuit, **kwargs) -> str: - """ - Converts a Qiskit QuantumCircuit to Q# for the current backend. - - The generated Q# code will not be idiomatic Q# code, but will be - a direct translation of the Qiskit circuit. - - :param circuit: The QuantumCircuit to be executed. 
- :param **kwargs: Additional options for the transpiler, exporter, or Qiskit passes. - Common values include: ``optimization_level``, ``basis_gates``, ``includes``, - ``search_path``, ``output_semantics``. Defaults to backend config values. - :return: The converted Q# code as a string. - :rtype: str - :raises QSharpError: If there is an error evaluating the source code. - :raises QasmError: If there is an error generating, parsing, or compiling QASM. - """ - - qasm_source = self._qasm(circuit, **kwargs) - - args = { - "name": kwargs.get("name", circuit.name), - } - - if search_path := kwargs.pop("search_path", "."): - args["search_path"] = search_path - - if output_semantics := kwargs.pop( - "output_semantics", self.options.get("output_semantics", default=None) - ): - args["output_semantics"] = output_semantics - - qsharp_source = self._qasm_to_qsharp(qasm_source, **args) - return qsharp_source - - def qir( - self, - circuit: QuantumCircuit, - **kwargs, - ) -> str: - """ - Converts a Qiskit QuantumCircuit to QIR (Quantum Intermediate Representation). - - :param circuit: The input Qiskit QuantumCircuit object. - :param **kwargs: Common options: - - - ``target_profile`` (TargetProfile): The target profile for the backend. Defaults to backend config value. - - ``output_semantics`` (OutputSemantics): The output semantics for the compilation. Defaults to backend config value. - - ``search_path`` (str): The search path for the backend. Defaults to ``'.'``. - :return: The converted QIR code as a string. - :rtype: str - :raises QSharpError: If there is an error evaluating the source code. - :raises QasmError: If there is an error generating, parsing, or compiling QASM. - :raises ValueError: If the backend configuration does not support QIR generation. 
- """ - name = kwargs.pop("name", circuit.name) - target_profile = kwargs.pop("target_profile", self.options.target_profile) - if target_profile == TargetProfile.Unrestricted: - raise ValueError(str(Errors.UNRESTRICTED_INVALID_QIR_TARGET)) - - qasm_source = self._qasm(circuit, **kwargs) - - args = { - "name": name, - "target_profile": target_profile, - } - - if search_path := kwargs.pop("search_path", "."): - args["search_path"] = search_path - - if params := kwargs.pop("params", None): - args["params"] = params - - if output_semantics := kwargs.pop( - "output_semantics", self.options.get("output_semantics", default=None) - ): - args["output_semantics"] = output_semantics - - return self._qasm_to_qir(qasm_source, **args) - - def _qasm_to_qir( - self, - source: str, - **kwargs, - ) -> str: - from ...._native import compile_qasm_program_to_qir - from ...._fs import read_file, list_directory, resolve - from ...._http import fetch_github - - return compile_qasm_program_to_qir( - source, - read_file, - list_directory, - resolve, - fetch_github, - **kwargs, - ) - - def _qasm_to_qsharp( - self, - source: str, - **kwargs, - ) -> str: - from ...._native import compile_qasm_to_qsharp - from ...._fs import read_file, list_directory, resolve - from ...._http import fetch_github - - return compile_qasm_to_qsharp( - source, - read_file, - list_directory, - resolve, - fetch_github, - **kwargs, - ) diff --git a/source/pip/qsharp/interop/qiskit/backends/compilation.py b/source/pip/qsharp/interop/qiskit/backends/compilation.py deleted file mode 100644 index de0eba284f..0000000000 --- a/source/pip/qsharp/interop/qiskit/backends/compilation.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from qiskit import QuantumCircuit - - -class Compilation(dict): - def __init__(self, circuit: QuantumCircuit, qasm: str, time_taken: str): - super().__init__() - self["circuit"] = circuit - self["qasm"] = qasm - self["compilation_time_taken"] = time_taken - - @property - def circuit(self) -> QuantumCircuit: - return self["circuit"] - - @circuit.setter - def circuit(self, value: QuantumCircuit): - self["circuit"] = value - - @property - def qasm(self) -> str: - return self["qasm"] - - @qasm.setter - def qasm(self, value: str): - self["qasm"] = value - - @property - def time_taken(self) -> str: - return self["compilation_time_taken"] - - @time_taken.setter - def time_taken(self, value: str): - self["compilation_time_taken"] = value diff --git a/source/pip/qsharp/interop/qiskit/backends/errors.py b/source/pip/qsharp/interop/qiskit/backends/errors.py deleted file mode 100644 index 6468eddcd9..0000000000 --- a/source/pip/qsharp/interop/qiskit/backends/errors.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from enum import Enum - - -class Errors(Enum): - UNRESTRICTED_INVALID_QIR_TARGET = 1 - RUN_TERMINATED_WITHOUT_OUTPUT = 2 - FAILED_TO_EXPORT_QASM = 3 - MISSING_NUMBER_OF_SHOTS = 4 - INPUT_MUST_BE_QC = 5 - ONLY_ONE_CIRCUIT_ALLOWED = 6 - - def __str__(self): - if self == Errors.UNRESTRICTED_INVALID_QIR_TARGET: - return "The Unrestricted profile is not valid when generating QIR." - elif self == Errors.RUN_TERMINATED_WITHOUT_OUTPUT: - return "Run terminated without valid output." - elif self == Errors.FAILED_TO_EXPORT_QASM: - return "Failed to export QASM source." - elif self == Errors.MISSING_NUMBER_OF_SHOTS: - return "The number of shots must be specified." - elif self == Errors.INPUT_MUST_BE_QC: - return "Input must be a QuantumCircuit." - elif self == Errors.ONLY_ONE_CIRCUIT_ALLOWED: - return "Only one QuantumCircuit can be estimated at a time." - else: - return "Unknown option." 
diff --git a/source/pip/qsharp/interop/qiskit/backends/neutral_atom_backend.py b/source/pip/qsharp/interop/qiskit/backends/neutral_atom_backend.py deleted file mode 100644 index e78ac94d46..0000000000 --- a/source/pip/qsharp/interop/qiskit/backends/neutral_atom_backend.py +++ /dev/null @@ -1,288 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import logging -from typing import Any, Dict, List, Literal, Optional, Union -from uuid import uuid4 - -from qiskit import QuantumCircuit -from qiskit.providers import Options -from qiskit.transpiler.target import Target - -from .... import Result, TargetProfile -from .. import OutputSemantics -from ..execution import DetaultExecutor -from ..jobs import QsSimJob, QsJobSet -from .backend_base import BackendBase -from .compilation import Compilation -from .errors import Errors -from .neutral_atom_target import NeutralAtomTarget - -logger = logging.getLogger(__name__) - - -def _bitstring_has_qubit_loss(bitstring: str) -> bool: - """Return True if the bitstring contains a qubit-loss marker. - - Lost qubits may be represented using non-binary markers (e.g. '-', '2'). - We treat any shot containing those markers as lost-qubit affected. - """ - return "-" in bitstring or "2" in bitstring - - -class NeutralAtomBackend(BackendBase): - """A Qiskit backend that simulates circuits using the NeutralAtomDevice pipeline. - - Circuits are transpiled to OpenQASM 3 using the device's native gate set - (Rz, SX, CZ), compiled to QIR via the Q# compiler, then run through the - NeutralAtomDevice compilation and simulation pipeline. - The device handles single-qubit gate optimization and qubit movement scheduling. - An optional noise model can be applied to model realistic device behavior. 
- - The native gate set target ensures Qiskit's transpiler decomposes all non-native - gates before simulation, so noise configured on native gates (``noise.rz``, - ``noise.sx``, ``noise.cz``, ``noise.mresetz``) behaves as expected. - - The simulator backend (Clifford, CPU full-state, or GPU full-state) is - selected automatically unless overridden via the ``simulator_type`` option. - - Example:: - - from qiskit import QuantumCircuit - from qsharp.interop.qiskit import NeutralAtomBackend - from qsharp._simulation import NoiseConfig - - qc = QuantumCircuit(2) - qc.h(0) - qc.cx(0, 1) - qc.measure_all() - - # Noiseless simulation - backend = NeutralAtomBackend() - job = backend.run(qc, shots=1000) - print(job.result().get_counts()) - - # Noisy simulation - noise = NoiseConfig() - noise.cz.set_depolarizing(1e-3) - noise.mresetz.set_bitflip(1e-3) - - job = backend.run(qc, shots=1000, noise=noise, seed=42) - print(job.result().get_counts()) - """ - - def __init__( - self, - device=None, - target: Optional[Target] = None, - qiskit_pass_options: Optional[Dict[str, Any]] = None, - transpile_options: Optional[Dict[str, Any]] = None, - qasm_export_options: Optional[Dict[str, Any]] = None, - skip_transpilation: bool = False, - **options, - ): - """ - :param device: The NeutralAtomDevice instance to use for compilation and simulation. - A default-configured device is created automatically if not provided. - Pass a custom device to control the qubit layout (column count, zone dimensions, etc.). - :type device: NeutralAtomDevice - :param target: Qiskit transpiler target. Defaults to the NeutralAtomDevice native - gate set ``{rz, sx, cz, measure, reset}``. Override only if you need a custom - decomposition strategy. - :param qiskit_pass_options: Options forwarded to Qiskit pre-transpilation passes. - :type qiskit_pass_options: Dict - :param transpile_options: Options forwarded to ``qiskit.transpile()``. 
- :type transpile_options: Dict - :param qasm_export_options: Options forwarded to the Qiskit QASM3 exporter. - :type qasm_export_options: Dict - :param skip_transpilation: Skip Qiskit transpilation. Useful when the circuit is - already expressed in terms of the target gate set. - :type skip_transpilation: bool - :param **options: Default option overrides. These can also be overridden per-call via - :meth:`run`. Common options: - - - ``name`` (str): Backend name for job metadata. Defaults to the circuit name. - - ``shots`` (int): Number of shots. Defaults to ``1024``. - - ``seed`` (int): Random seed for reproducibility. Defaults to ``None``. - - ``noise`` (NoiseConfig): Optional per-gate noise model. Defaults to ``None`` (noiseless). - - ``simulator_type`` (str): Simulator to use — ``"clifford"`` (Clifford only), - ``"cpu"`` (CPU full-state), ``"gpu"`` (GPU full-state), or ``None`` to - auto-select (GPU if available, CPU otherwise). - - ``output_semantics`` (OutputSemantics): QIR output encoding. Defaults to ``OutputSemantics.Qiskit``. - - ``executor``: Executor for async job submission. - """ - self._device = device - super().__init__( - target, - qiskit_pass_options, - transpile_options, - qasm_export_options, - skip_transpilation, - **options, - ) - - def _get_device(self): - """Return the NeutralAtomDevice, creating a default one on first access.""" - if self._device is None: - from qsharp._device._atom import NeutralAtomDevice - - self._device = NeutralAtomDevice() - return self._device - - def _build_target(self) -> Target: - """Return a target restricted to the NeutralAtomDevice native gate set. - - Limiting the target to ``{rz, sx, cz, measure, reset}`` ensures Qiskit's - transpiler decomposes all non-native gates before QASM3 export, so the - circuit that reaches the simulator already uses only native gates. 
- """ - return NeutralAtomTarget.build_target(num_qubits=None) - - @classmethod - def _default_options(cls): - return Options( - search_path=".", - shots=1024, - seed=None, - noise=None, - simulator_type=None, - output_semantics=OutputSemantics.Qiskit, - executor=DetaultExecutor(), - ) - - def run( - self, - run_input: Union[QuantumCircuit, List[QuantumCircuit]], - **options, - ) -> Union[QsSimJob, QsJobSet]: - """Simulate the given circuit(s) using the NeutralAtomDevice pipeline. - - :param run_input: A single ``QuantumCircuit`` or a list of them. - :param **options: Per-call option overrides. Common options: - - - ``name`` (str): Backend name for job metadata. Defaults to the circuit name. - - ``shots`` (int): Number of shots. Defaults to ``1024``. - - ``seed`` (int): Random seed for reproducibility. Defaults to ``None``. - - ``noise`` (NoiseConfig): Optional per-gate noise model. Defaults to ``None`` (noiseless). - - ``simulator_type`` (str): Simulator to use — ``"clifford"`` (Clifford only), - ``"cpu"`` (CPU full-state), ``"gpu"`` (GPU full-state), or ``None`` to - auto-select (GPU if available, CPU otherwise). - - ``output_semantics`` (OutputSemantics): QIR output encoding. Defaults to ``OutputSemantics.Qiskit``. - - ``executor``: Executor for async job submission. - :return: A job object whose ``.result()`` returns a Qiskit ``Result``. - :rtype: QsSimJob - :raises ValueError: If ``run_input`` is not a ``QuantumCircuit`` or list thereof, - or if a ``target_profile`` other than ``TargetProfile.Base`` is provided. 
- """ - run_input = self._validate_quantum_circuits(run_input) - return self._run(run_input, **options) - - def _map_result_bit(self, v) -> str: - """Override: unknown values are qubit-loss markers (``"-"``).""" - if v == Result.One: - return "1" - if v == Result.Zero: - return "0" - return "-" - - def _execute(self, programs: List[Compilation], **input_params) -> Dict[str, Any]: - device = self._get_device() - - shots = input_params.get("shots") - if shots is None: - raise ValueError(str(Errors.MISSING_NUMBER_OF_SHOTS)) - - noise = input_params.get("noise") - simulator_type: Optional[Literal["clifford", "cpu", "gpu"]] = input_params.get( - "simulator_type" - ) - seed: Optional[int] = input_params.get("seed") - search_path: str = input_params.get("search_path", ".") - output_semantics = input_params.get("output_semantics") - - # NeutralAtomDevice always requires base-profile QIR — the device's - # compilation pipeline validates that no conditional branches exist. - # Raise explicitly if the caller passed a non-Base profile so the - # error is immediate and clear rather than silently ignored. - target_profile = input_params.get("target_profile") - if target_profile is not None and target_profile != TargetProfile.Base: - raise ValueError( - "NeutralAtomBackend only supports TargetProfile.Base. " - "The NeutralAtomDevice compilation pipeline does not support " - f"conditional branches produced by {target_profile}." - ) - - job_results = [] - for program in programs: - name = input_params.get("name", program.circuit.name) - - # Compile QASM3 → QIR (base profile). - qir = self._qasm_to_qir( - program.qasm, - name=name, - target_profile=TargetProfile.Base, - output_semantics=output_semantics, - search_path=search_path, - ) - - # Run through NeutralAtomDevice compilation + simulation pipeline. 
- sim_results = device.simulate( - qir, - shots=shots, - noise=noise, - type=simulator_type, - seed=seed, - ) - - raw_memory = [self._shot_to_bitstring(shot) for shot in sim_results] - - # Separate accepted shots (no loss markers) from raw shots. - # Qiskit-compatible fields (counts, memory, probabilities) - # contain only clean {0,1} outcomes; raw_* fields retain the - # full picture including loss. - memory = [s for s in raw_memory if not _bitstring_has_qubit_loss(s)] - accepted_total_count = len(memory) - raw_total_count = len(raw_memory) - - raw_counts: Dict[str, int] = {} - counts: Dict[str, int] = {} - for bs in raw_memory: - raw_counts[bs] = raw_counts.get(bs, 0) + 1 - if not _bitstring_has_qubit_loss(bs): - counts[bs] = counts.get(bs, 0) + 1 - - raw_probabilities = ( - {} - if raw_total_count == 0 - else {bs: c / raw_total_count for bs, c in raw_counts.items()} - ) - probabilities = ( - {} - if accepted_total_count == 0 - else {bs: c / accepted_total_count for bs, c in counts.items()} - ) - - job_results.append( - { - "data": { - # Qiskit-compatible fields: loss shots excluded. - "counts": counts, - "probabilities": probabilities, - "memory": memory, - # Raw fields: all shots, including loss markers. - "raw_counts": raw_counts, - "raw_probabilities": raw_probabilities, - "raw_memory": raw_memory, - }, - "success": True, - "header": { - "metadata": {"qasm": program.qasm}, - "name": program.circuit.name, - "compilation_time_taken": program.time_taken, - }, - # shots reflects accepted (non-loss) count. - "shots": accepted_total_count, - } - ) - - return {"results": job_results, "qobj_id": str(uuid4()), "success": True} diff --git a/source/pip/qsharp/interop/qiskit/backends/neutral_atom_target.py b/source/pip/qsharp/interop/qiskit/backends/neutral_atom_target.py deleted file mode 100644 index 3502e95562..0000000000 --- a/source/pip/qsharp/interop/qiskit/backends/neutral_atom_target.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) Microsoft Corporation. 
-# Licensed under the MIT License. - -import logging - -from qiskit.circuit import Measure, Parameter, Reset -from qiskit.circuit.library.standard_gates import CZGate, RZGate, SXGate -from qiskit.transpiler.target import Target - -logger = logging.getLogger(__name__) - - -class NeutralAtomTarget: - """Factory for a Qiskit ``Target`` restricted to the NeutralAtomDevice native gate set. - - The native gate set is ``{rz, sx, cz, measure}`` — the only gates that survive - ``NeutralAtomDevice.compile()``'s decomposition pipeline. Using this target ensures - that Qiskit's transpiler decomposes all non-native gates (H, CX, X, etc.) into - native gates *before* QASM3 export, so the noise model fields that matter - (``noise.rz``, ``noise.sx``, ``noise.cz``, ``noise.mresetz``) align with the - gates actually present during simulation. - """ - - @classmethod - def build_target( - cls, - num_qubits: int | None = None, - ) -> Target: - """Return a Qiskit ``Target`` with only the NeutralAtomDevice native gates. - - :param num_qubits: Number of qubits. ``None`` means no limit (simulator). - :return: A ``Target`` containing ``{rz, sx, cz, measure, reset}``. - """ - target = Target(num_qubits=num_qubits) - - target.add_instruction(RZGate(Parameter("theta")), name="rz") - target.add_instruction(SXGate, name="sx") - target.add_instruction(CZGate, name="cz") - target.add_instruction(Measure, name="measure") - # Reset is used internally by NeutralAtomDevice (MResetZ), so include it - # so the transpiler can express mid-circuit resets. - target.add_instruction(Reset, name="reset") - - return target diff --git a/source/pip/qsharp/interop/qiskit/backends/qirtarget.py b/source/pip/qsharp/interop/qiskit/backends/qirtarget.py deleted file mode 100644 index b88f2056a1..0000000000 --- a/source/pip/qsharp/interop/qiskit/backends/qirtarget.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -import logging -from typing import Union - -from qiskit.circuit import ( - Barrier, - Delay, - Measure, - Parameter, - Reset, - Store, -) -from qiskit.circuit.controlflow import ( - ControlFlowOp, - ForLoopOp, - IfElseOp, - SwitchCaseOp, - WhileLoopOp, -) -from qiskit.circuit.library.standard_gates import ( - CHGate, - CCXGate, - CXGate, - CYGate, - CZGate, - CRXGate, - CRYGate, - CRZGate, - RXGate, - RXXGate, - RYGate, - RYYGate, - RZGate, - RZZGate, - HGate, - SGate, - SdgGate, - SXGate, - SwapGate, - TGate, - TdgGate, - XGate, - YGate, - ZGate, - IGate, -) - -from qiskit.transpiler.target import Target -from .... import TargetProfile - -logger = logging.getLogger(__name__) - - -class QirTarget: - """Factory for QIR-compatible Qiskit ``Target`` instances.""" - - def __init__( - self, - num_qubits=None, - target_profile=TargetProfile.Base, - supports_barrier=False, - supports_delay=False, - ) -> None: - logger.warning( - "QirTarget should not be instantiated directly. Use the 'build_target' class method" - + " instead. This will be enforced in a future release. You can replace" - + " 'QirTarget(...)' with 'QirTarget.build_target(...)'." - ) - self._target = self.build_target( - num_qubits=num_qubits, - target_profile=target_profile, - supports_barrier=supports_barrier, - supports_delay=supports_delay, - ) - - def __getattr__(self, item): - """ - Delegate attribute access to the underlying _target object. - - This method is called when an attribute is not found in the current instance. - It forwards the attribute lookup to the internal _target object, effectively - making this class act as a proxy or wrapper around the target. - - :param item: The name of the attribute being accessed. - :return: The value of the requested attribute from the ``_target`` object. - :raises AttributeError: If the requested item is ``"_target"`` or if the attribute - does not exist on the ``_target`` object. 
- """ - if item == "_target": - raise AttributeError(item) - return getattr(self._target, item) - - def to_target(self) -> Target: - """Return the underlying Qiskit Target instance.""" - return self._target - - @classmethod - def build_target( - cls, - num_qubits: Union[int, None] = None, - target_profile=TargetProfile.Base, - supports_barrier=False, - supports_delay=False, - ) -> Target: - """ - Create a Qiskit Target object with quantum gates and operations for QIR compilation. - - This class method creates a Target instance that defines the available quantum - operations and gates that can be used when compiling Q#/OpenQASM code to QIR (Quantum - Intermediate Representation) format. - - :param num_qubits: The number of qubits for the target. - If ``None``, the target will support any number of qubits. Defaults to ``None``. - :param target_profile: The target profile that determines which control flow operations - are supported. If not ``TargetProfile.Base``, adds control flow operations like - ``if_else``, ``switch_case``, and ``while_loop``. Defaults to ``TargetProfile.Base``. - :param supports_barrier: Whether to include barrier operations in the target. - Defaults to ``False``. - :param supports_delay: Whether to include delay operations in the target. - Defaults to ``False``. - :return: A Qiskit ``Target`` object configured with quantum gates and operations. - """ - - target = Target(num_qubits=num_qubits) - - if target_profile != TargetProfile.Base: - target.add_instruction(ControlFlowOp, name="control_flow") - target.add_instruction(IfElseOp, name="if_else") - target.add_instruction(SwitchCaseOp, name="switch_case") - target.add_instruction(WhileLoopOp, name="while_loop") - - # We don't currently support break or continue statements in Q#, - # so we don't include them yet. 
- # target.add_instruction(BreakLoopOp, name="break") - # target.add_instruction(ContinueLoopOp, name="continue") - - target.add_instruction(Store, name="store") - - if supports_barrier: - target.add_instruction(Barrier, name="barrier") - if supports_delay: - target.add_instruction(Delay, name="delay") - - # For loops should be fully deterministic in Qiskit/QASM. - target.add_instruction(ForLoopOp, name="for_loop") - target.add_instruction(Measure, name="measure") - - # While reset is technically not supported in base profile, the - # compiler can use decompositions to implement workarounds. - target.add_instruction(Reset, name="reset") - - target.add_instruction(CCXGate, name="ccx") - target.add_instruction(CXGate, name="cx") - target.add_instruction(CYGate, name="cy") - target.add_instruction(CZGate, name="cz") - - target.add_instruction(RXGate(Parameter("theta")), name="rx") - target.add_instruction(RXXGate(Parameter("theta")), name="rxx") - target.add_instruction(CRXGate(Parameter("theta")), name="crx") - - target.add_instruction(RYGate(Parameter("theta")), name="ry") - target.add_instruction(RYYGate(Parameter("theta")), name="ryy") - target.add_instruction(CRYGate(Parameter("theta")), name="cry") - - target.add_instruction(RZGate(Parameter("theta")), name="rz") - target.add_instruction(RZZGate(Parameter("theta")), name="rzz") - target.add_instruction(CRZGate(Parameter("theta")), name="crz") - - target.add_instruction(HGate, name="h") - - target.add_instruction(SGate, name="s") - target.add_instruction(SdgGate, name="sdg") - - target.add_instruction(SXGate, name="sx") - - target.add_instruction(SwapGate, name="swap") - - target.add_instruction(TGate, name="t") - target.add_instruction(TdgGate, name="tdg") - - target.add_instruction(XGate, name="x") - target.add_instruction(YGate, name="y") - target.add_instruction(ZGate, name="z") - - target.add_instruction(IGate, name="id") - - target.add_instruction(CHGate, name="ch") - - return target diff --git 
a/source/pip/qsharp/interop/qiskit/backends/qsharp_backend.py b/source/pip/qsharp/interop/qiskit/backends/qsharp_backend.py deleted file mode 100644 index c23db5d849..0000000000 --- a/source/pip/qsharp/interop/qiskit/backends/qsharp_backend.py +++ /dev/null @@ -1,233 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from collections import Counter -import logging -from typing import Any, Dict, List, Optional, Tuple, Union -from uuid import uuid4 - -from qiskit import QuantumCircuit -from qiskit.providers import Options -from qiskit.transpiler.target import Target -from .... import TargetProfile -from .. import OutputSemantics -from ..execution import DetaultExecutor -from ..jobs import QsSimJob -from .backend_base import BackendBase -from .compilation import Compilation -from .errors import Errors - -logger = logging.getLogger(__name__) - - -class QSharpBackend(BackendBase): - """ - A virtual backend for running Qiskit circuits using the Q# simulator. - """ - - # This init is included for the docstring - # pylint: disable=useless-parent-delegation - def __init__( - self, - target: Optional[Target] = None, - qiskit_pass_options: Optional[Dict[str, Any]] = None, - transpile_options: Optional[Dict[str, Any]] = None, - qasm_export_options: Optional[Dict[str, Any]] = None, - skip_transpilation: bool = False, - **options, - ): - """ - :param target: The target to use for the backend. - :param qiskit_pass_options: Options for the Qiskit passes. - :type qiskit_pass_options: Dict - :param transpile_options: Options for the transpiler. - :type transpile_options: Dict - :param qasm_export_options: Options for the QASM3 exporter. - :type qasm_export_options: Dict - :param skip_transpilation: Skip Qiskit transpilation. - :type skip_transpilation: bool - :param **options: Default option overrides. These can also be overridden per-call via - :meth:`run`. Common options: - - - ``name`` (str): The name of the circuit used as the entry point. 
Defaults to the circuit name. - - ``target_profile`` (TargetProfile): The target profile to use for the compilation. - - ``output_semantics`` (OutputSemantics): The output semantics for the compilation. - Defaults to ``OutputSemantics.Qiskit``. - - ``shots`` (int): The number of shots to run the program for. Defaults to ``1024``. - - ``seed`` (int): The seed to use for the random number generator. Defaults to ``None``. - - ``search_path`` (str): The path to search for imports. Defaults to ``'.'``. - - ``output_fn`` (Callable): A callback function to receive the output of the circuit. - Defaults to ``None``. - - ``executor``: The executor to be used to submit the job. Defaults to ``SynchronousExecutor``. - """ - - super().__init__( - target, - qiskit_pass_options, - transpile_options, - qasm_export_options, - skip_transpilation, - **options, - ) - - @classmethod - def _default_options(cls): - return Options( - name="program", - params=None, - search_path=".", - shots=1024, - seed=None, - output_fn=None, - target_profile=TargetProfile.Unrestricted, - output_semantics=OutputSemantics.Qiskit, - executor=DetaultExecutor(), - ) - - def run( - self, - run_input: Union[QuantumCircuit, List[QuantumCircuit]], - **options, - ) -> QsSimJob: - """ - Runs the given QuantumCircuit using the Q# simulator. - - :param run_input: The QuantumCircuit to be executed. - :param **options: Per-call option overrides. Common options: - - - ``name`` (str): The name of the circuit used as the entry point. Defaults to the circuit name. - - ``target_profile`` (TargetProfile): The target profile to use for the compilation. - - ``output_semantics`` (OutputSemantics): The output semantics for the compilation. - Defaults to ``OutputSemantics.Qiskit``. - - ``shots`` (int): The number of shots to run the program for. Defaults to ``1024``. - - ``seed`` (int): The seed to use for the random number generator. Defaults to ``None``. - - ``search_path`` (str): The path to search for imports. 
Defaults to ``'.'``. - - ``output_fn`` (Callable): A callback function to receive the output of the circuit. - Defaults to ``None``. - - ``executor``: The executor to be used to submit the job. Defaults to ``SynchronousExecutor``. - :return: The simulation job. - :rtype: QsSimJob - :raises QSharpError: If there is an error evaluating the source code. - :raises QasmError: If there is an error generating, parsing, or compiling QASM. - :raises ValueError: If run_input is not a QuantumCircuit or List[QuantumCircuit]. - """ - - run_input = self._validate_quantum_circuits(run_input) - return self._run(run_input, **options) - - def _execute(self, programs: List[Compilation], **input_params) -> Dict[str, Any]: - exec_results: List[Tuple[Compilation, Dict[str, Any]]] = [ - ( - program, - _run_qasm(program.qasm, vars(self.options).copy(), **input_params), - ) - for program in programs - ] - job_results = [] - - shots = input_params.get("shots") - if shots is None: - raise ValueError(str(Errors.MISSING_NUMBER_OF_SHOTS)) - - for program, exec_result in exec_results: - results = [self._shot_to_bitstring(result) for result in exec_result] - - counts = Counter(results) - counts_dict = dict(counts) - probabilities = { - bitstring: (count / shots) for bitstring, count in counts_dict.items() - } - - job_result = { - "data": {"counts": counts_dict, "probabilities": probabilities}, - "success": True, - "header": { - "metadata": {"qasm": program.qasm}, - "name": program.circuit.name, - "compilation_time_taken": program.time_taken, - }, - "shots": shots, - } - job_results.append(job_result) - - # All of these fields are required by the Result object - result_dict = { - "results": job_results, - "qobj_id": str(uuid4()), - "success": True, - } - - return result_dict - - -def _run_qasm( - qasm: str, - default_options: Options, - **options, -) -> Any: - """ - Runs the supplied OpenQASM 3 program. - Gates defined by stdgates.inc will be overridden with definitions - from the Q# compiler. 
- - Any gates, such as matrix unitaries, that are not able to be - transpiled will result in an error. - - :param source: The input OpenQASM 3 string to be processed. - :param default_options: Default backend option values. - :param **options: Common options: - - - ``target_profile`` (TargetProfile): The target profile to use for the compilation. - - ``output_semantics`` (OutputSemantics): The output semantics for the compilation. - - ``name`` (str): The name of the circuit. Defaults to ``'program'``. - - ``search_path`` (str): The optional search path for resolving qasm imports. - - ``shots`` (int): The number of shots to run the program for. - - ``seed`` (int): The seed to use for the random number generator. - - ``output_fn`` (Callable): A callback for each output. Defaults to ``None``. - :return: A list of results or runtime errors. - :raises QSharpError: If there is an error evaluating the source code. - :raises QasmError: If there is an error generating, parsing, or compiling QASM. - """ - - from ...._native import run_qasm_program, Output # type: ignore - from ...._fs import read_file, list_directory, resolve - from ...._http import fetch_github - - def callback(output: Output) -> None: - print(output) - - output_fn = options.pop("output_fn", callback) - - def value_or_default(key: str) -> Any: - return options.pop(key, default_options[key]) - - # when passing the args into the rust layer, any kwargs with None values - # will cause an error, so we need to filter them out. 
- args = {} - if name := value_or_default("name"): - args["name"] = name - - if target_profile := value_or_default("target_profile"): - args["target_profile"] = target_profile - if output_semantics := value_or_default("output_semantics"): - args["output_semantics"] = output_semantics - - if search_path := value_or_default("search_path"): - args["search_path"] = search_path - if shots := value_or_default("shots"): - args["shots"] = shots - if seed := value_or_default("seed"): - args["seed"] = seed - - return run_qasm_program( - qasm, - output_fn, - None, - None, - None, - read_file, - list_directory, - resolve, - fetch_github, - **args, - ) diff --git a/source/pip/qsharp/interop/qiskit/backends/re_backend.py b/source/pip/qsharp/interop/qiskit/backends/re_backend.py deleted file mode 100644 index 9e523d2bc8..0000000000 --- a/source/pip/qsharp/interop/qiskit/backends/re_backend.py +++ /dev/null @@ -1,194 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from concurrent.futures import Executor -import json -import logging -from typing import Any, Dict, List, Optional, Union -from uuid import uuid4 - -from qiskit import QuantumCircuit -from qiskit.providers import Options -from qiskit.transpiler.target import Target - -from .compilation import Compilation -from .errors import Errors -from .backend_base import BackendBase -from .. import OutputSemantics -from ..jobs import ReJob -from ..execution import DetaultExecutor -from ...._fs import read_file, list_directory, resolve -from ...._http import fetch_github -from ...._native import resource_estimate_qasm_program -from .... import TargetProfile -from ....estimator import ( - EstimatorResult, - EstimatorParams, -) - -logger = logging.getLogger(__name__) - - -class ResourceEstimatorBackend(BackendBase): - """ - A virtual backend for resource estimating Qiskit circuits levaraging - Q# resource estimation capabilities. 
- """ - - # This init is included for the docstring - # pylint: disable=useless-parent-delegation - def __init__( - self, - target: Optional[Target] = None, - qiskit_pass_options: Optional[Dict[str, Any]] = None, - transpile_options: Optional[Dict[str, Any]] = None, - qasm_export_options: Optional[Dict[str, Any]] = None, - skip_transpilation: bool = False, - **options, - ): - """ - :param target: The target to use for the backend. - :param qiskit_pass_options: Options for the Qiskit passes. - :type qiskit_pass_options: Dict - :param transpile_options: Options for the transpiler. - :type transpile_options: Dict - :param qasm_export_options: Options for the QASM3 exporter. - :type qasm_export_options: Dict - :param skip_transpilation: Skip Qiskit transpilation. - :type skip_transpilation: bool - :param **options: Default option overrides. These can also be overridden per-call via - :meth:`run`. Common options: - - - ``params`` (EstimatorParams): Configuration values for resource estimation. - - ``name`` (str): The name of the circuit used as the entry point. Defaults to the circuit name. - - ``search_path`` (str): Path to search in for qasm imports. Defaults to ``'.'``. - - ``executor``: The executor to be used to submit the job. Defaults to ``SynchronousExecutor``. - """ - - super().__init__( - target, - qiskit_pass_options, - transpile_options, - qasm_export_options, - skip_transpilation, - **options, - ) - - @property - def max_circuits(self): - """ - Returns the maximum number of circuits that can be executed simultaneously. 
- """ - return 1 - - @classmethod - def _default_options(cls): - return Options( - params=None, - name="program", - search_path=".", - target_profile=TargetProfile.Unrestricted, - output_semantics=OutputSemantics.ResourceEstimation, - executor=DetaultExecutor(), - ) - - def run( - self, - run_input: Union[QuantumCircuit, List[QuantumCircuit]], - params: Optional[EstimatorParams] = None, - **options, - ) -> ReJob: - """ - Performs resource estimation on the supplied QuantumCircuit via conversion - to OpenQASM 3. - - :param run_input: The input Qiskit QuantumCircuit object. - :param params: Configuration values for resource estimation. - :type params: EstimatorParams - :param **options: Per-call option overrides. Common options: - - - ``params`` (EstimatorParams): Configuration values for resource estimation. - - ``name`` (str): The name of the circuit used as the entry point. Defaults to the circuit name. - - ``search_path`` (str): Path to search in for qasm imports. Defaults to ``'.'``. - - ``target_profile`` (TargetProfile): The target profile to use for the backend. - - ``executor``: The executor to be used to submit the job. Defaults to ``SynchronousExecutor``. - :return: The resource estimation job. - :rtype: ReJob - :raises QSharpError: If there is an error evaluating the source code. - :raises QasmError: If there is an error generating, parsing, or compiling QASM. - :raises ValueError: If run_input is not a QuantumCircuit. - """ - if isinstance(run_input, QuantumCircuit): - run_input = [run_input] - if len(run_input) != 1: - raise ValueError(str(Errors.ONLY_ONE_CIRCUIT_ALLOWED)) - - if params is not None: - options["params"] = params - return self._run(run_input, **options) - - def _estimate_qasm( - self, - source: str, - **input_params, - ) -> Dict[str, Any]: - """ - Estimates the resource usage of a QASM source code. 
- """ - params = input_params.pop("params", None) - if params is None: - params = [{}] - elif isinstance(params, EstimatorParams): - if params.has_items: - params = params.as_dict()["items"] - else: - params = [params.as_dict()] - elif isinstance(params, dict): - params = [params] - param_str = json.dumps(params) - kwargs = { - "name": input_params.pop("name"), - "search_path": input_params.pop("search_path", "."), - } - kwargs.update(input_params) - res_str = resource_estimate_qasm_program( - source, - param_str, - read_file, - list_directory, - resolve, - fetch_github, - **kwargs, - ) - res = json.loads(res_str) - return res - - def _execute(self, programs: List[Compilation], **input_params) -> Dict: - exec_results = [ - (program, self._estimate_qasm(program.qasm, **input_params)) - for program in programs - ] - success = ( - all( - "status" in res and res["status"] == "success" - for (_, res) in exec_results - ) - and len(exec_results) > 0 - ) - result_dict = { - "results": [res for (_, res) in exec_results], - "qobj_id": str(uuid4()), - "success": success, - } - - return result_dict - - def _create_results(self, output: Dict[str, Any]) -> EstimatorResult: - return EstimatorResult(output["results"][0]) - - def _submit_job(self, run_input: List[QuantumCircuit], **options) -> ReJob: - job_id = str(uuid4()) - executor: Executor = options.pop("executor", DetaultExecutor()) - job = ReJob(self, job_id, self.run_job, run_input, options, executor) - job.submit() - return job diff --git a/source/pip/qsharp/interop/qiskit/execution/__init__.py b/source/pip/qsharp/interop/qiskit/execution/__init__.py deleted file mode 100644 index 3bef3d637f..0000000000 --- a/source/pip/qsharp/interop/qiskit/execution/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from .default import DetaultExecutor diff --git a/source/pip/qsharp/interop/qiskit/execution/default.py b/source/pip/qsharp/interop/qiskit/execution/default.py deleted file mode 100644 index 4eece33ecf..0000000000 --- a/source/pip/qsharp/interop/qiskit/execution/default.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - - -from concurrent.futures import ThreadPoolExecutor - - -class DetaultExecutor(ThreadPoolExecutor): - def __init__(self) -> None: - super().__init__(max_workers=1) diff --git a/source/pip/qsharp/interop/qiskit/jobs/__init__.py b/source/pip/qsharp/interop/qiskit/jobs/__init__.py deleted file mode 100644 index a70db02097..0000000000 --- a/source/pip/qsharp/interop/qiskit/jobs/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from .qsjob import QsJob, QsSimJob, ReJob -from .qsjobset import QsJobSet diff --git a/source/pip/qsharp/interop/qiskit/jobs/qsjob.py b/source/pip/qsharp/interop/qiskit/jobs/qsjob.py deleted file mode 100644 index 23c3f27281..0000000000 --- a/source/pip/qsharp/interop/qiskit/jobs/qsjob.py +++ /dev/null @@ -1,194 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from abc import ABC, abstractmethod -from concurrent.futures import Executor, Future -import logging -from time import monotonic -from typing import Callable, Dict, Optional, Any - -from qiskit.providers import BackendV2 -from qiskit.circuit import ( - QuantumCircuit, -) - -from qiskit.result import Result -from qiskit.providers import JobV1, JobStatus, JobError - -from ..execution import DetaultExecutor -from .... import telemetry_events -from ....estimator import EstimatorResult - -logger = logging.getLogger(__name__) - -RunInputCallable = Callable[[QuantumCircuit, str, Dict[str, Any]], Result] - - -class QsJob(JobV1, ABC): - """ - Abstract base class for Q# Qiskit jobs. 
- - Manages asynchronous execution of a quantum circuit via a callable submitted - to a thread pool. Concrete subclasses must implement :meth:`result` and - :meth:`_submit_duration`. - """ - - def __init__( - self, - backend: Optional[BackendV2], - job_id: str, - job_callable: RunInputCallable, - run_input: QuantumCircuit, - input_params: Dict[str, Any], - executor=None, - **kwargs, - ) -> None: - """ - :param backend: The backend on which the job is run. - :param job_id: A unique identifier for the job. - :type job_id: str - :param job_callable: The callable that executes the circuit and returns a result. - :param run_input: The quantum circuit to execute. - :param input_params: Parameters forwarded to ``job_callable`` at execution time. - :type input_params: Dict - :param executor: Thread pool executor. Uses a default single-threaded executor if - not provided. - :param **kwargs: Additional keyword arguments passed to ``qiskit.providers.JobV1``. - """ - - self._run_input = run_input - self._input_params = input_params - self._future: Optional[Future] = None - self._executor: Executor = executor or DetaultExecutor() - self._job_callable = job_callable - self._status = JobStatus.INITIALIZING - self._submit_start_time: Optional[float] = None - super().__init__(backend, job_id, **kwargs) - - def submit(self): - """Submit the job to the backend for execution. - - :raises JobError: If trying to re-submit the job. 
- """ - if self._future is not None: - raise JobError("Job has already been submitted.") - - self._submit_start_time = monotonic() - self._future = self._executor.submit( - self._job_callable, self._run_input, self.job_id(), **self._input_params - ) - self.add_done_callback(self._submit_duration) - - @abstractmethod - def result(self, timeout: Optional[float] = None) -> Any: - pass - - @abstractmethod - def _submit_duration(self, _future: Future): - pass - - def _result(self, timeout: Optional[float] = None) -> Any: - """Return the results of the job.""" - if self._future is None: - raise JobError("Job has not been submitted.") - - return self._future.result(timeout=timeout) - - def status(self) -> JobStatus: - """Return the status of the job, among the values of ``JobStatus``.""" - if self._future is None: - return JobStatus.INITIALIZING - if self._future.cancelled(): - return JobStatus.CANCELLED - if self._future.done(): - if self._future.exception() is None: - return JobStatus.DONE - else: - return JobStatus.ERROR - if self._future.running(): - return JobStatus.RUNNING - - return JobStatus.INITIALIZING - - def backend(self) -> BackendV2: - """Return the backend where this job was executed.""" - - return super().backend() - - def cancel(self): - """Attempt to cancel the job.""" - if self._future is not None: - self._future.cancel() - - def error(self) -> Optional[JobError]: - """Return the error that occurred during the execution of the job.""" - if self._future is not None: - return self._future.exception() - - def add_done_callback(self, fn: Callable[[Future[Result]], object]) -> None: - """Attaches a callable that will be called when the job finishes.""" - if self._future is not None: - self._future.add_done_callback(fn) - - -class QsSimJob(QsJob): - """ - A Qiskit job that runs a quantum circuit on the Q# simulator. - - Submits the circuit for simulation and returns a ``qiskit.result.Result`` - containing shot-level measurement outcomes. 
- """ - - def result(self, timeout: Optional[float] = None) -> Result: - return self._result(timeout=timeout) - - def submit(self): - """Submit the job to the backend for execution. - - :raises JobError: If trying to re-submit the job. - """ - shots = self._input_params.get("shots", -1) - telemetry_events.on_qiskit_run(shots, 1) - - super().submit() - - def _submit_duration(self, _future: Future): - end_time = monotonic() - # _submit_start_time is set in submit() before adding this callback - assert self._submit_start_time is not None - duration_in_sec = end_time - self._submit_start_time - duration_in_ms = duration_in_sec * 1000 - - shots = self._input_params.get("shots", -1) - telemetry_events.on_qiskit_run_end(shots, 1, duration_in_ms) - - -class ReJob(QsJob): - """ - A Qiskit job that runs the Q# Resource Estimator. - - Submits the circuit to the resource estimator and returns an - :class:`~qsharp.estimator.EstimatorResult` with the computed resource estimates. - """ - - def result(self, timeout: Optional[float] = None) -> EstimatorResult: - return self._result(timeout=timeout) - - def submit(self): - """Submit the job to the backend for execution. - - :raises JobError: If trying to re-submit the job. - """ - - telemetry_events.on_qiskit_run_re() - - super().submit() - - def _submit_duration(self, _future: Future): - end_time = monotonic() - # _submit_start_time is set in submit() before adding this callback - assert self._submit_start_time is not None - duration_in_sec = end_time - self._submit_start_time - duration_in_ms = duration_in_sec * 1000 - - telemetry_events.on_qiskit_run_re_end(duration_in_ms) diff --git a/source/pip/qsharp/interop/qiskit/jobs/qsjobset.py b/source/pip/qsharp/interop/qiskit/jobs/qsjobset.py deleted file mode 100644 index 0a20908079..0000000000 --- a/source/pip/qsharp/interop/qiskit/jobs/qsjobset.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- - -from concurrent.futures import Executor, Future -import datetime -from time import monotonic -import logging -from typing import Dict, List, Optional, Any -from uuid import uuid4 - - -from qiskit.circuit import QuantumCircuit -from qiskit.providers import JobV1 as Job -from qiskit.providers import BackendV2, JobStatus, JobError -from qiskit.result.result import Result, ExperimentResult - - -from .qsjob import QsSimJob, RunInputCallable -from ..execution import DetaultExecutor -from .... import telemetry_events - -logger = logging.getLogger(__name__) - - -class QsJobSet(Job): - """ - A Qiskit job set that runs multiple quantum circuits concurrently. - - Each circuit in ``run_input`` is submitted as an individual - :class:`~qsharp.interop.qiskit.jobs.qsjob.QsSimJob` and executed in a thread pool. - :meth:`result` blocks until all constituent - jobs are complete and aggregates their ``qiskit.result.Result`` objects into a single - combined result. - """ - - def __init__( - self, - backend: Optional[BackendV2], - job_id: str, - job_callable: RunInputCallable, - run_input: List[QuantumCircuit], - input_params: Dict[str, Any], - executor=None, - **kwargs, - ) -> None: - super().__init__(backend, job_id, **kwargs) - - self._run_input: List[QuantumCircuit] = run_input - self._input_params: Dict[str, Any] = input_params - self._jobs: List[QsSimJob] = [] - self._job_indexes: List[int] = [] - self._executor: Executor = executor or DetaultExecutor() - self._job_callable = job_callable - self._start_time: Optional[float] = None - self._end_time: Optional[float] = None - - def submit(self): - """Submit the job to the backend for execution. - - :raises JobError: If trying to re-submit the job. 
- """ - if len(self._jobs) > 0: - raise JobError("Jobs have already been submitted.") - self._start_time = monotonic() - shots = self._input_params.get("shots", -1) - telemetry_events.on_qiskit_run(shots, len(self._run_input)) - job_index = 0 - for circuit in self._run_input: - job_id = str(uuid4()) - job = QsSimJob( - self._backend, - job_id, - self._job_callable, - [circuit], - self._input_params, - self._executor, - ) - self._job_indexes.append(job_index) - job.submit() - job.add_done_callback(self._job_done) - - self._jobs.append(job) - - def _job_done(self, _future: Future): - self._end_time = monotonic() - if all(job.in_final_state() for job in self._jobs): - # all jobs are done, so we can log the telemetry event - shots = self._input_params.get("shots", -1) - # _start_time is set in submit() before adding this callback - assert self._start_time is not None - duration_in_ms = (self._end_time - self._start_time) * 1000 - num_circuits = len(self._run_input) - telemetry_events.on_qiskit_run_end(shots, num_circuits, duration_in_ms) - - def cancel(self): - """Attempt to cancel the job.""" - for future in self._jobs: - future.cancel() - - def status(self) -> JobStatus: - """Return the status of the job, among the values of ``JobStatus``.""" - if all(job.in_final_state() for job in self._jobs): - if any(job.status() == JobStatus.ERROR for job in self._jobs): - return JobStatus.ERROR - elif any(job.status() == JobStatus.CANCELLED for job in self._jobs): - return JobStatus.CANCELLED - assert all(job.status() == JobStatus.DONE for job in self._jobs) - return JobStatus.DONE - else: - if any(job.status() == JobStatus.RUNNING for job in self._jobs): - return JobStatus.RUNNING - if any(job.status() == JobStatus.QUEUED for job in self._jobs): - return JobStatus.QUEUED - return JobStatus.INITIALIZING - - def result(self, timeout: Optional[float] = None) -> Result: - results: List[Result] = [] - for job in self._jobs: - results.append(job.result(timeout=timeout)) - - if 
len(results) == 1: - return results[0] - - output = results[0].to_dict() - - output["job_id"] = self.job_id() - output["date"] = str(datetime.datetime.now().isoformat()) - output["backend_name"] = self.backend().name - output["backend_version"] = self.backend().backend_version - - # Times are set in submit() and _job_done() which must be called before result() - assert self._start_time is not None - assert self._end_time is not None - duration = self._end_time - self._start_time - output["time_taken"] = str(duration) - output["header"] = { - "metadata": {}, - } - output["qobj_id"] = str(uuid4()) - output["success"] = all(result.success for result in results) - agg_result: List[ExperimentResult] = [] - for result in results: - # The results of an experiment should not be empty - assert result.results is not None - for experiment_result in result.results: - agg_result.append(experiment_result.to_dict()) - output["results"] = agg_result - output = Result.from_dict(output) - return output diff --git a/source/pip/qsharp/interop/qiskit/passes/__init__.py b/source/pip/qsharp/interop/qiskit/passes/__init__.py deleted file mode 100644 index bc095ca4ba..0000000000 --- a/source/pip/qsharp/interop/qiskit/passes/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from .remove_delay import RemoveDelays diff --git a/source/pip/qsharp/interop/qiskit/passes/remove_delay.py b/source/pip/qsharp/interop/qiskit/passes/remove_delay.py deleted file mode 100644 index 65e8902d38..0000000000 --- a/source/pip/qsharp/interop/qiskit/passes/remove_delay.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - - -from qiskit.dagcircuit import DAGCircuit -from qiskit.transpiler.basepasses import TransformationPass -from qiskit.transpiler.passes.utils import control_flow - - -class RemoveDelays(TransformationPass): - """Return a circuit with any delay removed. 
- - This transformation is not semantics preserving. - """ - - @control_flow.trivial_recurse - def run(self, dag: DAGCircuit) -> DAGCircuit: - """Run the RemoveDelays pass on `dag`.""" - - dag.remove_all_ops_named("delay") - - return dag diff --git a/source/pip/qsharp/noisy_simulator/__init__.py b/source/pip/qsharp/noisy_simulator/__init__.py index c150ec9a45..ab8c6901e7 100644 --- a/source/pip/qsharp/noisy_simulator/__init__.py +++ b/source/pip/qsharp/noisy_simulator/__init__.py @@ -1,18 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -from ._noisy_simulator import ( - NoisySimulatorError, - Operation, - Instrument, - DensityMatrixSimulator, - StateVectorSimulator, -) - -__all__ = [ - "NoisySimulatorError", - "Operation", - "Instrument", - "DensityMatrixSimulator", - "StateVectorSimulator", -] +# Deprecated: use qdk.noisy_simulator instead. +from qdk.noisy_simulator import * # noqa: F401,F403 diff --git a/source/pip/qsharp/noisy_simulator/_noisy_simulator.py b/source/pip/qsharp/noisy_simulator/_noisy_simulator.py deleted file mode 100644 index c640d14c85..0000000000 --- a/source/pip/qsharp/noisy_simulator/_noisy_simulator.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from .._native import ( - NoisySimulatorError, - Operation, - Instrument, - DensityMatrixSimulator, - StateVectorSimulator, -) diff --git a/source/pip/qsharp/noisy_simulator/_noisy_simulator.pyi b/source/pip/qsharp/noisy_simulator/_noisy_simulator.pyi deleted file mode 100644 index cd740324b6..0000000000 --- a/source/pip/qsharp/noisy_simulator/_noisy_simulator.pyi +++ /dev/null @@ -1,242 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from typing import Optional, List, Any - -class NoisySimulatorError(BaseException): - """ - EXPERIMENTAL: - - An error returned from the Q# noisy simulator. - """ - - ... 
- -class Operation: - """ - EXPERIMENTAL: - - This struct represents a quantum operation. A quantum operation is a linear - transformation that maps a valid density matrix to another valid density matrices. - """ - - def __init__(self, kraus_operators: Any) -> None: - """ - Construct an operation from a list of Kraus operators. - Matrices must be of dimension 2^k x 2^k, where k is an integer. - - :param kraus_operators: List of Kraus operators. Each operator is a 2D matrix stored as - a list of lists of complex numbers, or a numpy array. - :type kraus_operators: List[List[List[complex]]] - :raises NoisySimulatorError: If the Kraus matrices are ill formed. - """ - ... - - def get_effect_matrix(self) -> List[List[complex]]: - r""" - Returns effect matrix: - $$ (\sum_i K_i^{\dagger} K_i) $$ - where $K_i$ are Kraus operators. - """ - ... - - def get_operation_matrix(self) -> List[List[complex]]: - r""" - Return matrix representation: - $$ \sum_i K_i \otimes K_{i}* $$ - where $K_i$ are Kraus operators. - """ - ... - - def get_kraus_operators(self) -> List[List[List[complex]]]: - """ - Return list of Kraus operators. - """ - ... - - def get_number_of_qubits(self) -> int: - """ - Return the number of qubits that the operation acts on. - """ - -class Instrument: - """ - EXPERIMENTAL: - - An instrument is the means by which we make measurements on a quantum system. - """ - - def __init__(self, operations: List[Operation]) -> None: - """ - Constructs an instrument from a list of operations. - """ - ... - -class DensityMatrix: - """ - EXPERIMENTAL: - - A square complex matrix of size 2^k x 2^k representing the state - of a quantum system. The data is stored in a linear vector for - performance reasons. - """ - - def data(self) -> List[List[complex]]: - """ - Returns a copy of the matrix data. - """ - ... - - def dimension(self) -> int: - """ - Returns the dimension of the matrix. E.g.: if the matrix is - 5 x 5, it returns 5. - """ - ... 
- - def number_of_qubits(self) -> int: - """ - Returns the number of qubits in the system. - """ - ... - -class DensityMatrixSimulator: - """ - EXPERIMENTAL: - - A quantum circuit simulator using a density matrix. - - If the simulator reaches an invalid state due to a numerical - error, it will raise a `SimulatorException`. - """ - - def __init__(self, number_of_qubits: int, seed: Optional[int]) -> None: - """ - Creates a new `DensityMatrixSimulator`. - """ - ... - - def apply_operation(self, operation: Operation, qubits: List[int]) -> None: - """ - Apply an operation to the given qubit ids. - """ - ... - - def apply_instrument(self, instrument: Instrument, qubits: List[int]) -> None: - """ - Apply non selective evolution to the given qubit ids. - """ - ... - - def sample_instrument(self, instrument: Instrument, qubits: List[int]) -> int: - """ - Performs selective evolution under the given instrument. - Returns the index of the observed outcome. - - Use this method to perform measurements on the quantum system. - """ - - def get_state(self) -> Optional[DensityMatrix]: - """ - Returns the `DensityMatrix` if the simulator is in a valid state, - otherwise returns None. - """ - ... - - def set_state(self, state: DensityMatrix) -> None: - """ - Set state of the quantum system to another `DensityMatrix` of the - same dimensions. - """ - ... - - def set_trace(self, trace: float) -> None: - """ - Set trace of the quantum system. That is, the probability of - finding the quantum system in the current state. The new trace - must be a number between 0 and 1. - """ - ... - -class StateVector: - """ - EXPERIMENTAL: - - A vector representing a pure state of a quantum system. - """ - - def data(self) -> List[complex]: - """ - Returns a copy of the vector data. - """ - ... - - def dimension(self) -> int: - """ - Returns the dimension of the vector. - """ - ... - - def number_of_qubits(self) -> int: - """ - Returns the number of qubits in the system. - """ - ... 
- -class StateVectorSimulator: - """ - EXPERIMENTAL: - - A quantum circuit simulator using a density matrix. - - If the simulator reaches an invalid state due to a numerical - error, it will raise a `SimulatorException`. - """ - - def __init__(self, number_of_qubits: int, seed: Optional[int]) -> None: - """ - Creates a new `DensityMatrixSimulator`. - """ - ... - - def apply_operation(self, operation: Operation, qubits: List[int]) -> None: - """ - Apply an operation to the given qubit ids. - """ - ... - - def apply_instrument(self, instrument: Instrument, qubits: List[int]) -> None: - """ - Apply non selective evolution to the given qubit ids. - """ - ... - - def sample_instrument(self, instrument: Instrument, qubits: List[int]) -> int: - """ - Performs selective evolution under the given instrument. - Returns the index of the observed outcome. - - Use this method to perform measurements on the quantum system. - """ - - def get_state(self) -> Optional[StateVector]: - """ - Returns the `StateVector` if the simulator is in a valid state, - otherwise returns None. - """ - ... - - def set_state(self, state: StateVector) -> None: - """ - Set state of the quantum system to another `StateVector` of the - same dimensions. - """ - ... - - def set_trace(self, trace: float) -> None: - """ - Set trace of the quantum system. That is, the probability of - finding the quantum system in the current state. The new trace - must be a number between 0 and 1. - """ - ... diff --git a/source/pip/qsharp/openqasm/__init__.py b/source/pip/qsharp/openqasm/__init__.py index 5e9d3757a0..b188ee05a5 100644 --- a/source/pip/qsharp/openqasm/__init__.py +++ b/source/pip/qsharp/openqasm/__init__.py @@ -1,20 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
-from ._circuit import circuit -from ._compile import compile -from ._estimate import estimate -from ._import import import_openqasm -from ._run import run -from .._native import ProgramType, OutputSemantics, QasmError # type: ignore - -__all__ = [ - "circuit", - "compile", - "estimate", - "import_openqasm", - "run", - "ProgramType", - "OutputSemantics", - "QasmError", -] +# Deprecated: use qdk.openqasm instead. +from qdk.openqasm import * # noqa: F401,F403 diff --git a/source/pip/qsharp/openqasm/_circuit.py b/source/pip/qsharp/openqasm/_circuit.py deleted file mode 100644 index eaed78cba8..0000000000 --- a/source/pip/qsharp/openqasm/_circuit.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from time import monotonic -from typing import Any, Callable, Dict, Optional, Union -from .._fs import read_file, list_directory, resolve -from .._http import fetch_github -from .._native import circuit_qasm_program # type: ignore -from .._qsharp import ( - get_interpreter, - ipython_helper, - Circuit, - CircuitConfig, - python_args_to_interpreter_args, -) -from .. import telemetry_events - - -def circuit( - source: Optional[Union[str, Callable]] = None, - *args, - **kwargs: Any, -) -> Circuit: - """ - Synthesizes a circuit for an OpenQASM program. Either a program string or - an operation must be provided. - - :param source: An OpenQASM program. Alternatively, a callable can be provided, - which must be an already imported global callable. - :type source: str or Callable - - :param *args: The arguments to pass to the callable, if one is provided. - - :keyword generation_method: The method to use for circuit generation. - :attr:`~qsharp.CircuitGenerationMethod.ClassicalEval` evaluates classical - control flow at circuit generation time. - :attr:`~qsharp.CircuitGenerationMethod.Simulate` runs a full simulation to - trace the circuit. 
- :attr:`~qsharp.CircuitGenerationMethod.Static` uses partial evaluation and - requires a non-``Unrestricted`` target profile. Defaults to ``None`` which - auto-selects the generation method. - :kwtype generation_method: :class:`~qsharp.CircuitGenerationMethod` - - :keyword max_operations: The maximum number of operations to include in the circuit. - Defaults to ``None`` which means no limit. - :kwtype max_operations: int - - :keyword source_locations: If ``True``, annotates each gate with its source location. - Defaults to ``False``. - :kwtype source_locations: bool - - :keyword group_by_scope: If ``True``, groups operations by their containing scope, such as function declarations or loop blocks. - Defaults to ``True``. - :kwtype group_by_scope: bool - - :keyword prune_classical_qubits: If ``True``, removes qubits that are never used in a quantum - gate (e.g. qubits only used as classical controls). Defaults to ``False``. - :kwtype prune_classical_qubits: bool - - :keyword name: The name of the program. This is used as the entry point for the program. - :kwtype name: str - - :keyword search_path: The optional search path for resolving file references. - :kwtype search_path: str - - :return: The synthesized circuit. - :rtype: :class:`~qsharp._native.Circuit` - :raises QasmError: If there is an error generating, parsing, or analyzing the OpenQASM source. - :raises QSharpError: If there is an error evaluating or synthesizing the circuit. 
- """ - - ipython_helper() - start = monotonic() - telemetry_events.on_circuit_qasm() - - max_operations = kwargs.pop("max_operations", None) - generation_method = kwargs.pop("generation_method", None) - source_locations = kwargs.pop("source_locations", False) - group_by_scope = kwargs.pop("group_by_scope", True) - prune_classical_qubits = kwargs.pop("prune_classical_qubits", False) - config = CircuitConfig( - max_operations=max_operations, - generation_method=generation_method, - source_locations=source_locations, - group_by_scope=group_by_scope, - prune_classical_qubits=prune_classical_qubits, - ) - - if isinstance(source, Callable) and hasattr(source, "__global_callable"): - args = python_args_to_interpreter_args(args) - res = get_interpreter().circuit( - config, callable=source.__global_callable, args=args - ) - else: - # remove any entries from kwargs with a None key or None value - kwargs = {k: v for k, v in kwargs.items() if k is not None and v is not None} - - if "search_path" not in kwargs: - kwargs["search_path"] = "." - - res = circuit_qasm_program( - source, - config, - read_file, - list_directory, - resolve, - fetch_github, - **kwargs, - ) - - durationMs = (monotonic() - start) * 1000 - telemetry_events.on_circuit_qasm_end(durationMs) - - return res diff --git a/source/pip/qsharp/openqasm/_compile.py b/source/pip/qsharp/openqasm/_compile.py deleted file mode 100644 index 8f34963eb1..0000000000 --- a/source/pip/qsharp/openqasm/_compile.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from time import monotonic -from typing import Any, Callable, Dict, Optional, Union -from .._fs import read_file, list_directory, resolve -from .._http import fetch_github - -from .._native import ( # type: ignore - compile_qasm_program_to_qir, -) -from .._qsharp import ( - QirInputData, - get_interpreter, - ipython_helper, - TargetProfile, - python_args_to_interpreter_args, -) -from .. 
import telemetry_events - - -def compile( - source: Union[str, Callable], - *args: Any, - **kwargs: Any, -) -> QirInputData: - """ - Compiles the OpenQASM source code into a program that can be submitted to a - target as QIR (Quantum Intermediate Representation). - Either a full program or a callable with arguments must be provided. - - :param source: An OpenQASM program. Alternatively, a callable can be provided, - which must be an already imported global callable. - :type source: str or Callable - :param *args: The arguments to pass to the callable, if one is provided. - :param **kwargs: Additional keyword arguments for compiling the source program. Common options: - - - ``name`` (str): The name of the circuit. This is used as the entry point for the program. - - ``target_profile`` (TargetProfile): The target profile to use for code generation. - - ``search_path`` (str): The optional search path for resolving file references. - - ``output_semantics`` (OutputSemantics): The output semantics for the compilation. - :return: The compiled program. Use ``str()`` to get the QIR string. - :rtype: QirInputData - :raises ValueError: If ``source`` is neither a string nor a callable with a - ``__global_callable`` attribute. - :raises QasmError: If there is an error generating, parsing, or analyzing the OpenQASM source. - :raises QSharpError: If there is an error compiling the program. - - Example: - - .. code-block:: python - from qsharp.openqasm import compile - source = ... - program = compile(source) - with open('myfile.ll', 'w') as file: - file.write(str(program)) - """ - - ipython_helper() - start = monotonic() - - # This doesn't work the same way as the Q# compile function as it doesn't - # have access to the global configuration which has the target profile. - # Instead, we get the target profile from the kwargs and pass it to the telemetry event. 
- target_profile = str(kwargs.get("target_profile", "unspecified")) - - telemetry_events.on_compile_qasm(target_profile) - - if isinstance(source, Callable) and hasattr(source, "__global_callable"): - args = python_args_to_interpreter_args(args) - ll_str = get_interpreter().qir( - entry_expr=None, callable=source.__global_callable, args=args - ) - elif isinstance(source, str): - # remove any entries from kwargs with a None key or None value - kwargs = {k: v for k, v in kwargs.items() if k is not None and v is not None} - - if "search_path" not in kwargs: - kwargs["search_path"] = "." - if "target_profile" not in kwargs: - kwargs["target_profile"] = TargetProfile.Base - - ll_str = compile_qasm_program_to_qir( - source, - read_file, - list_directory, - resolve, - fetch_github, - **kwargs, - ) - else: - raise ValueError( - "source must be a string or a callable with __global_callable attribute" - ) - res = QirInputData("main", ll_str) - - durationMs = (monotonic() - start) * 1000 - telemetry_events.on_compile_qasm_end(durationMs, target_profile) - - return res diff --git a/source/pip/qsharp/openqasm/_estimate.py b/source/pip/qsharp/openqasm/_estimate.py deleted file mode 100644 index 755f61eaec..0000000000 --- a/source/pip/qsharp/openqasm/_estimate.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import json -from time import monotonic -from typing import Any, Callable, Dict, List, Optional, Union, cast -from .._fs import read_file, list_directory, resolve -from .._http import fetch_github -from .._native import ( # type: ignore - resource_estimate_qasm_program, -) -from ..estimator import EstimatorParams, EstimatorResult - -from .._qsharp import ( - get_interpreter, - ipython_helper, - python_args_to_interpreter_args, -) -from .. 
import telemetry_events - - -def estimate( - source: Union[str, Callable], - params: Optional[Union[Dict[str, Any], List, EstimatorParams]] = None, - *args: Any, - **kwargs: Any, -) -> EstimatorResult: - """ - Estimates the resource requirements for executing OpenQASM source code. - Either a full program or a callable with arguments must be provided. - - :param source: An OpenQASM program. Alternatively, a callable can be provided, - which must be an already imported global callable. - :type source: str or Callable - :param params: The parameters to configure estimation. - :type params: Dict, List, or EstimatorParams - :param *args: The arguments to pass to the callable, if one is provided. - :param **kwargs: Additional keyword arguments. Common options: - - - ``name`` (str): The name of the circuit. This is used as the entry point for the program. - Defaults to ``'program'``. - - ``search_path`` (str): The optional search path for resolving imports. - :return: The estimated resources. - :rtype: EstimatorResult - :raises ValueError: If ``source`` is neither a string nor a callable with a - ``__global_callable`` attribute. - :raises QasmError: If there is an error generating, parsing, or analyzing the OpenQASM source. - :raises QSharpError: If there is an error compiling the program. 
- """ - - ipython_helper() - - def _coerce_estimator_params( - params: Optional[ - Union[Dict[str, Any], List[Dict[str, Any]], EstimatorParams] - ] = None, - ) -> List[Dict[str, Any]]: - if params is None: - return [{}] - elif isinstance(params, EstimatorParams): - if params.has_items: - return cast(List[Dict[str, Any]], params.as_dict()["items"]) - else: - return [params.as_dict()] - elif isinstance(params, dict): - return [params] - return params - - params = _coerce_estimator_params(params) - param_str = json.dumps(params) - telemetry_events.on_estimate_qasm() - start = monotonic() - if isinstance(source, Callable) and hasattr(source, "__global_callable"): - args = python_args_to_interpreter_args(args) - res_str = get_interpreter().estimate( - param_str, entry_expr=None, callable=source.__global_callable, args=args - ) - elif isinstance(source, str): - # remove any entries from kwargs with a None key or None value - kwargs = {k: v for k, v in kwargs.items() if k is not None and v is not None} - - if "search_path" not in kwargs: - kwargs["search_path"] = "." - - res_str = resource_estimate_qasm_program( - source, - param_str, - read_file, - list_directory, - resolve, - fetch_github, - **kwargs, - ) - else: - raise ValueError( - "source must be a string or a callable with __global_callable attribute" - ) - res = json.loads(res_str) - - try: - qubits = res[0]["logicalCounts"]["numQubits"] - except (KeyError, IndexError): - qubits = "unknown" - - durationMs = (monotonic() - start) * 1000 - telemetry_events.on_estimate_qasm_end(durationMs, qubits) - return EstimatorResult(res) diff --git a/source/pip/qsharp/openqasm/_import.py b/source/pip/qsharp/openqasm/_import.py deleted file mode 100644 index e616ee0d39..0000000000 --- a/source/pip/qsharp/openqasm/_import.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from time import monotonic -from typing import Any - -from ._ipython import display_or_print -from .._fs import read_file, list_directory, resolve -from .._http import fetch_github -from .._qsharp import ( - get_interpreter, - ipython_helper, -) -from .. import telemetry_events - - -def import_openqasm( - source: str, - **kwargs: Any, -) -> Any: - """ - Imports OpenQASM source code into the active QDK interpreter. By default, import uses ``ProgramType.Operation`` - such that the source becomes a Q# operation in the global namespace with parameters for any declared classical - inputs and parameters for each of the declared qubits, while any explicit or implicit output declarations become - the return type of the operation. - Alternatively, specifying ``ProgramType.File`` will treat the input source as a stand-alone program and create - an operation in the ``qasm_import`` namespace that only takes classical parameters, allocates the required qubits - internally and releases them at the end of the operation. - Finally, using ``ProgramType.Fragments`` executes the provided source in the current interactive interpreter, - defining any declared variables or operations in the current scope and returning the value of the last statement - in the source. - - :param source: An OpenQASM program or fragment. - :type source: str - :param **kwargs: Additional keyword arguments. Common options: - - - ``name`` (str): The name of the program. This is used as the entry point for the program. - - ``search_path`` (str): The optional search path for resolving file references. - - ``output_semantics`` (OutputSemantics): The output semantics for the compilation. - - ``program_type`` (ProgramType): The type of program compilation to perform. - Defaults to ``ProgramType.Operation``. - :return: The value returned by the last statement in the source code. - :rtype: Any - :raises QasmError: If there is an error generating, parsing, or analyzing the OpenQASM source. 
- :raises QSharpError: If there is an error compiling the program. - """ - - ipython_helper() - - telemetry_events.on_import_qasm() - start_time = monotonic() - - # remove any entries from kwargs with a None key or None value - kwargs = {k: v for k, v in kwargs.items() if k is not None and v is not None} - - if "search_path" not in kwargs: - kwargs["search_path"] = "." - - res = get_interpreter().import_qasm( - source, - display_or_print, - read_file, - list_directory, - resolve, - fetch_github, - **kwargs, - ) - - durationMs = (monotonic() - start_time) * 1000 - telemetry_events.on_import_qasm_end(durationMs) - - return res diff --git a/source/pip/qsharp/openqasm/_ipython.py b/source/pip/qsharp/openqasm/_ipython.py deleted file mode 100644 index 32a11bf82b..0000000000 --- a/source/pip/qsharp/openqasm/_ipython.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from .._native import Output # type: ignore - -_in_jupyter = False -try: - from IPython.display import display - - if get_ipython().__class__.__name__ == "ZMQInteractiveShell": # type: ignore - _in_jupyter = True # Jupyter notebook or qtconsole -except: - pass - - -def display_or_print(output: Output) -> None: - if _in_jupyter: - try: - display(output) - return - except: - # If IPython is not available, fall back to printing the output - pass - print(output, flush=True) diff --git a/source/pip/qsharp/openqasm/_run.py b/source/pip/qsharp/openqasm/_run.py deleted file mode 100644 index 1b82cb41ff..0000000000 --- a/source/pip/qsharp/openqasm/_run.py +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from time import monotonic -from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union -from .._fs import read_file, list_directory, resolve -from .._http import fetch_github -from .._native import QasmError, Output, run_qasm_program # type: ignore -from .._qsharp import ( - BitFlipNoise, - DepolarizingNoise, - PauliNoise, - PhaseFlipNoise, - ShotResult, - StateDump, - StateDumpData, - get_interpreter, - ipython_helper, - python_args_to_interpreter_args, - NoiseConfig, -) -from .. import telemetry_events -from ._ipython import display_or_print - - -def run( - source: Union[str, Callable], - shots: int = 1024, - *args: Any, - on_result: Optional[Callable[[ShotResult], None]] = None, - save_events: bool = False, - noise: Optional[ - Union[ - Tuple[float, float, float], - PauliNoise, - BitFlipNoise, - PhaseFlipNoise, - DepolarizingNoise, - NoiseConfig, - ] - ] = None, - qubit_loss: Optional[float] = None, - as_bitstring: bool = False, - **kwargs: Any, -) -> List[Any]: - """ - Runs the given OpenQASM program for the given number of shots. - Either a full program or a callable with arguments must be provided. - Each shot uses an independent instance of the simulator. - - :param source: An OpenQASM program. Alternatively, a callable can be provided, - which must be an already imported global callable. - :type source: str or Callable - :param shots: The number of shots to run. Defaults to ``1024``. - :type shots: int - :param *args: The arguments to pass to the callable, if one is provided. - :param on_result: A callback function that will be called with each result. - Only used when a callable is provided. - :type on_result: Callable - :param save_events: If true, the output of each shot will be saved. If false, they will be printed. - Only used when a callable is provided. - :type save_events: bool - :param noise: The noise to use in simulation. 
- :type noise: Union[Tuple[float, float, float], PauliNoise, BitFlipNoise, PhaseFlipNoise, DepolarizingNoise, NoiseConfig] - :param qubit_loss: The probability of qubit loss in simulation. - :type qubit_loss: float - :param as_bitstring: If true, the result registers will be converted to bitstrings. - :type as_bitstring: bool - :param **kwargs: Additional keyword arguments for compiling the source program. Common options: - - - ``name`` (str): The name of the circuit. This is used as the entry point for the program. - - ``target_profile`` (TargetProfile): The target profile to use for code generation. - - ``search_path`` (str): The optional search path for resolving file references. - - ``output_semantics`` (OutputSemantics): The output semantics for the compilation. - - ``seed`` (int): The seed to use for the random number generator. - :return: A list of results or runtime errors. If ``save_events`` is true, a list of ``ShotResult`` values is returned. - :rtype: List[Any] - :raises QasmError: If there is an error generating, parsing, or analyzing the OpenQASM source. - :raises QSharpError: If there is an error interpreting the input. - :raises ValueError: If the number of shots is less than 1. - :raises QasmError: If ``on_result`` or ``save_events`` are used when running OpenQASM programs. 
- """ - - ipython_helper() - - if shots < 1: - raise ValueError("The number of shots must be greater than 0.") - - telemetry_events.on_run_qasm( - shots, noise=noise is not None, qubit_loss=qubit_loss is not None - ) - start_time = monotonic() - - results: List[ShotResult] = [] - - def on_save_events(output: Output) -> None: - # Append the output to the last shot's output list - results[-1]["events"].append(output) - if output.is_matrix(): - results[-1]["matrices"].append(output) - elif output.is_state_dump(): - dump_data = cast(StateDumpData, output.state_dump()) - results[-1]["dumps"].append(StateDump(dump_data)) - elif output.is_message(): - results[-1]["messages"].append(str(output)) - - callable = None - source_str: Optional[str] = None - if isinstance(source, Callable) and hasattr(source, "__global_callable"): - args = python_args_to_interpreter_args(args) - callable = source.__global_callable - elif isinstance(source, str): - source_str = source - - noise_config = None - if isinstance(noise, NoiseConfig): - noise_config = noise - noise = None - - if callable: - for _ in range(shots): - results.append( - { - "result": None, - "events": [], - "matrices": [], - "dumps": [], - "messages": [], - } - ) - run_results = get_interpreter().run( - source_str, - on_save_events if save_events else display_or_print, - noise_config, - noise, - qubit_loss=qubit_loss, - callable=callable, - args=args, - ) - results[-1]["result"] = run_results - - if on_result: - on_result(results[-1]) - - if not save_events: - # If we are not saving events, we can just return the results - # as a list of results. - results = [result["result"] for result in results] - else: - # running the QASM program in isolation means we can't use the - # interpreter to run the program, so we can't cache the compilation - # results. This means we need to compile the program for each - # shot, or we push the shots into the QASM program and compile it once. 
- # - # This breaks the output streaming and event saving. - if on_result or save_events: - raise QasmError( - "The `on_result` and `save_events` parameters are not supported when running QASM programs." - ) - - if source_str is None: - raise QasmError( - "source must be a string or a callable with __global_callable attribute" - ) - - # remove any entries from kwargs with a None key or None value - kwargs = {k: v for k, v in kwargs.items() if k is not None and v is not None} - - if "search_path" not in kwargs: - kwargs["search_path"] = "." - - kwargs["shots"] = shots - - results = run_qasm_program( - source_str, - display_or_print, - noise_config, - noise, - qubit_loss, - read_file, - list_directory, - resolve, - fetch_github, - **kwargs, - ) - - durationMs = (monotonic() - start_time) * 1000 - telemetry_events.on_run_qasm_end(durationMs, shots) - - if as_bitstring: - from ._utils import as_bitstring as convert_to_bitstring - - results = convert_to_bitstring(results) - - return results diff --git a/source/pip/qsharp/openqasm/_utils.py b/source/pip/qsharp/openqasm/_utils.py deleted file mode 100644 index 11bde66b8d..0000000000 --- a/source/pip/qsharp/openqasm/_utils.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from .. 
import Result - - -def _map_qsharp_result_to_bit(v) -> str: - if isinstance(v, Result): - if v == Result.One: - return "1" - else: - return "0" - return str(v) - - -def _convert_result_arrays_to_bitstrings(obj): - if isinstance(obj, tuple): - return tuple([_convert_result_arrays_to_bitstrings(term) for term in obj]) - elif isinstance(obj, list): - # if all elements are Q# results, convert to bitstring - if all([isinstance(bit, Result) for bit in obj]): - return "".join([_map_qsharp_result_to_bit(bit) for bit in obj]) - return [_convert_result_arrays_to_bitstrings(bit) for bit in obj] - elif isinstance(obj, Result): - if obj == Result.One: - return 1 - else: - return 0 - else: - return obj - - -def as_bitstring(obj): - """ - Convert Q# results to bitstrings. - - :param obj: The object to convert. - :return: The converted object. - """ - return _convert_result_arrays_to_bitstrings(obj) diff --git a/source/pip/qsharp/qre/__init__.py b/source/pip/qsharp/qre/__init__.py index 0dbe8d4a9d..1c2ed76824 100644 --- a/source/pip/qsharp/qre/__init__.py +++ b/source/pip/qsharp/qre/__init__.py @@ -1,86 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
-from ._application import Application -from ._architecture import Architecture, ISAContext -from ._estimation import estimate -from ._instruction import ( - LOGICAL, - PHYSICAL, - Encoding, - ISATransform, - constraint, - InstructionSource, -) -from ._isa_enumeration import ISAQuery, ISARefNode, ISA_ROOT -from ._qre import ( - ISA, - InstructionFrontier, - Constraint, - ConstraintBound, - EstimationResult, - FactoryResult, - ISARequirements, - Block, - Trace, - block_linear_function, - constant_function, - generic_function, - linear_function, - instruction_name, - property_name, - property_name_to_key, -) -from ._results import ( - EstimationTable, - EstimationTableColumn, - EstimationTableEntry, - plot_estimates, -) -from ._trace import LatticeSurgery, PSSPC, TraceQuery, TraceTransform - -# Extend Rust Python types with additional Python-side functionality -from ._instruction import _isa_as_frame, _requirements_as_frame - -ISA.as_frame = _isa_as_frame -ISARequirements.as_frame = _requirements_as_frame - -__all__ = [ - "block_linear_function", - "constant_function", - "constraint", - "estimate", - "linear_function", - "plot_estimates", - "Application", - "Architecture", - "Block", - "Constraint", - "ConstraintBound", - "Encoding", - "EstimationResult", - "EstimationTable", - "EstimationTableColumn", - "EstimationTableEntry", - "FactoryResult", - "generic_function", - "instruction_name", - "InstructionFrontier", - "InstructionSource", - "ISA", - "ISA_ROOT", - "ISAContext", - "ISAQuery", - "ISARefNode", - "ISARequirements", - "ISATransform", - "LatticeSurgery", - "PSSPC", - "property_name", - "property_name_to_key", - "Trace", - "TraceQuery", - "TraceTransform", - "LOGICAL", - "PHYSICAL", -] +# Deprecated: use qdk.qre instead. 
+from qdk.qre import * # noqa: F401,F403 diff --git a/source/pip/qsharp/qre/_application.py b/source/pip/qsharp/qre/_application.py deleted file mode 100644 index 6c20621b2b..0000000000 --- a/source/pip/qsharp/qre/_application.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from __future__ import annotations - -import types -from abc import ABC, abstractmethod -from concurrent.futures import ThreadPoolExecutor -from types import NoneType -from typing import ( - ClassVar, - Generic, - Protocol, - TypeVar, - Generator, - get_type_hints, - cast, -) - -from ._enumeration import _enumerate_instances -from ._qre import Trace, EstimationResult -from ._trace import TraceQuery - - -class DataclassProtocol(Protocol): - __dataclass_fields__: ClassVar[dict] - - -TraceParameters = TypeVar("TraceParameters", DataclassProtocol, types.NoneType) - - -class Application(ABC, Generic[TraceParameters]): - """ - An application defines a class of quantum computation problems along with a - method to generate traces for specific problem instances. - - We distinguish between application and trace parameters. The application - parameters define which particular instance of the application we want to - consider. The trace parameters define how to generate a trace. They change - the specific way in which we solve the problem, but not the problem itself. - - For example, in quantum cryptanalysis, the application parameters could - define the key size for an RSA prime product, while the trace parameters - define which algorithm to use to break the cryptography, as well as - parameters therein. - """ - - _parallel_traces: bool = True - - @abstractmethod - def get_trace(self, parameters: TraceParameters) -> Trace: - """Return the trace corresponding to this application and parameters. - - Args: - parameters (TraceParameters): The trace parameters. - - Returns: - Trace: The trace for this application instance and parameters. 
- """ - - @staticmethod - def q(**kwargs) -> TraceQuery: - """Create a trace query for this application. - - Args: - **kwargs: Domain overrides forwarded to trace parameter enumeration. - - Returns: - TraceQuery: A trace query for this application type. - """ - return TraceQuery(NoneType, **kwargs) - - def context(self) -> _Context: - """Create a new enumeration context for this application.""" - return _Context(self) - - def post_process( - self, parameters: TraceParameters, estimation: EstimationResult - ) -> EstimationResult: - """Post-process an estimation result for a given set of trace parameters.""" - return estimation - - def enumerate_traces( - self, - **kwargs, - ) -> Generator[Trace, None, None]: - """Yield all traces of an application given its dataclass parameters. - - Args: - **kwargs: Domain overrides forwarded to ``_enumerate_instances``. - - Yields: - Trace: A trace for each enumerated set of trace parameters. - """ - - param_type = get_type_hints(self.__class__.get_trace).get("parameters") - if param_type is types.NoneType: - yield self.get_trace(None) # type: ignore - return - - if isinstance(param_type, TypeVar): - for c in param_type.__constraints__: - if c is not types.NoneType: - param_type = c - break - - if self._parallel_traces: - instances = list(_enumerate_instances(cast(type, param_type), **kwargs)) - with ThreadPoolExecutor() as executor: - for trace in executor.map(self.get_trace, instances): - yield trace - else: - for instances in _enumerate_instances(cast(type, param_type), **kwargs): - yield self.get_trace(instances) - - def enumerate_traces_with_parameters( - self, - **kwargs, - ) -> Generator[tuple[TraceParameters, Trace], None, None]: - """Yield (parameters, trace) pairs for an application. - - Like ``enumerate_traces``, but each yielded trace is accompanied by the - trace parameters that were used to generate it. - - Args: - **kwargs: Domain overrides forwarded to ``_enumerate_instances``. 
- - Yields: - tuple[TraceParameters, Trace]: A pair of trace parameters and - the corresponding trace. - """ - - param_type = get_type_hints(self.__class__.get_trace).get("parameters") - if param_type is types.NoneType: - yield None, self.get_trace(None) # type: ignore - return - - if isinstance(param_type, TypeVar): - for c in param_type.__constraints__: - if c is not types.NoneType: - param_type = c - break - - if self._parallel_traces: - instances = list(_enumerate_instances(cast(type, param_type), **kwargs)) - with ThreadPoolExecutor() as executor: - for instance, trace in zip( - instances, executor.map(self.get_trace, instances) - ): - yield instance, trace - else: - for instance in _enumerate_instances(cast(type, param_type), **kwargs): - yield instance, self.get_trace(instance) - - def disable_parallel_traces(self): - """Disable parallel trace generation for this application.""" - self._parallel_traces = False - - -class _Context: - """Enumeration context wrapping an application instance.""" - - application: Application - - def __init__(self, application: Application, **kwargs): - """Initialize the context for the given application. - - Args: - application (Application): The application instance. - **kwargs: Additional keyword arguments (reserved for future use). - """ - self.application = application diff --git a/source/pip/qsharp/qre/_architecture.py b/source/pip/qsharp/qre/_architecture.py deleted file mode 100644 index cd8bb52e64..0000000000 --- a/source/pip/qsharp/qre/_architecture.py +++ /dev/null @@ -1,244 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from __future__ import annotations -import copy -from typing import cast, TYPE_CHECKING - -from abc import ABC, abstractmethod - -from ._qre import ( - ISA, - _ProvenanceGraph, - Instruction, - _IntFunction, - _FloatFunction, - constant_function, - property_name_to_key, -) - -if TYPE_CHECKING: - from typing import Optional - - from ._instruction import ISATransform, Encoding - - -class Architecture(ABC): - """Abstract base class for quantum hardware architectures.""" - - @abstractmethod - def provided_isa(self, ctx: ISAContext) -> ISA: - """ - Create the ISA provided by this architecture, adding instructions - directly to the context's provenance graph. - - Args: - ctx (ISAContext): The enumeration context whose provenance graph stores - the instructions. - - Returns: - ISA: The ISA backed by the context's provenance graph. - """ - ... - - def context(self) -> ISAContext: - """Create a new enumeration context for this architecture. - - Returns: - ISAContext: A new enumeration context. - """ - return ISAContext(self) - - -class ISAContext: - """ - Context passed through enumeration, holding shared state. - """ - - def __init__(self, arch: Architecture): - """Initialize the ISA context for the given architecture. - - Args: - arch (Architecture): The architecture providing the base ISA. - """ - self._provenance: _ProvenanceGraph = _ProvenanceGraph() - - # Let the architecture create instructions directly in the graph. 
- self._isa = arch.provided_isa(self) - - self._bindings: dict[str, ISA] = {} - self._transforms: dict[int, Architecture | ISATransform] = {0: arch} - - def _with_binding(self, name: str, isa: ISA) -> ISAContext: - """Return a new context with an additional binding (internal use).""" - ctx = copy.copy(self) - ctx._bindings = {**self._bindings, name: isa} - return ctx - - @property - def isa(self) -> ISA: - """The ISA provided by the architecture for this context.""" - return self._isa - - def add_instruction( - self, - id_or_instruction: int | Instruction, - encoding: Encoding = 0, # type: ignore - *, - arity: Optional[int] = 1, - time: int | _IntFunction = 0, - space: Optional[int] | _IntFunction = None, - length: Optional[int | _IntFunction] = None, - error_rate: float | _FloatFunction = 0.0, - transform: ISATransform | None = None, - source: list[Instruction] | None = None, - **kwargs: int, - ) -> int: - """ - Create an instruction and add it to the provenance graph. - - Can be called in two ways: - - 1. With keyword args to create a new instruction:: - - ctx.add_instruction(T, encoding=LOGICAL, time=1000, - error_rate=1e-8) - - 2. With a pre-existing ``Instruction`` object (e.g. from - ``with_id()``):: - - ctx.add_instruction(existing_instruction) - - Provenance is recorded when *transform* and/or *source* are - supplied: - - - **transform** — the ``ISATransform`` that produced the - instruction. - - **source** — input instructions consumed by the transform. - - Args: - id_or_instruction: Either an instruction ID (int) for creating - a new instruction, or an existing ``Instruction`` object. - encoding: The instruction encoding (0 = Physical, 1 = Logical). - Ignored when passing an existing ``Instruction``. - arity: The instruction arity. ``None`` for variable arity. - Ignored when passing an existing ``Instruction``. - time: Instruction time in ns (or ``_IntFunction`` for variable - arity). Ignored when passing an existing ``Instruction``. 
- space: Instruction space in physical qubits (or ``_IntFunction`` - for variable arity). Ignored when passing an existing - ``Instruction``. - length: Arity including ancilla qubits. Ignored when passing an - existing ``Instruction``. - error_rate: Instruction error rate (or ``_FloatFunction`` for - variable arity). Ignored when passing an existing - ``Instruction``. - transform: The ``ISATransform`` that produced the instruction. - source: List of source ``Instruction`` objects consumed by the - transform. - **kwargs: Additional properties (e.g. ``distance=9``). Ignored - when passing an existing ``Instruction``. - - Returns: - The node index in the provenance graph. - - Raises: - ValueError: If an unknown property name is provided in kwargs. - """ - if transform is None and source is None: - return self._provenance.add_instruction( - cast(int, id_or_instruction), - encoding, - arity=arity, - time=time, - space=space, - length=length, - error_rate=error_rate, - **kwargs, - ) - - if isinstance(id_or_instruction, Instruction): - instr = id_or_instruction - else: - instr = _make_instruction( - id_or_instruction, - int(encoding), - arity, - time, - space, - length, - error_rate, - kwargs, - ) - - transform_id = id(transform) if transform is not None else 0 - children = [inst.source for inst in source] if source else [] - - node_index = self._provenance.add_node(instr, transform_id, children) - - if transform is not None: - self._transforms[transform_id] = transform - - return node_index - - def make_isa(self, *node_indices: int) -> ISA: - """ - Create an ISA backed by this context's provenance graph from the - given node indices. - - Args: - *node_indices (int): Node indices in the provenance graph. - - Returns: - ISA: An ISA referencing the provenance graph. 
- """ - return self._provenance.make_isa(list(node_indices)) - - -def _make_instruction( - id: int, - encoding: int, - arity: int | None, - time: int | _IntFunction, - space: int | _IntFunction | None, - length: int | _IntFunction | None, - error_rate: float | _FloatFunction, - properties: dict[str, int], -) -> Instruction: - """Build an ``Instruction`` from keyword arguments.""" - if arity is not None: - instr = Instruction.fixed_arity( - id, - encoding, - arity, - cast(int, time), - cast(int | None, space), - cast(int | None, length), - cast(float, error_rate), - ) - else: - if isinstance(time, int): - time = constant_function(time) - if isinstance(space, int): - space = constant_function(space) - if isinstance(length, int): - length = constant_function(length) - if isinstance(error_rate, (int, float)): - error_rate = constant_function(float(error_rate)) - - instr = Instruction.variable_arity( - id, - encoding, - time, - cast(_IntFunction, space), - error_rate, - length, - ) - - for key, value in properties.items(): - prop_key = property_name_to_key(key) - if prop_key is None: - raise ValueError(f"Unknown property '{key}'.") - instr.set_property(prop_key, value) - - return instr diff --git a/source/pip/qsharp/qre/_enumeration.py b/source/pip/qsharp/qre/_enumeration.py deleted file mode 100644 index b01d706944..0000000000 --- a/source/pip/qsharp/qre/_enumeration.py +++ /dev/null @@ -1,242 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -import types -from typing import ( - Generator, - Type, - TypeVar, - Literal, - Union, - cast, - get_args, - get_origin, - get_type_hints, -) -from dataclasses import MISSING -from itertools import product -from enum import Enum - - -T = TypeVar("T") - - -def _is_union_type(tp) -> bool: - """Check if a type is a Union or Python 3.10+ union (X | Y).""" - return get_origin(tp) is Union or isinstance(tp, types.UnionType) - - -def _is_type_filter(val, union_members: tuple) -> bool: - """ - Check if *val* is a union member type or a list of union member types, - i.e. a type filter for a union field (as opposed to a fixed value or - instance domain). - """ - member_set = set(union_members) - if isinstance(val, type) and val in member_set: - return True - if isinstance(val, list) and all( - isinstance(v, type) and v in member_set for v in val - ): - return True - return False - - -def _is_union_constraint_dict(val) -> bool: - """ - Check if *val* is a dict whose keys are all types, i.e. a per-member - constraint mapping for a union field. - - Example: ``{OptionA: {"number": [2, 3]}, OptionB: {}}`` - """ - return isinstance(val, dict) and all(isinstance(k, type) for k in val) - - -def _enumerate_union_members( - union_members: tuple, - val=None, -) -> list: - """ - Enumerate instances for a union-typed field. - - *val* controls which members are enumerated and how: - - - ``None`` - enumerate all members with their default domains. - - A single type (e.g. ``OptionB``) - enumerate only that member. - - A list of types (e.g. ``[OptionA, OptionB]``) - enumerate those members. - - A dict mapping types to constraint dicts - (e.g. ``{OptionA: {"number": [2, 3]}, OptionB: {}}``) - - enumerate only the listed members, forwarding the constraint dicts. 
- """ - # No override - enumerate all members with defaults - if val is None: - domain: list = [] - for member_type in union_members: - domain.extend(_enumerate_instances(member_type)) - return domain - - # Single type - if isinstance(val, type): - return list(_enumerate_instances(val)) - - # List of types - if isinstance(val, list) and all(isinstance(v, type) for v in val): - domain = [] - for member_type in val: - domain.extend(_enumerate_instances(member_type)) - return domain - - # Dict of type → constraint dict - if _is_union_constraint_dict(val): - domain = [] - for member_type, member_kwargs in cast(dict, val).items(): - domain.extend(_enumerate_instances(member_type, **member_kwargs)) - return domain - - raise ValueError( - f"Invalid value for union field: {val!r}. " - "Expected a union member type, a list of types, or a dict mapping " - "types to constraint dicts." - ) - - -def _enumerate_instances(cls: Type[T], **kwargs) -> Generator[T, None, None]: - """ - Yield all instances of a dataclass given its class. - - The enumeration logic supports defining domains for fields using the - ``domain`` metadata key. Additionally, boolean fields are automatically - enumerated with ``[True, False]``, Enum fields with all their members, - and Literal types with their defined values. - - **Nested dataclass fields** can be constrained by passing a dict:: - - _enumerate_instances(Outer, inner={"option": True}) - - **Union-typed fields** support several override forms: - - - A single type to select one member:: - - _enumerate_instances(Config, option=OptionB) - - - A list of types to select a subset:: - - _enumerate_instances(Config, option=[OptionA, OptionB]) - - - A dict mapping types to constraint dicts:: - - _enumerate_instances(Config, option={OptionA: {"number": [2, 3]}, OptionB: {}}) - - Args: - cls (Type[T]): The dataclass type to enumerate. - **kwargs: Fixed values or domains for fields. 
If a value is a list - and the corresponding field is kw_only, it is treated as a domain - to enumerate over. For nested dataclass fields a ``dict`` value - is forwarded as keyword arguments. For union-typed fields a type, - list of types, or ``dict[type, dict]`` controls member selection - and constraints. - - Returns: - Generator[T, None, None]: A generator yielding instances of the - dataclass. - - Raises: - ValueError: If a field cannot be enumerated (no domain found). - """ - - names = [] - values = [] - fixed_kwargs = {} - - if (fields := getattr(cls, "__dataclass_fields__", None)) is None: - # There are no fields defined for this class, so just yield a single - # instance - yield cls(**kwargs) - return - - # Resolve type hints to handle stringified types from __future__.annotations - type_hints = get_type_hints(cls) - - for field in fields.values(): # type: ignore - name = field.name - # Get resolved type or fallback to field.type - current_type = type_hints.get(name, field.type) - - if name in kwargs: - val = kwargs[name] - - is_union = _is_union_type(current_type) - union_members = get_args(current_type) if is_union else () - - # Union field with a type filter or constraint dict - if is_union and ( - _is_type_filter(val, union_members) or _is_union_constraint_dict(val) - ): - names.append(name) - values.append(_enumerate_union_members(union_members, val)) - continue - - # Nested dataclass field with a dict of constraints - if ( - isinstance(val, dict) - and not is_union - and isinstance(current_type, type) - and hasattr(current_type, "__dataclass_fields__") - ): - names.append(name) - values.append(list(_enumerate_instances(current_type, **val))) - continue - - # If kw_only and list, it's a domain to enumerate - if field.kw_only and isinstance(val, list): - names.append(name) - values.append(val) - else: - # Otherwise, it's a fixed value - fixed_kwargs[name] = val - continue - - if not field.kw_only: - # We don't enumerate non-kw-only fields that aren't in 
kwargs - continue - - # Derived domain logic - names.append(name) - - domain = field.metadata.get("domain", None) - if domain is not None: - values.append(domain) - continue - - if current_type is bool: - values.append([True, False]) - continue - - if isinstance(current_type, type) and issubclass(current_type, Enum): - values.append(list(current_type)) - continue - - if get_origin(current_type) is Literal: - values.append(list(get_args(current_type))) - continue - - # Union types (e.g., OptionA | OptionB or Union[OptionA, OptionB]) - if _is_union_type(current_type): - values.append(_enumerate_union_members(get_args(current_type), None)) - continue - - # Nested dataclass types - if isinstance(current_type, type) and hasattr( - current_type, "__dataclass_fields__" - ): - values.append(list(_enumerate_instances(current_type))) - continue - - if field.default is not MISSING: - values.append([field.default]) - continue - - raise ValueError(f"Cannot enumerate field {name}.") - - for instance_values in product(*values): - yield cls(**fixed_kwargs, **dict(zip(names, instance_values))) diff --git a/source/pip/qsharp/qre/_estimation.py b/source/pip/qsharp/qre/_estimation.py deleted file mode 100644 index 228e139ede..0000000000 --- a/source/pip/qsharp/qre/_estimation.py +++ /dev/null @@ -1,218 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from __future__ import annotations - -from typing import cast, Optional, Any - -from .. 
import telemetry_events -from ._application import Application -from ._architecture import Architecture -from ._qre import ( - _estimate_parallel, - _estimate_with_graph, - _EstimationCollection, - Trace, -) -from ._trace import TraceQuery, PSSPC, LatticeSurgery -from ._isa_enumeration import ISAQuery -from ._results import EstimationTable, EstimationTableEntry - - -def estimate( - application: Application, - architecture: Architecture, - isa_query: ISAQuery, - trace_query: Optional[TraceQuery] = None, - *, - max_error: float = 1.0, - post_process: bool = False, - use_graph: bool = True, - name: Optional[str] = None, -) -> EstimationTable: - """ - Estimate the resource requirements for a given application instance and - architecture. - - The application instance might return multiple traces. Each of the traces - is transformed by the trace query, which applies several trace transforms in - sequence. Each transform may return multiple traces. Similarly, the - architecture's ISA is transformed by the ISA query, which applies several - ISA transforms in sequence, each of which may return multiple ISAs. The - estimation is performed for each combination of transformed trace and ISA. - The results are collected into an EstimationTable and returned. - - The collection only contains the results that are optimal with respect to - the total number of qubits and the total runtime. - - Note: - The pruning strategy used when ``use_graph`` is set to True (default) - filters ISA instructions by comparing their per-instruction space, time, - and error independently. However, the total qubit count of a result - depends on the interaction between factory space and runtime: - ``factory_qubits = copies × factory_space`` where copies are determined - by ``count.div_ceil(runtime / factory_time)``. 
Because of this, an ISA - instruction that is dominated on per-instruction metrics can still - contribute to a globally Pareto-optimal result (e.g., a factory with - higher time may need fewer copies, leading to fewer total qubits). As a - consequence, ``use_graph=True`` may miss some results that - ``use_graph=False`` would find. Use ``use_graph=False`` when completeness of - the Pareto frontier is required. - - Args: - application (Application): The quantum application to be estimated. - architecture (Architecture): The target quantum architecture. - isa_query (ISAQuery): The ISA query to enumerate ISAs from the architecture. - trace_query (TraceQuery): The trace query to enumerate traces from the - application. - max_error (float): The maximum allowed error for the estimation results. - post_process (bool): If True, use the Python-threaded estimation path - (intended for future post-processing logic). If False (default), - use the Rust parallel estimation path. - use_graph (bool): If True (default), use the Rust estimation path that - builds a graph of ISAs and prunes suboptimal ISAs during estimation. - If False, use the Rust estimation path that does not perform any - pruning and simply enumerates all ISAs for each trace. - name (Optional[str]): An optional name for the estimation. If given, this - will be added as a first column to the results table for all entries. - - Returns: - EstimationTable: A table containing the optimal estimation results. 
- """ - - telemetry_events.on_qre_estimate(post_process=post_process, use_graph=use_graph) - - app_ctx = application.context() - arch_ctx = architecture.context() - - if trace_query is None: - trace_query = PSSPC.q() * LatticeSurgery.q() - - if post_process: - # Enumerate traces with their parameters so we can post-process later - params_and_traces = cast( - list[tuple[Any, Trace]], - list(trace_query.enumerate(app_ctx, track_parameters=True)), - ) - num_traces = len(params_and_traces) - - # Phase 1: Run all estimates in Rust (parallel, fast). - traces_only = [trace for _, trace in params_and_traces] - - if use_graph: - isa_query.populate(arch_ctx) - arch_ctx._provenance.build_pareto_index() - - num_isas = arch_ctx._provenance.total_isa_count() - - collection = _estimate_with_graph( - cast(list[Trace], traces_only), arch_ctx._provenance, max_error, True - ) - isas = collection.isas - else: - isas = list(isa_query.enumerate(arch_ctx)) - - num_isas = len(isas) - - collection = _estimate_parallel( - cast(list[Trace], traces_only), isas, max_error, True - ) - - total_jobs = collection.total_jobs - successful = collection.successful_estimates - summaries = collection.all_summaries # (trace_idx, isa_idx, qubits, runtime) - - # Phase 2: Learn per-trace runtime multiplier and qubit multiplier from - # one sample each: if post_process changes runtime or qubit count it - # will affect the Pareto optimality, but the changes depend only on the - # trace, not on the ISA. 
- trace_multipliers: dict[int, tuple[float, float]] = {} - trace_sample_isa: dict[int, int] = {} - for t_idx, isa_idx, _q, r in summaries: - if t_idx not in trace_sample_isa: - trace_sample_isa[t_idx] = isa_idx - for t_idx, isa_idx in trace_sample_isa.items(): - params, trace = params_and_traces[t_idx] - sample = trace.estimate(isas[isa_idx], max_error) - if sample is not None: - pre_q = sample.qubits - pre_r = sample.runtime - pp = app_ctx.application.post_process(params, sample) - if pp is not None and pre_r > 0 and pre_q > 0: - trace_multipliers[t_idx] = (pp.qubits / pre_q, pp.runtime / pre_r) - - # Phase 3: Estimate post-pp values and filter to Pareto candidates. - estimated_pp: list[tuple[int, int, int, int]] = ( - [] - ) # (t_idx, isa_idx, est_q, est_r) - for t_idx, isa_idx, q, r in summaries: - mult_q, mult_r = trace_multipliers.get(t_idx, (0.0, 0.0)) - est_q = int(q * mult_q) if mult_q > 0 else q - est_r = int(r * mult_r) if mult_r > 0 else r - estimated_pp.append((t_idx, isa_idx, est_q, est_r)) - - # Build approximate post-pp Pareto frontier to identify candidates. - estimated_pp.sort(key=lambda x: (x[2], x[3])) # sort by qubits, then runtime - approx_pareto: list[tuple[int, int, int, int]] = [] - min_r = float("inf") - for item in estimated_pp: - if item[3] < min_r: - approx_pareto.append(item) - min_r = item[3] - - # Phase 4: Re-estimate and post-process only the Pareto candidates. 
- pp_collection = _EstimationCollection() - for t_idx, isa_idx, _q, _r in approx_pareto: - params, trace = params_and_traces[t_idx] - result = trace.estimate(isas[isa_idx], max_error) - if result is not None: - pp_result = app_ctx.application.post_process(params, result) - if pp_result is not None: - pp_collection.insert(pp_result) - collection = pp_collection - else: - traces = list(trace_query.enumerate(app_ctx)) - num_traces = len(traces) - - if use_graph: - isa_query.populate(arch_ctx) - arch_ctx._provenance.build_pareto_index() - - num_isas = arch_ctx._provenance.total_isa_count() - - collection = _estimate_with_graph( - cast(list[Trace], traces), arch_ctx._provenance, max_error, False - ) - else: - isas = list(isa_query.enumerate(arch_ctx)) - - num_isas = len(isas) - - # Use the Rust parallel estimation path - collection = _estimate_parallel( - cast(list[Trace], traces), isas, max_error, False - ) - - total_jobs = collection.total_jobs - successful = collection.successful_estimates - - # Post-process the results and add them to a results table - table = EstimationTable() - - table.name = name - - if name is not None: - table.insert_column(0, "name", lambda entry: name) - - table.extend( - EstimationTableEntry.from_result(result, arch_ctx) for result in collection - ) - - # Fill in the stats for this estimation run - table.stats.num_traces = num_traces - table.stats.num_isas = num_isas - table.stats.total_jobs = total_jobs - table.stats.successful_estimates = successful - table.stats.pareto_results = len(collection) - - return table diff --git a/source/pip/qsharp/qre/_instruction.py b/source/pip/qsharp/qre/_instruction.py deleted file mode 100644 index 4669a86d4c..0000000000 --- a/source/pip/qsharp/qre/_instruction.py +++ /dev/null @@ -1,473 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from __future__ import annotations - -from abc import ABC, abstractmethod -from dataclasses import dataclass, field -from typing import Generator, Iterable, Optional -from enum import IntEnum - -import pandas as pd - -from ._architecture import ISAContext, Architecture -from ._enumeration import _enumerate_instances -from ._isa_enumeration import ( - ISA_ROOT, - _BindingNode, - _ComponentQuery, - ISAQuery, -) -from ._qre import ( - ISA, - Constraint, - ConstraintBound, - Instruction, - ISARequirements, - instruction_name, - property_name_to_key, -) - - -class Encoding(IntEnum): - PHYSICAL = 0 - LOGICAL = 1 - - -PHYSICAL = Encoding.PHYSICAL -LOGICAL = Encoding.LOGICAL - - -def constraint( - id: int, - encoding: Encoding = PHYSICAL, - *, - arity: Optional[int] = 1, - error_rate: Optional[ConstraintBound] = None, - **kwargs: bool, -) -> Constraint: - """ - Create an instruction constraint. - - Args: - id (int): The instruction ID. - encoding (Encoding): The instruction encoding. PHYSICAL (0) or LOGICAL (1). - arity (Optional[int]): The instruction arity. If None, instruction is - assumed to have variable arity. Default is 1. - error_rate (Optional[ConstraintBound]): The constraint on the error rate. - **kwargs (bool): Required properties that matching instructions must have. - Valid property names: distance. Set to True to require the property. - - Returns: - Constraint: The instruction constraint. - - Raises: - ValueError: If an unknown property name is provided in kwargs. - """ - c = Constraint(id, encoding, arity, error_rate) - - for key, value in kwargs.items(): - if value: - if (prop_key := property_name_to_key(key)) is None: - raise ValueError(f"Unknown property '{key}'") - - c.add_property(prop_key) - - return c - - -class ISATransform(ABC): - """ - Abstract base class for transformations between ISAs (e.g., QEC schemes). 
- - An ISA transform defines a mapping from a required input ISA (e.g., - architecture constraints) to a provided output ISA (logical instructions). - It supports enumeration of configuration parameters. - """ - - @staticmethod - @abstractmethod - def required_isa() -> ISARequirements: - """ - Return the requirements that an implementation ISA must satisfy. - - Returns: - ISARequirements: The requirements for the underlying ISA. - """ - ... - - @abstractmethod - def provided_isa( - self, impl_isa: ISA, ctx: ISAContext - ) -> Generator[ISA, None, None]: - """ - Yields ISAs provided by this transform given an implementation ISA. - - Args: - impl_isa (ISA): The implementation ISA that satisfies requirements. - ctx (ISAContext): The enumeration context whose provenance graph - stores the instructions. - - Yields: - ISA: A provided logical ISA. - """ - ... - - @classmethod - def enumerate_isas( - cls, - impl_isa: ISA | Iterable[ISA], - ctx: ISAContext, - **kwargs, - ) -> Generator[ISA, None, None]: - """ - Enumerate all valid ISAs for this transform given implementation ISAs. - - This method iterates over all instances of the transform class (enumerating - hyperparameters) and filters implementation ISAs against requirements. - - Args: - impl_isa (ISA | Iterable[ISA]): One or more implementation ISAs. - ctx (ISAContext): The enumeration context. - **kwargs: Arguments passed to parameter enumeration. - - Yields: - ISA: Valid provided ISAs. - """ - isas = [impl_isa] if isinstance(impl_isa, ISA) else impl_isa - for isa in isas: - if not isa.satisfies(cls.required_isa()): - continue - - for component in _enumerate_instances(cls, **kwargs): - ctx._transforms[id(component)] = component - yield from component.provided_isa(isa, ctx) - - @classmethod - def q(cls, *, source: ISAQuery | None = None, **kwargs) -> ISAQuery: - """ - Create an ISAQuery node for this transform. - - Args: - source (Node | None): The source node providing implementation ISAs. - Defaults to ISA_ROOT. 
- **kwargs: Fixed values or domains for dataclass fields. Keyword-only - fields with a ``metadata["domain"]`` are enumerated automatically; - passing a value for such a field overrides or restricts the - domain. Non-keyword-only fields passed here are used as fixed - values for all enumerated instances. - - For example, given a transform with a non-keyword-only field - ``threshold`` and a keyword-only field ``distance`` with a - domain, calling ``MyTransform.q(threshold=0.03)`` fixes - ``threshold`` to 0.03 while still enumerating over all values - in the ``distance`` domain. - - Returns: - ISAQuery: An enumeration node representing this transform. - """ - return _ComponentQuery( - cls, source=source if source is not None else ISA_ROOT, kwargs=kwargs - ) - - @classmethod - def bind(cls, name: str, node: ISAQuery) -> _BindingNode: - """ - Create a BindingNode for this transform. - - This is a convenience method equivalent to ``cls.q().bind(name, node)``. - - Args: - name (str): The name to bind the transform's output to. - node (Node): The child node that can reference this binding. - - Returns: - BindingNode: A binding node enclosing this transform. - """ - return cls.q().bind(name, node) - - -@dataclass(slots=True) -class InstructionSource: - nodes: list[_InstructionSourceNode] = field(default_factory=list, init=False) - roots: list[int] = field(default_factory=list, init=False) - - @classmethod - def from_isa(cls, ctx: ISAContext, isa: ISA) -> InstructionSource: - """ - Construct an InstructionSource graph from an ISA. - - The instruction source graph contains more information than the - provenance graph in the context, as it connects the instructions to the - transforms and architectures that generated them. - - Args: - ctx (ISAContext): The enumeration context containing the provenance graph. - isa (ISA): Instructions in the ISA will serve as root nodes in the source graph. 
- - Returns: - InstructionSource: The instruction source graph for the estimation result. - """ - - def _make_node( - graph: InstructionSource, source_table: dict[int, int], source: int - ) -> int: - if source in source_table: - return source_table[source] - - children = [ - _make_node(graph, source_table, child) - for child in ctx._provenance.children(source) - if child != 0 - ] - - node = graph.add_node( - ctx._provenance.instruction(source), - ctx._transforms.get(ctx._provenance.transform_id(source)), - children, - ) - - source_table[source] = node - return node - - graph = cls() - source_table: dict[int, int] = {} - - for inst in isa: - node_idx = isa.node_index(inst.id) - if node_idx is not None and node_idx != 0: - node = _make_node(graph, source_table, node_idx) - graph.add_root(node) - - return graph - - def add_root(self, node_id: int) -> None: - """Add a root node to the instruction source graph. - - Args: - node_id (int): The index of the node to add as a root. - """ - self.roots.append(node_id) - - def add_node( - self, - instruction: Instruction, - transform: Optional[ISATransform | Architecture], - children: list[int], - ) -> int: - """Add a node to the instruction source graph. - - Args: - instruction (Instruction): The instruction for this node. - transform (Optional[ISATransform | Architecture]): The transform - that produced the instruction. - children (list[int]): Indices of child nodes. - - Returns: - int: The index of the newly added node. 
- """ - node_id = len(self.nodes) - self.nodes.append(_InstructionSourceNode(instruction, transform, children)) - return node_id - - def __str__(self) -> str: - """Return a formatted string representation of the instruction source graph.""" - - def _format_node(node: _InstructionSourceNode, indent: int = 0) -> str: - result = " " * indent + f"{instruction_name(node.instruction.id) or '??'}" - if node.transform is not None: - result += f" @ {node.transform}" - for child_index in node.children: - result += "\n" + _format_node(self.nodes[child_index], indent + 2) - return result - - return "\n".join( - _format_node(self.nodes[root_index]) for root_index in self.roots - ) - - def __getitem__(self, id: int) -> _InstructionSourceNodeReference: - """ - Retrieve the first instruction source root node with the given - instruction ID. Raises KeyError if no such node exists. - - Args: - id (int): The instruction ID to search for. - - Returns: - _InstructionSourceNodeReference: The first instruction source node with the - given instruction ID. - """ - if (node := self.get(id)) is not None: - return node - - raise KeyError(f"Instruction ID {id} not found in instruction source graph.") - - def __contains__(self, id: int) -> bool: - """ - Check if there is an instruction source root node with the given - instruction ID. - - Args: - id (int): The instruction ID to search for. - - Returns: - bool: True if a node with the given instruction ID exists, False otherwise. - """ - for root in self.roots: - if self.nodes[root].instruction.id == id: - return True - - return False - - def get( - self, id: int, default: Optional[_InstructionSourceNodeReference] = None - ) -> Optional[_InstructionSourceNodeReference]: - """ - Retrieve the first instruction source root node with the given - instruction ID. Returns default if no such node exists. - - Args: - id (int): The instruction ID to search for. 
- default (Optional[_InstructionSourceNodeReference]): The value to return if no - node with the given ID is found. Default is None. - - Returns: - Optional[_InstructionSourceNodeReference]: The first instruction source node with the - given instruction ID, or default if no such node exists. - """ - for root in self.roots: - if self.nodes[root].instruction.id == id: - return _InstructionSourceNodeReference(self, root) - - return default - - -@dataclass(frozen=True, slots=True) -class _InstructionSourceNode: - """A node in the instruction source graph.""" - - instruction: Instruction - transform: Optional[ISATransform | Architecture] - children: list[int] - - -class _InstructionSourceNodeReference: - """Reference to a node in an InstructionSource graph.""" - - def __init__(self, graph: InstructionSource, node_id: int): - """Initialize a reference to a node in the instruction source graph. - - Args: - graph (InstructionSource): The owning instruction source graph. - node_id (int): The index of the referenced node. - """ - self.graph = graph - self.node_id = node_id - - @property - def instruction(self) -> Instruction: - """The instruction at this node.""" - return self.graph.nodes[self.node_id].instruction - - @property - def transform(self) -> Optional[ISATransform | Architecture]: - """The transform that produced this node's instruction, if any.""" - return self.graph.nodes[self.node_id].transform - - def __str__(self) -> str: - """Return a string representation of the referenced node.""" - return str(self.graph.nodes[self.node_id]) - - def __getitem__(self, id: int) -> _InstructionSourceNodeReference: - """ - Retrieve the first child instruction source node with the given - instruction ID. Raises KeyError if no such node exists. - - Args: - id (int): The instruction ID to search for. - - Returns: - _InstructionSourceNodeReference: The first child instruction source node with the - given instruction ID. 
- """ - if (node := self.get(id)) is not None: - return node - - raise KeyError( - f"Instruction ID {id} not found in children of instruction {instruction_name(self.instruction.id) or '??'}." - ) - - def get( - self, id: int, default: Optional[_InstructionSourceNodeReference] = None - ) -> Optional[_InstructionSourceNodeReference]: - """ - Retrieve the first child instruction source node with the given - instruction ID. Returns default if no such node exists. - - Args: - id (int): The instruction ID to search for. - default (Optional[_InstructionSourceNodeReference]): The value to return if no - node with the given ID is found. Default is None. - - Returns: - Optional[_InstructionSourceNodeReference]: The first child instruction source - node with the given instruction ID, or default if no such node - exists. - """ - - for child_id in self.graph.nodes[self.node_id].children: - if self.graph.nodes[child_id].instruction.id == id: - return _InstructionSourceNodeReference(self.graph, child_id) - - return default - - -def _isa_as_frame(self: ISA) -> pd.DataFrame: - """Convert an ISA to a pandas DataFrame. - - Args: - self (ISA): The ISA to convert. - - Returns: - pd.DataFrame: A DataFrame with columns for id, encoding, arity, - space, time, and error. - """ - data = { - "id": [instruction_name(inst.id) for inst in self], - "encoding": [Encoding(inst.encoding).name for inst in self], - "arity": [inst.arity for inst in self], - "space": [ - inst.expect_space() if inst.arity is not None else None for inst in self - ], - "time": [ - inst.expect_time() if inst.arity is not None else None for inst in self - ], - "error": [ - inst.expect_error_rate() if inst.arity is not None else None - for inst in self - ], - } - - df = pd.DataFrame(data) - df.set_index("id", inplace=True) - return df - - -def _requirements_as_frame(self: ISARequirements) -> pd.DataFrame: - """Convert ISA requirements to a pandas DataFrame. - - Args: - self (ISARequirements): The requirements to convert. 
- - Returns: - pd.DataFrame: A DataFrame with columns for id, encoding, and arity. - """ - data = { - "id": [instruction_name(inst.id) for inst in self], - "encoding": [Encoding(inst.encoding).name for inst in self], - "arity": [inst.arity for inst in self], - } - - df = pd.DataFrame(data) - df.set_index("id", inplace=True) - return df diff --git a/source/pip/qsharp/qre/_isa_enumeration.py b/source/pip/qsharp/qre/_isa_enumeration.py deleted file mode 100644 index 7543c071ed..0000000000 --- a/source/pip/qsharp/qre/_isa_enumeration.py +++ /dev/null @@ -1,428 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from __future__ import annotations - -import functools -import itertools -from abc import ABC, abstractmethod -from dataclasses import dataclass, field -from typing import Generator - -from ._architecture import ISAContext -from ._enumeration import _enumerate_instances -from ._qre import ISA - - -class ISAQuery(ABC): - """ - Abstract base class for all nodes in the ISA enumeration tree. - - Enumeration nodes define the structure of the search space for ISAs starting - from architectures and modified by ISA transforms such as error correction - schemes. They can be composed using operators like ``+`` (sum) and ``*`` - (product) to build complex enumeration strategies. - """ - - @abstractmethod - def enumerate(self, ctx: ISAContext) -> Generator[ISA, None, None]: - """ - Yields all ISA instances represented by this enumeration node. - - Args: - ctx (ISAContext): The enumeration context containing shared state, - e.g., access to the underlying architecture. - - Yields: - ISA: A possible ISA that can be generated from this node. - """ - pass - - def populate(self, ctx: ISAContext) -> int: - """ - Populate the provenance graph with instructions from this node. - - Unlike ``enumerate``, this does not yield ISA objects. 
Each transform - queries the graph for Pareto-optimal instructions matching its - requirements, and adds produced instructions directly to the graph. - - Args: - ctx (ISAContext): The enumeration context whose provenance graph - will be populated. - - Returns: - int: The starting node index of the instructions contributed by - this subtree. Used by consumers to scope graph queries to only - see their source's nodes. - """ - # Default implementation: consume enumerate for its side effects - start = ctx._provenance.raw_node_count() - for _ in self.enumerate(ctx): - pass - return start - - def __add__(self, other: ISAQuery) -> _SumNode: - """ - Perform a union of two enumeration nodes. - - Enumerating the sum node yields all ISAs from this node, followed by all - ISAs from the other node. Duplicate ISAs may be produced if both nodes - yield the same ISA. - - Args: - other (Node): The other enumeration node. - - Returns: - SumNode: A node representing the union of both enumerations. - - Example: - - The following enumerates ISAs from both SurfaceCode and ColorCode: - - .. code-block:: python - for isa in SurfaceCode.q() + ColorCode.q(): - ... - """ - if isinstance(self, _SumNode) and isinstance(other, _SumNode): - sources = self.sources + other.sources - return _SumNode(sources) - elif isinstance(self, _SumNode): - sources = self.sources + [other] - return _SumNode(sources) - elif isinstance(other, _SumNode): - sources = [self] + other.sources - return _SumNode(sources) - else: - return _SumNode([self, other]) - - def __mul__(self, other: ISAQuery) -> _ProductNode: - """ - Perform the cross product of two enumeration nodes. - - Enumerating the product node yields ISAs resulting from the Cartesian - product of ISAs from both nodes. The ISAs are combined using - concatenation (logical union). This means that instructions in the - other enumeration node with the same ID as an instruction in this - enumeration node will overwrite the instruction from this node. 
- - Args: - other (Node): The other enumeration node. - - Returns: - ProductNode: A node representing the product of both enumerations. - - Example: - - The following enumerates ISAs formed by combining ISAs from a - surface code and a factory: - - .. code-block:: python - - for isa in SurfaceCode.q() * Factory.q(): - ... - """ - if isinstance(self, _ProductNode) and isinstance(other, _ProductNode): - sources = self.sources + other.sources - return _ProductNode(sources) - elif isinstance(self, _ProductNode): - sources = self.sources + [other] - return _ProductNode(sources) - elif isinstance(other, _ProductNode): - sources = [self] + other.sources - return _ProductNode(sources) - else: - return _ProductNode([self, other]) - - def bind(self, name: str, node: ISAQuery) -> "_BindingNode": - """Create a BindingNode with this node as the component. - - Args: - name: The name to bind the component to. - node: The child enumeration node that may contain ISARefNodes. - - Returns: - A BindingNode with self as the component. - - Example: - - .. code-block:: python - ExampleErrorCorrection.q().bind("c", ISARefNode("c") * ISARefNode("c")) - """ - return _BindingNode(name=name, component=self, node=node) - - -@dataclass -class RootNode(ISAQuery): - """ - Represents the architecture's base ISA. - Reads from the context instead of holding a reference. - """ - - def enumerate(self, ctx: ISAContext) -> Generator[ISA, None, None]: - """ - Yields the architecture ISA from the context. - - Args: - ctx (Context): The enumeration context. - - Yields: - ISA: The architecture's provided ISA, called root. - """ - yield ctx._isa - - def populate(self, ctx: ISAContext) -> int: - """Architecture ISA is already in the graph from ``ISAContext.__init__``. - - Returns: - int: 1, since architecture nodes start at index 1. 
- """ - return 1 - - -# Singleton instance for convenience -ISA_ROOT = RootNode() - - -@dataclass -class _ComponentQuery(ISAQuery): - """ - Query node that enumerates ISAs based on a component type and source. - - This node takes a component type (which must have an ``enumerate_isas`` class - method) and a source node. It enumerates the source node to get base ISAs, - and then calls ``enumerate_isas`` on the component type for each base ISA - to generate derived ISAs. - - Attributes: - component: The component type to query (e.g., a QEC code class). - source: The source node providing input ISAs (default: ISA_ROOT). - kwargs: Additional keyword arguments passed to ``enumerate_isas``. - """ - - component: type - source: ISAQuery = field(default_factory=lambda: ISA_ROOT) - kwargs: dict = field(default_factory=dict) - - def enumerate(self, ctx: ISAContext) -> Generator[ISA, None, None]: - """ - Yields ISAs generated by the component from source ISAs. - - Args: - ctx (Context): The enumeration context. - - Yields: - ISA: A generated ISA instance. - """ - for isa in self.source.enumerate(ctx): - yield from self.component.enumerate_isas(isa, ctx, **self.kwargs) - - def populate(self, ctx: ISAContext) -> int: - """ - Populate the graph by querying matching instructions. - - Runs the source first to ensure dependency instructions are in - the graph, then queries the graph for all instructions matching - this component's requirements within the source's node range. - For each matching ISA × each hyperparameter instance, calls - ``provided_isa`` to add new instructions to the graph. - - Returns: - int: The starting node index of this component's own additions. 
- """ - source_start = self.source.populate(ctx) - impl_isas = ctx._provenance.query_satisfying( - self.component.required_isa(), min_node_idx=source_start - ) - own_start = ctx._provenance.raw_node_count() - for instance in _enumerate_instances(self.component, **self.kwargs): - ctx._transforms[id(instance)] = instance - for impl_isa in impl_isas: - for _ in instance.provided_isa(impl_isa, ctx): - pass - return own_start - - -@dataclass -class _ProductNode(ISAQuery): - """ - Node representing the Cartesian product of multiple source nodes. - - Attributes: - sources: A list of source nodes to combine. - """ - - sources: list[ISAQuery] - - def enumerate(self, ctx: ISAContext) -> Generator[ISA, None, None]: - """ - Yields ISAs formed by combining ISAs from all source nodes. - - Args: - ctx (Context): The enumeration context. - - Yields: - ISA: A combined ISA instance. - """ - source_generators = [source.enumerate(ctx) for source in self.sources] - yield from ( - functools.reduce(lambda a, b: a + b, isa_tuple) - for isa_tuple in itertools.product(*source_generators) - ) - - def populate(self, ctx: ISAContext) -> int: - """Populate the graph from each source sequentially (no cross product). - - Returns: - int: The starting node index before any source populated. - """ - first = ctx._provenance.raw_node_count() - for source in self.sources: - source.populate(ctx) - return first - - -@dataclass -class _SumNode(ISAQuery): - """ - Node representing the union of multiple source nodes. - - Attributes: - sources: A list of source nodes to enumerate sequentially. - """ - - sources: list[ISAQuery] - - def enumerate(self, ctx: ISAContext) -> Generator[ISA, None, None]: - """ - Yields ISAs from each source node in sequence. - - Args: - ctx (Context): The enumeration context. - - Yields: - ISA: An ISA instance from one of the sources. 
- """ - for source in self.sources: - yield from source.enumerate(ctx) - - def populate(self, ctx: ISAContext) -> int: - """Populate the graph from each source sequentially. - - Returns: - int: The starting node index before any source populated. - """ - first = ctx._provenance.raw_node_count() - for source in self.sources: - source.populate(ctx) - return first - - -@dataclass -class ISARefNode(ISAQuery): - """ - A reference to a bound ISA in the enumeration context. - - This node looks up the binding from the context and yields the bound ISA. - - Args: - name: The name of the bound ISA to reference. - """ - - name: str - - def enumerate(self, ctx: ISAContext) -> Generator[ISA, None, None]: - """ - Yields the bound ISA from the context. - - Args: - ctx (Context): The enumeration context containing bindings. - - Yields: - ISA: The bound ISA. - - Raises: - ValueError: If the name is not bound in the context. - """ - if self.name not in ctx._bindings: - raise ValueError(f"Undefined component reference: '{self.name}'") - yield ctx._bindings[self.name] - - def populate(self, ctx: ISAContext) -> int: - """Instructions already in graph from the bound component. - - Returns: - int: 1, since bound component nodes start at index 1. - """ - return 1 - - -@dataclass -class _BindingNode(ISAQuery): - """ - Enumeration node that binds a component to a name. - - This node enables the as_/ref pattern where multiple positions in the - enumeration tree share the same component instance. The bound component - is enumerated once, and its value is shared across all ISARefNodes with - the same name via the context. - - For multiple bindings, nest BindingNode instances. - - Args: - name: The name to bind the component to. - component: An EnumerationNode (e.g., _ComponentQuery) that produces the bound ISAs. - node: The child enumeration node that may contain ISARefNodes. - - Example: - - .. 
code-block:: python - ctx = EnumerationContext(architecture=arch) - - # Bind a code and reference it multiple times - BindingNode( - name="c", - component=ExampleErrorCorrection.q(), - node=ISARefNode("c") * ISARefNode("c"), - ).enumerate(ctx) - - # Multiple bindings via nesting - BindingNode( - name="c", - component=ExampleErrorCorrection.q(), - node=BindingNode( - name="f", - component=ExampleFactory.q(source=ISARefNode("c")), - node=ISARefNode("c") * ISARefNode("f"), - ), - ).enumerate(ctx) - """ - - name: str - component: ISAQuery - node: ISAQuery - - def enumerate(self, ctx: ISAContext) -> Generator[ISA, None, None]: - """ - Enumerate child nodes with the bound component in context. - - Args: - ctx (Context): The enumeration context. - - Yields: - ISA: An ISA instance from the child node. - """ - # Enumerate all ISAs from the component node - for isa in self.component.enumerate(ctx): - # Add binding to context and enumerate child node - new_ctx = ctx._with_binding(self.name, isa) - yield from self.node.enumerate(new_ctx) - - def populate(self, ctx: ISAContext) -> int: - """Populate the graph from both the component and the child node. - - Returns: - int: The starting node index of the component's additions. - """ - comp_start = self.component.populate(ctx) - self.node.populate(ctx) - return comp_start diff --git a/source/pip/qsharp/qre/_qre.py b/source/pip/qsharp/qre/_qre.py deleted file mode 100644 index 2d1aaa7aa5..0000000000 --- a/source/pip/qsharp/qre/_qre.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -# flake8: noqa E402 -# pyright: reportAttributeAccessIssue=false - -from .._native import ( - _binom_ppf, - block_linear_function, - Block, - constant_function, - Constraint, - ConstraintBound, - _estimate_parallel, - _estimate_with_graph, - _EstimationCollection, - EstimationResult, - FactoryResult, - _FloatFunction, - generic_function, - instruction_name, - Instruction, - InstructionFrontier, - _IntFunction, - ISA, - ISARequirements, - _ProvenanceGraph, - linear_function, - LatticeSurgery, - PSSPC, - Trace, - property_name_to_key, - property_name, - _float_to_bits, - _float_from_bits, -) diff --git a/source/pip/qsharp/qre/_qre.pyi b/source/pip/qsharp/qre/_qre.pyi deleted file mode 100644 index 370bd2c886..0000000000 --- a/source/pip/qsharp/qre/_qre.pyi +++ /dev/null @@ -1,1679 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from __future__ import annotations -from typing import Any, Callable, Iterator, Optional, overload - -import pandas as pd - -class ISA: - def __add__(self, other: ISA) -> ISA: - """ - Concatenate two ISAs (logical union). Instructions in the second - operand overwrite instructions in the first operand if they have the - same ID. - """ - ... - - def __contains__(self, id: int) -> bool: - """ - Check if the ISA contains an instruction with the given ID. - - Args: - id (int): The instruction ID. - - Returns: - bool: True if the ISA contains an instruction with the given ID, False otherwise. - """ - ... - - def satisfies(self, requirements: ISARequirements) -> bool: - """ - Check if the ISA satisfies the given ISA requirements. - """ - ... - - def __getitem__(self, id: int) -> Instruction: - """ - Get an instruction by its ID. - - Args: - id (int): The instruction ID. - - Returns: - Instruction: The instruction. - """ - ... - - def get( - self, id: int, default: Optional[Instruction] = None - ) -> Optional[Instruction]: - """ - Get an instruction by its ID, or return a default value if not found. 
- - Args: - id (int): The instruction ID. - default (Optional[Instruction]): The default value to return if the - instruction is not found. - - Returns: - Optional[Instruction]: The instruction, or the default value if not found. - """ - ... - - def __len__(self) -> int: - """ - Return the number of instructions in the ISA. - - Returns: - int: The number of instructions. - """ - ... - - def node_index(self, id: int) -> Optional[int]: - """ - Return the provenance graph node index for the given instruction ID. - - Args: - id (int): The instruction ID. - - Returns: - Optional[int]: The node index, or None if not found. - """ - ... - - def add_node(self, instruction_id: int, node_index: int) -> None: - """ - Add a pre-existing provenance graph node to the ISA. - - Args: - instruction_id (int): The instruction ID. - node_index (int): The node index in the provenance graph. - """ - ... - - def as_frame(self) -> pd.DataFrame: - """ - Return a pandas DataFrame representation of the ISA. - - The DataFrame will have one row per instruction, with columns for - instruction properties such as time, space, and error rate. The exact - columns may vary based on the properties of the instructions in the ISA. - - Returns: - pd.DataFrame: A DataFrame representation of the ISA. - """ - ... - - def __iter__(self) -> Iterator[Instruction]: - """ - Return an iterator over the instructions. - - Note: - The order of instructions is not guaranteed. - - Returns: - Iterator[Instruction]: The instruction iterator. - """ - ... - - def __str__(self) -> str: - """ - Return a string representation of the ISA. - - Note: - The order of instructions in the output is not guaranteed. - - Returns: - str: A string representation of the ISA. - """ - ... - -class ISARequirements: - @overload - def __new__(cls, *constraints: Constraint) -> ISARequirements: ... - @overload - def __new__(cls, constraints: list[Constraint], /) -> ISARequirements: ... 
- def __new__(cls, *constraints: Constraint | list[Constraint]) -> ISARequirements: - """ - Create an ISA requirements specification from a list of instructions - constraints. - - Args: - constraints (list[Constraint] | *Constraint): The list of instruction - constraints. - """ - ... - - def __len__(self) -> int: - """ - Return the number of constraints in the requirements specification. - - Returns: - int: The number of constraints. - """ - ... - - def __iter__(self) -> Iterator[Constraint]: - """ - Return an iterator over the constraints. - - Note: - The order of constraints is not guaranteed. - - Returns: - Iterator[Constraint]: The constraint iterator. - """ - ... - - def as_frame(self) -> pd.DataFrame: - """ - Return a pandas DataFrame representation of the ISA requirements. - - The DataFrame will have one row per instruction, with columns for - constraint properties such as encoding. - - Returns: - pd.DataFrame: A DataFrame representation of the ISA requirements. - """ - ... - -class Instruction: - @staticmethod - def fixed_arity( - id: int, - encoding: int, - arity: int, - time: int, - space: Optional[int], - length: Optional[int], - error_rate: float, - ) -> Instruction: - """ - Create an instruction with a fixed arity. - - Note: - This function is not intended to be called directly by the user, use qre.instruction instead. - - Args: - id (int): The instruction ID. - encoding (int): The instruction encoding. 0 = Physical, 1 = Logical. - arity (int): The instruction arity. - time (int): The instruction time in ns. - space (Optional[int]): The instruction space in number of physical - qubits. If None, length is used. - length (Optional[int]): The arity including ancilla qubits. If None, - arity is used. - error_rate (float): The instruction error rate. - - Returns: - Instruction: The instruction. - """ - ... 
- - @staticmethod - def variable_arity( - id: int, - encoding: int, - time_fn: _IntFunction, - space_fn: _IntFunction, - error_rate_fn: _FloatFunction, - length_fn: Optional[_IntFunction], - ) -> Instruction: - """ - Create an instruction with variable arity. - - Note: - This function is not intended to be called directly by the user, use qre.instruction instead. - - Args: - id (int): The instruction ID. - encoding (int): The instruction encoding. 0 = Physical, 1 = Logical. - time_fn (_IntFunction): The time function. - space_fn (_IntFunction): The space function. - error_rate_fn (_FloatFunction): The error rate function. - length_fn (Optional[_IntFunction]): The length function. - If None, space_fn is used. - - Returns: - Instruction: The instruction. - """ - ... - - def with_id(self, id: int) -> Instruction: - """ - Return a copy of the instruction with the given ID. - - Note: - The created instruction will not inherit the source property of the - original instruction and must be set by the user if intended. - - Args: - id (int): The instruction ID. - - Returns: - Instruction: A copy of the instruction with the given ID. - """ - ... - - @property - def id(self) -> int: - """ - The instruction ID. - - Returns: - int: The instruction ID. - """ - ... - - @property - def encoding(self) -> int: - """ - The instruction encoding. 0 = Physical, 1 = Logical. - - Returns: - int: The instruction encoding. - """ - ... - - @property - def arity(self) -> Optional[int]: - """ - The instruction arity. - - Returns: - Optional[int]: The instruction arity. - """ - ... - - def space(self, arity: Optional[int] = None) -> Optional[int]: - """ - The instruction space in number of physical qubits. - - Args: - arity (Optional[int]): The specific arity to check. - - Returns: - Optional[int]: The instruction space in number of physical qubits. - """ - ... - - def time(self, arity: Optional[int] = None) -> Optional[int]: - """ - The instruction time in ns. 
- - Args: - arity (Optional[int]): The specific arity to check. - - Returns: - Optional[int]: The instruction time in ns. - """ - ... - - def error_rate(self, arity: Optional[int] = None) -> Optional[float]: - """ - The instruction error rate. - - Args: - arity (Optional[int]): The specific arity to check. - - Returns: - Optional[float]: The instruction error rate. - """ - ... - - def expect_space(self, arity: Optional[int] = None) -> int: - """ - The instruction space in number of physical qubits. Raises an error if not found. - - Args: - arity (Optional[int]): The specific arity to check. - - Returns: - int: The instruction space in number of physical qubits. - """ - ... - - def expect_time(self, arity: Optional[int] = None) -> int: - """ - The instruction time in ns. Raises an error if not found. - - Args: - arity (Optional[int]): The specific arity to check. - - Returns: - int: The instruction time in ns. - """ - ... - - def expect_error_rate(self, arity: Optional[int] = None) -> float: - """ - The instruction error rate. Raises an error if not found. - - Args: - arity (Optional[int]): The specific arity to check. - - Returns: - float: The instruction error rate. - """ - ... - - def set_source(self, index: int) -> None: - """ - Set the source index for the instruction. - - Args: - index (int): The source index to set. - """ - ... - - @property - def source(self) -> int: - """ - Get the source index for the instruction. - - Returns: - int: The source index for the instruction. - """ - ... - - def set_property(self, key: int, value: int) -> None: - """ - Set a property on the instruction. - - Args: - key (int): The property key. - value (int): The property value. - """ - ... - - def get_property(self, key: int) -> Optional[int]: - """ - Get a property by its key. - - Args: - key (int): The property key. - - Returns: - Optional[int]: The property value, or None if not found. - """ - ... 
- - def has_property(self, key: int) -> bool: - """ - Check if the instruction has a property with the given key. - - Args: - key (int): The property key. - - Returns: - bool: True if the instruction has the property, False otherwise. - """ - ... - - def get_property_or(self, key: int, default: int) -> int: - """ - Get a property by its key, or return a default value if not found. - - Args: - key (int): The property key. - default (int): The default value to return if the property is not found. - - Returns: - int: The property value, or the default value if not found. - """ - ... - - def __getitem__(self, key: int) -> int: - """ - Get a property by its key, or raise an error if not found. - - Args: - key (int): The property key. - - Returns: - int: The property value. - """ - ... - - def __str__(self) -> str: - """ - Return a string representation of the instruction. - - Returns: - str: A string representation of the instruction. - """ - ... - -class ConstraintBound: - """ - A bound for a constraint. - """ - - @staticmethod - def lt(value: float) -> ConstraintBound: - """ - Create a less than constraint bound. - - Args: - value (float): The value. - - Returns: - ConstraintBound: The constraint bound. - """ - ... - - @staticmethod - def le(value: float) -> ConstraintBound: - """ - Create a less equal constraint bound. - - Args: - value (float): The value. - - Returns: - ConstraintBound: The constraint bound. - """ - ... - - @staticmethod - def eq(value: float) -> ConstraintBound: - """ - Create an equal constraint bound. - - Args: - value (float): The value. - - Returns: - ConstraintBound: The constraint bound. - """ - ... - - @staticmethod - def gt(value: float) -> ConstraintBound: - """ - Create a greater than constraint bound. - - Args: - value (float): The value. - - Returns: - ConstraintBound: The constraint bound. - """ - ... - - @staticmethod - def ge(value: float) -> ConstraintBound: - """ - Create a greater equal constraint bound. 
- - Args: - value (float): The value. - - Returns: - ConstraintBound: The constraint bound. - """ - ... - -class Constraint: - """ - An instruction constraint that can be used to describe ISA requirements - for ISA transformations. - """ - - def __new__( - cls, - id: int, - encoding: int, - arity: Optional[int], - error_rate: Optional[ConstraintBound], - ) -> Constraint: - """ - Note: - This function is not intended to be called directly by the user, use qre.constraint instead. - - Args: - id (int): The instruction ID. - encoding (int): The instruction encoding. 0 = Physical, 1 = Logical. - arity (Optional[int]): The instruction arity. If None, instruction is - assumed to have variable arity. - error_rate (Optional[ConstraintBound]): The constraint on the error rate. - - Returns: - InstructionConstraint: The instruction constraint. - """ - ... - - @property - def id(self) -> int: - """ - The instruction ID. - - Returns: - int: The instruction ID. - """ - ... - - @property - def encoding(self) -> int: - """ - The instruction encoding. 0 = Physical, 1 = Logical. - - Returns: - int: The instruction encoding. - """ - ... - - @property - def arity(self) -> Optional[int]: - """ - The instruction arity. - - Returns: - Optional[int]: The instruction arity. - """ - ... - - @property - def error_rate(self) -> Optional[ConstraintBound]: - """ - The constraint on the instruction error rate. - - Returns: - Optional[ConstraintBound]: The constraint on the instruction error rate. - """ - ... - - def add_property(self, property: int) -> None: - """ - Add a property requirement to the constraint. - - Args: - property (int): The property key that must be present in matching instructions. - """ - ... - - def has_property(self, property: int) -> bool: - """ - Check if the constraint requires a specific property. - - Args: - property (int): The property key to check. - - Returns: - bool: True if the constraint requires this property, False otherwise. - """ - ... 
- -class _IntFunction: - def __call__(self, arity: int) -> int: ... - -class _FloatFunction: - def __call__(self, arity: int) -> float: ... - -@overload -def constant_function(value: int) -> _IntFunction: ... -@overload -def constant_function(value: float) -> _FloatFunction: ... -def constant_function( - value: int | float, -) -> _IntFunction | _FloatFunction: - """ - Create a constant function. - - Args: - value (int | float): The constant value. - - Returns: - _IntFunction | _FloatFunction: The constant function. - """ - ... - -@overload -def linear_function(slope: int) -> _IntFunction: ... -@overload -def linear_function(slope: float) -> _FloatFunction: ... -def linear_function( - slope: int | float, -) -> _IntFunction | _FloatFunction: - """ - Create a linear function. - - Args: - slope (int | float): The slope. - - Returns: - _IntFunction | _FloatFunction: The linear function. - """ - ... - -@overload -def block_linear_function( - block_size: int, slope: int, offset: Optional[int] = None -) -> _IntFunction: ... -@overload -def block_linear_function( - block_size: int, slope: float, offset: Optional[float] = None -) -> _FloatFunction: ... -def block_linear_function( - block_size: int, slope: int | float, offset: Optional[int | float] = None -) -> _IntFunction | _FloatFunction: - """ - Create a block linear function that takes an arity (number of qubits) as - input. Given an arity, it will compute the number of blocks `num_blocks` by - computing `ceil(arity / block_size)` and then return `slope * num_blocks + - offset`. - - Args: - block_size (int): The block size. - slope (int | float): The slope. - offset (Optional[int | float]): The offset. Default is `None`, which is - treated as 0 for int and 0.0 for float. - - Returns: - _IntFunction | _FloatFunction: The block linear function. - """ - ... - -@overload -def generic_function(func: Callable[[int], int]) -> _IntFunction: ... -@overload -def generic_function(func: Callable[[int], float]) -> _FloatFunction: ... 
-def generic_function( - func: Callable[[int], int | float], -) -> _IntFunction | _FloatFunction: - """ - Create a generic function from a Python callable. - - Note: - Only use this function if the other function constructors - (constant_function, linear_function, and block_linear_function) do not - meet your needs, as using a Python callable can have performance - implications. If using this function, keep the logic in the callable as - simple as possible to minimize overhead. - - Args: - func (Callable[[int], int | float]): The Python callable. - - Returns: - _IntFunction | _FloatFunction: The generic function. - """ - ... - -class _ProvenanceGraph: - """ - Represents the provenance graph of instructions in a trace. Each node in - the graph corresponds to an instruction and the transform from which it was - produced, and edges represent transformations applied to instructions during - enumeration. - """ - - def add_node( - self, instruction: Instruction, transform_id: int, children: list[int] - ) -> int: - """ - Add a node to the provenance graph. - - Args: - instruction (int): The instruction corresponding to the node. - transform_id (int): The transform ID corresponding to the node. - children (list[int]): The list of child node indices in the provenance graph. - - Returns: - int: The index of the added node in the provenance graph. - """ - ... - - def instruction(self, node_index: int) -> Instruction: - """ - Return the instruction for a given node index. - - Args: - node_index (int): The index of the node in the provenance graph. - - Returns: - int: The instruction corresponding to the node. - """ - ... - - def transform_id(self, node_index: int) -> int: - """ - Return the transform ID for a given node index. - - Args: - node_index (int): The index of the node in the provenance graph. - - Returns: - int: The transform ID corresponding to the node. - """ - ... 
- - def children(self, node_index: int) -> list[int]: - """ - Return the list of child node indices for a given node index. - - Args: - node_index (int): The index of the node in the provenance graph. - - Returns: - list[int]: The list of child node indices. - """ - ... - - def num_nodes(self) -> int: - """ - Return the number of nodes in the provenance graph. - - Returns: - int: The number of nodes in the provenance graph. - """ - ... - - def num_edges(self) -> int: - """ - Return the number of edges in the provenance graph. - - Returns: - int: The number of edges in the provenance graph. - """ - ... - - @overload - def add_instruction( - self, - instruction: Instruction, - ) -> int: ... - @overload - def add_instruction( - self, - id: int, - encoding: int = 0, - *, - arity: Optional[int] = 1, - time: int | _IntFunction = ..., - space: Optional[int | _IntFunction] = None, - length: Optional[int | _IntFunction] = None, - error_rate: float | _FloatFunction = ..., - **kwargs: int, - ) -> int: ... - def add_instruction( - self, - id_or_instruction: int | Instruction, - encoding: int = 0, - *, - arity: Optional[int] = 1, - time: int | _IntFunction = ..., - space: Optional[int | _IntFunction] = None, - length: Optional[int | _IntFunction] = None, - error_rate: float | _FloatFunction = ..., - **kwargs: int, - ) -> int: - """ - Add an instruction to the provenance graph with no transform or - children. - - Can be called with a pre-existing ``Instruction`` or with keyword - args to create one inline. - - Args: - id_or_instruction: An instruction ID (int) or ``Instruction``. - encoding: 0 = Physical, 1 = Logical. Ignored for ``Instruction``. - arity: Instruction arity, ``None`` for variable. Ignored for - ``Instruction``. - time: Time in ns (or ``_IntFunction``). Ignored for ``Instruction``. - space: Space in physical qubits (or ``_IntFunction``). Ignored for - ``Instruction``. - length: Arity including ancillas. Ignored for ``Instruction``. 
- error_rate: Error rate (or ``_FloatFunction``). Ignored for - ``Instruction``. - **kwargs: Additional properties (e.g. ``distance=9``). - - Returns: - int: The node index of the added instruction. - """ - ... - - def make_isa(self, node_indices: list[int]) -> ISA: - """ - Create an ISA backed by this provenance graph from the given node - indices. - - Args: - node_indices: A list of node indices in the provenance graph. - - Returns: - ISA: An ISA referencing this provenance graph. - """ - ... - - def build_pareto_index(self) -> None: - """ - Builds the per-instruction-ID Pareto index. - - For each instruction ID, retains only the Pareto-optimal nodes w.r.t. - (space, time, error_rate) evaluated at arity 1. Must be called after - all nodes have been added. - """ - ... - - def query_satisfying( - self, - requirements: ISARequirements, - min_node_idx: Optional[int] = None, - ) -> list[ISA]: - """ - Return ISAs formed from Pareto-optimal graph nodes satisfying the - given requirements. - - For each constraint in requirements, selects matching Pareto-optimal - nodes. Returns the Cartesian product of per-constraint matches, - augmented with one representative node per unconstrained instruction - ID. - - Must be called after ``build_pareto_index``. - - Args: - requirements: The ISA requirements to satisfy. - min_node_idx: If provided, only consider Pareto nodes at or above - this index for constrained groups. - - Returns: - list[ISA]: ISAs formed from matching Pareto-optimal nodes. - """ - ... - - def raw_node_count(self) -> int: - """ - Return the raw node count (including the sentinel at index 0). - - Returns: - int: The number of nodes in the graph. - """ - ... - - def total_isa_count(self) -> int: - """ - Return the total number of ISAs that can be formed from Pareto-optimal - nodes. - - Requires ``build_pareto_index`` to have been called. - - Returns: - int: The total number of ISAs that can be formed. - """ - ... 
- -class EstimationResult: - """ - Represents the result of a resource estimation. - """ - - def __new__( - cls, *, qubits: int = 0, runtime: int = 0, error: float = 0.0 - ) -> EstimationResult: - """ - Create a new estimation result. - - Args: - qubits (int): The number of logical qubits. - runtime (int): The runtime in nanoseconds. - error (float): The error probability of the computation. - - Returns: - EstimationResult: The estimation result. - """ - ... - - @property - def qubits(self) -> int: - """ - The number of logical qubits. - - Returns: - int: The number of logical qubits. - """ - ... - - @qubits.setter - def qubits(self, qubits: int) -> None: - """ - Set the number of logical qubits. - - Args: - qubits (int): The number of logical qubits to set. - """ - ... - - @property - def runtime(self) -> int: - """ - The runtime in nanoseconds. - - Returns: - int: The runtime in nanoseconds. - """ - ... - - @runtime.setter - def runtime(self, runtime: int) -> None: - """ - Set the runtime. - - Args: - runtime (int): The runtime in nanoseconds to set. - """ - ... - - @property - def error(self) -> float: - """ - The error probability of the computation. - - Returns: - float: The error probability of the computation. - """ - ... - - @error.setter - def error(self, error: float) -> None: - """ - Set the error probability. - - Args: - error (float): The error probability to set. - """ - ... - - @property - def factories(self) -> dict[int, FactoryResult]: - """ - The factory results. - - Returns: - dict[int, FactoryResult]: A dictionary mapping factory IDs to their results. - """ - ... - - @property - def isa(self) -> ISA: - """ - The ISA used for the estimation. - - Returns: - ISA: The ISA used for the estimation. - """ - ... - - @property - def properties(self) -> dict[int, bool | int | float | str]: - """ - Custom properties from application generation and trace transform. 
- - Returns: - dict[int, bool | int | float | str]: A dictionary mapping property keys to their values. - """ - ... - - def set_property(self, key: int, value: bool | int | float | str) -> None: - """ - Set a custom property. - - Args: - key (int) The property key. - value (bool | int | float | str): The property value. All values of type `int`, `float`, `bool`, and `str` - are supported. Any other value is converted to a string using its `__str__` method. - """ - ... - - def __str__(self) -> str: - """ - Return a string representation of the estimation result. - - Returns: - str: A string representation of the estimation result. - """ - ... - -class _EstimationCollection: - """ - Represents a collection of estimation results. Results are stored as a 2D - Pareto frontier with physical qubits and runtime as objectives. - """ - - def __new__(cls) -> _EstimationCollection: - """ - Create a new estimation collection. - - Returns: - _EstimationCollection: The estimation collection. - """ - ... - - def insert(self, result: EstimationResult) -> None: - """ - Insert an estimation result into the collection. - - Args: - result (EstimationResult): The estimation result to insert. - """ - ... - - def __len__(self) -> int: - """ - Return the number of estimation results in the collection. - - Returns: - int: The number of estimation results. - """ - ... - - def __iter__(self) -> Iterator[EstimationResult]: - """ - Return an iterator over the estimation results. - - Returns: - Iterator[EstimationResult]: The estimation result iterator. - """ - ... - - @property - def total_jobs(self) -> int: - """ - Return the total number of (trace, ISA) estimation jobs. - - Returns: - int: The total number of jobs. - """ - ... - - @property - def successful_estimates(self) -> int: - """ - Return the number of estimation jobs that completed successfully - (before Pareto filtering). - - Returns: - int: The number of successful estimates. - """ - ... 
- - @property - def all_summaries(self) -> list[tuple[int, int, int, int]]: - """ - Return lightweight summaries of ALL successful estimates as a list - of (trace_index, isa_index, qubits, runtime) tuples. - - Returns: - list[tuple[int, int, int, int]]: List of (trace_index, isa_index, - qubits, runtime) for every successful estimation. - """ - ... - - @property - def isas(self) -> list[ISA]: - """ - Return the list of ISAs for which estimates were performed. - - Returns: - list[ISA]: The list of ISAs. - """ - ... - -class FactoryResult: - """ - Represents the result of a factory used in resource estimation. - """ - - @property - def copies(self) -> int: - """ - The number of factory copies. - - Returns: - int: The number of factory copies. - """ - ... - - @property - def runs(self) -> int: - """ - The number of factory runs. - - Returns: - int: The number of factory runs. - """ - ... - - @property - def error_rate(self) -> float: - """ - The error rate of the factory. - - Returns: - float: The error rate of the factory. - """ - ... - - @property - def states(self) -> int: - """ - The number of states produced by the factory. - - Returns: - int: The number of states produced by the factory. - """ - ... - -class Trace: - """ - Represents a quantum program optimized for resource estimation. - - A trace originates from a quantum application and can be modified via trace - transformations. It consists of blocks of operations. - """ - - def __new__(cls, compute_qubits: int) -> Trace: - """ - Create a new trace. - - Returns: - Trace: The trace. - """ - ... - - def clone_empty(self, compute_qubits: Optional[int] = None) -> Trace: - """ - Create a new trace with the same metadata but empty block. - - Args: - compute_qubits (Optional[int]): The number of compute qubits. If None, - the number of compute qubits of the original trace is used. - - Returns: - Trace: The new trace. - """ - ... 
- - @classmethod - def from_json(cls, json: str) -> Trace: - """ - Create a trace from a JSON string. - - Args: - json (str): The JSON string. - - Returns: - Trace: The trace. - """ - ... - - def to_json(self) -> str: - """ - Serializes the trace to a JSON string. - - Returns: - str: The JSON string representation of the trace. - """ - ... - - @property - def compute_qubits(self) -> int: - """ - The number of compute qubits. - - Returns: - int: The number of compute qubits. - """ - ... - - @compute_qubits.setter - def compute_qubits(self, qubits: int) -> None: - """ - Set the number of compute qubits. - - Args: - qubits (int): The number of compute qubits to set. - """ - ... - - @property - def base_error(self) -> float: - """ - The base error of the trace. - - Returns: - float: The base error of the trace. - """ - ... - - def increment_base_error(self, amount: float) -> None: - """ - Increments the base error. - - Args: - amount (float): The amount to increment. - """ - ... - - @property - def memory_qubits(self) -> Optional[int]: - """ - The number of memory qubits, if set. - - Returns: - Optional[int]: The number of memory qubits, or None if not set. - """ - ... - - def has_memory_qubits(self) -> bool: - """ - Check if the trace has memory qubits set. - - Returns: - bool: True if memory qubits are set, False otherwise. - """ - ... - - @memory_qubits.setter - def memory_qubits(self, qubits: int) -> None: - """ - Set the number of memory qubits. - - Args: - qubits (int): The number of memory qubits. - """ - ... - - def increment_memory_qubits(self, amount: int) -> None: - """ - Increments the number of memory qubits. If memory qubits have not been - set, initializes them to 0 before incrementing. - - Args: - amount (int): The amount to increment. - """ - ... - - def increment_resource_state(self, resource_id: int, amount: int) -> None: - """ - Increments a resource state count. - - Args: - resource_id (int): The resource state ID. 
- amount (int): The amount to increment. - """ - ... - - def set_property(self, key: int, value: Any) -> None: - """ - Set a property. All values of type `int`, `float`, `bool`, and `str` - are supported. Any other value is converted to a string using its - `__str__` method. - - Args: - key (int): The property key. - value (Any): The property value. - """ - ... - - def get_property(self, key: int) -> Optional[int | float | bool | str]: - """ - Get a property. - - Args: - key (int): The property key. - - Returns: - Optional[int | float | bool | str]: The property value, or None if not found. - """ - ... - - def has_property(self, key: int) -> bool: - """ - Check if a property with the given key exists. - - Args: - key (int): The property key. - - Returns: - bool: True if the property exists, False otherwise. - """ - ... - - @property - def total_qubits(self) -> int: - """ - The total number of qubits (compute + memory). - - Returns: - int: The total number of qubits. - """ - ... - - @property - def depth(self) -> int: - """ - The trace depth. - - Returns: - int: The trace depth. - """ - ... - - @property - def num_gates(self) -> int: - """ - The total number of gates in the trace. - - Returns: - int: The total number of gates. - """ - ... - - def estimate( - self, isa: ISA, max_error: Optional[float] = None - ) -> Optional[EstimationResult]: - """ - Estimate resources for the trace given a logical ISA. - - Args: - isa (ISA): The logical ISA. - max_error (Optional[float]): The maximum allowed error. If None, - Pareto points are computed. - - Returns: - Optional[EstimationResult]: The estimation result if max_error is - provided, otherwise valid Pareto points. - """ - ... # The implementation in Rust returns Option, so it fits - - @property - def resource_states(self) -> dict[int, int]: - """ - The resource states used in the trace. - - Returns: - dict[int, int]: A dictionary mapping instruction IDs to their counts. - """ - ... 
- - def add_operation( - self, id: int, qubits: list[int], params: list[float] = [] - ) -> None: - """ - Add an operation to the trace. - - Args: - id (int): The operation ID. - qubits (list[int]): The qubits involved in the operation. - params (list[float]): The operation parameters. - """ - ... - - def root_block(self) -> Block: - """ - Return the root block of the trace. - - Returns: - Block: The root block of the trace. - """ - ... - - def add_block(self, repetitions: int = 1) -> Block: - """ - Add a block to the trace. - - Args: - repetitions (int): The number of times the block is repeated. - - Returns: - Block: The block. - """ - ... - - @property - def required_isa(self) -> ISARequirements: - """ - The required ISA for the trace. - - Returns: - ISARequirements: The required ISA for the trace. - """ - ... - - def __str__(self) -> str: - """ - Return a string representation of the trace. - - Returns: - str: A string representation of the trace. - """ - ... - -class Block: - """ - Represents a block of operations in a trace. - - An operation in a block can either refer to an instruction applied to some - qubits or can be another block to create a hierarchical structure. Blocks - can be repeated. - """ - - def add_operation( - self, id: int, qubits: list[int], params: list[float] = [] - ) -> None: - """ - Add an operation to the block. - - Args: - id (int): The operation ID. - qubits (list[int]): The qubits involved in the operation. - params (list[float]): The operation parameters. - """ - ... - - def add_block(self, repetitions: int = 1) -> Block: - """ - Add a nested block to the block. - - Args: - repetitions (int): The number of times the block is repeated. - - Returns: - Block: The block. - """ - ... - - def __str__(self) -> str: - """ - Return a string representation of the block. - - Returns: - str: A string representation of the block. - """ - ... - -class PSSPC: - def __new__(cls, num_ts_per_rotation: int, ccx_magic_states: bool) -> PSSPC: ... 
- def transform(self, trace: Trace) -> Optional[Trace]: ... - -class LatticeSurgery: - def __new__(cls, slow_down_factor: float) -> LatticeSurgery: ... - def transform(self, trace: Trace) -> Optional[Trace]: ... - -class InstructionFrontier: - """ - Represents a Pareto frontier of instructions with space, time, and error - rates as objectives. - """ - - def __new__(cls, *, with_error_objective: bool = True) -> InstructionFrontier: - """ - Create a new instruction frontier. - - Args: - with_error_objective (bool): If True (default), the frontier uses - three objectives (space, time, error rate). If False, it uses - two objectives (space, time). - """ - ... - - def insert(self, point: Instruction): - """ - Insert an instruction into the frontier. - - Args: - point (Instruction): The instruction to insert. - """ - ... - - def extend(self, points: list[Instruction]) -> None: - """ - Extend the frontier with a list of instructions. - - Args: - points (list[Instruction]): The instructions to insert. - """ - ... - - def __len__(self) -> int: - """ - Return the number of instructions in the frontier. - - Returns: - int: The number of instructions. - """ - ... - - def __iter__(self) -> Iterator[Instruction]: - """ - Return an iterator over the instructions in the frontier. - - Returns: - Iterator[Instruction]: The iterator. - """ - ... - - @staticmethod - def load( - filename: str, *, with_error_objective: bool = True - ) -> InstructionFrontier: - """ - Load an instruction frontier from a file. - - Args: - filename (str): The file name. - with_error_objective (bool): If True (default), the frontier uses - three objectives (space, time, error rate). If False, it uses - two objectives (space, time). - - Returns: - InstructionFrontier: The loaded instruction frontier. - """ - ... - - def dump(self, filename: str) -> None: - """ - Dump the instruction frontier to a file. - - Args: - filename (str): The file name. - """ - ... 
- -def _estimate_parallel( - traces: list[Trace], - isas: list[ISA], - max_error: float = 1.0, - post_process: bool = False, -) -> _EstimationCollection: - """ - Estimate resources for multiple traces and ISAs in parallel. - - Args: - traces (list[Trace]): The list of traces. - isas (list[ISA]): The list of ISAs. - max_error (float): The maximum allowed error. The default is 1.0. - post_process (bool): If True, computes auxiliary data such as result - summaries needed for post-processing after estimation. - - Returns: - _EstimationCollection: The estimation collection. - """ - ... - -def _estimate_with_graph( - traces: list[Trace], - graph: _ProvenanceGraph, - max_error: float = 1.0, - post_process: bool = False, -) -> _EstimationCollection: - """ - Estimate resources using a Pareto-filtered provenance graph. - - Instead of forming the full Cartesian product of ISAs × traces, this - function enumerates per-trace instruction combinations from the - Pareto-optimal subsets in the frozen graph. - - Args: - traces (list[Trace]): The list of traces to estimate. - graph (_ProvenanceGraph): The provenance graph to use for estimation. - max_error (float): The maximum allowed error. The default is 1.0. - post_process (bool): If True, computes auxiliary data such as result - summaries and ISAs needed for post-processing after estimation. - - Returns: - _EstimationCollection: The estimation collection. - """ - ... - -def _binom_ppf(q: float, n: int, p: float) -> int: - """ - A replacement for SciPy's binom.ppf that is faster and does not require - SciPy as a dependency. - """ - ... - -def _float_to_bits(f: float) -> int: - """Convert a float to its bit representation as an integer.""" - ... - -def _float_from_bits(b: int) -> float: - """Convert a float from its bit representation as an integer.""" - ... - -def instruction_name(id: int) -> Optional[str]: - """ - Return the name of an instruction given its ID, if known. - - Args: - id (int): The instruction ID. 
- - Returns: - Optional[str]: The name of the instruction, or None if the ID is not recognized. - """ - ... - -def property_name_to_key(name: str) -> Optional[int]: - """ - Convert a property name to its corresponding key, if known. - - Args: - name (str): The property name. - - Returns: - Optional[int]: The property key, or None if the name is not recognized. - """ - ... - -def property_name(id: int) -> Optional[str]: - """ - Convert a property key to its corresponding name, if known. - - Args: - id (int): The property key. - - Returns: - Optional[str]: The property name, or None if the key is not recognized. - """ - ... diff --git a/source/pip/qsharp/qre/_results.py b/source/pip/qsharp/qre/_results.py deleted file mode 100644 index 2429f68787..0000000000 --- a/source/pip/qsharp/qre/_results.py +++ /dev/null @@ -1,418 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from __future__ import annotations - -from dataclasses import dataclass, field -from typing import Optional, Callable, Any, Iterable - -import pandas as pd - -from ._architecture import ISAContext -from ._qre import FactoryResult, instruction_name, EstimationResult, property_name -from ._instruction import InstructionSource -from .property_keys import ( - PHYSICAL_COMPUTE_QUBITS, - PHYSICAL_MEMORY_QUBITS, - PHYSICAL_FACTORY_QUBITS, -) - - -class EstimationTable(list["EstimationTableEntry"]): - """A table of quantum resource estimation results. - - Extends ``list[EstimationTableEntry]`` and provides configurable columns for - displaying estimation data. By default the table includes *qubits*, - *runtime* (displayed as a ``pandas.Timedelta``), and *error* columns. - Additional columns can be added or inserted with ``add_column`` and - ``insert_column``. 
- """ - - def __init__(self): - """Initialize an empty estimation table with default columns.""" - super().__init__() - - self.name: Optional[str] = None - self.stats = EstimationTableStats() - - self._columns: list[tuple[str, EstimationTableColumn]] = [ - ("qubits", EstimationTableColumn(lambda entry: entry.qubits)), - ( - "runtime", - EstimationTableColumn( - lambda entry: entry.runtime, - formatter=lambda x: pd.Timedelta(x, unit="ns"), - ), - ), - ("error", EstimationTableColumn(lambda entry: entry.error)), - ] - - def add_column( - self, - name: str, - function: Callable[[EstimationTableEntry], Any], - formatter: Optional[Callable[[Any], Any]] = None, - ) -> None: - """Add a column to the estimation table. - - Args: - name (str): The name of the column. - function (Callable[[EstimationTableEntry], Any]): A function that - takes an EstimationTableEntry and returns the value for this - column. - formatter (Optional[Callable[[Any], Any]]): An optional function - that formats the output of ``function`` for display purposes. - """ - self._columns.append((name, EstimationTableColumn(function, formatter))) - - def insert_column( - self, - index: int, - name: str, - function: Callable[[EstimationTableEntry], Any], - formatter: Optional[Callable[[Any], Any]] = None, - ) -> None: - """Insert a column at the specified index in the estimation table. - - Args: - index (int): The index at which to insert the column. - name (str): The name of the column. - function (Callable[[EstimationTableEntry], Any]): A function that - takes an EstimationTableEntry and returns the value for this - column. - formatter (Optional[Callable[[Any], Any]]): An optional function - that formats the output of ``function`` for display purposes. 
- """ - self._columns.insert(index, (name, EstimationTableColumn(function, formatter))) - - def add_qubit_partition_column(self) -> None: - """Add columns for the physical compute, factory, and memory qubit counts.""" - self.add_column( - "physical_compute_qubits", - lambda entry: entry.properties.get(PHYSICAL_COMPUTE_QUBITS, 0), - ) - self.add_column( - "physical_factory_qubits", - lambda entry: entry.properties.get(PHYSICAL_FACTORY_QUBITS, 0), - ) - self.add_column( - "physical_memory_qubits", - lambda entry: entry.properties.get(PHYSICAL_MEMORY_QUBITS, 0), - ) - - def add_factory_summary_column(self) -> None: - """Add a column to the estimation table that summarizes the factories used in the estimation.""" - - def summarize_factories(entry: EstimationTableEntry) -> str: - if not entry.factories: - return "None" - return ", ".join( - f"{factory_result.copies}×{instruction_name(id)}" - for id, factory_result in entry.factories.items() - ) - - self.add_column("factories", summarize_factories) - - def add_property_column( - self, - property_key: int, - column_name: Optional[str] = None, - default_value: Any = None, - ) -> None: - """Add a column for a specific property key from the estimation results. - - Args: - property_key (int): The property key to add as a column. - column_name (Optional[str]): An optional name for the column. If not provided, the column will be named "property_{property_key}". - default_value (Any): The default value to use if the property key is not present in an entry's properties. Defaults to None. 
- """ - if column_name is None: - # property_name may return None for unknown and custom property keys - column_name = property_name(property_key) - - if column_name is None: - column_name = f"property_{property_key}" - else: - # Normalize the column name to lowercase - column_name = column_name.lower() - - self.add_column( - column_name, - lambda entry: entry.properties.get(property_key, default_value), - ) - - def as_frame(self): - """Convert the estimation table to a ``pandas.DataFrame``. - - Each row corresponds to an ``EstimationTableEntry`` and each - column is determined by the columns registered on this table. Column - formatters, when present, are applied to the values before they are - placed in the frame. - - Returns: - pandas.DataFrame: A DataFrame representation of the estimation - results. - """ - return pd.DataFrame( - [ - { - column_name: ( - column.formatter(column.function(entry)) - if column.formatter is not None - else column.function(entry) - ) - for column_name, column in self._columns - } - for entry in self - ] - ) - - def plot(self, **kwargs): - """Plot this table's results. - - Convenience wrapper around ``plot_estimates``. All keyword - arguments are forwarded. - - Returns: - matplotlib.figure.Figure: The figure containing the plot. - """ - return plot_estimates(self, **kwargs) - - -@dataclass(frozen=True, slots=True) -class EstimationTableColumn: - """Definition of a single column in an ``EstimationTable``. - - Attributes: - function: A callable that extracts the raw column value from an - ``EstimationTableEntry``. - formatter: An optional callable that transforms the raw value for - display purposes (e.g. converting nanoseconds to a - ``pandas.Timedelta``). - """ - - function: Callable[[EstimationTableEntry], Any] - formatter: Optional[Callable[[Any], Any]] = None - - -@dataclass(frozen=True, slots=True) -class EstimationTableEntry: - """A single row in an ``EstimationTable``. 
- - Each entry represents one Pareto-optimal estimation result for a - particular combination of application trace and architecture ISA. - - Attributes: - qubits: Total number of physical qubits required. - runtime: Total runtime of the algorithm in nanoseconds. - error: Total estimated error probability. - source: The instruction source derived from the architecture ISA used - for this estimation. - factories: A mapping from instruction id to the - ``FactoryResult`` describing the magic-state factory used - and the number of copies required. - properties: Additional key-value properties attached to the - estimation result. - """ - - qubits: int - runtime: int - error: float - source: InstructionSource - factories: dict[int, FactoryResult] = field(default_factory=dict) - properties: dict[int, int | float | bool | str] = field(default_factory=dict) - - @classmethod - def from_result( - cls, result: EstimationResult, ctx: ISAContext - ) -> EstimationTableEntry: - """Create an entry from an estimation result and architecture context. - - Args: - result (EstimationResult): The raw estimation result. - ctx (ISAContext): The architecture context used for the estimation. - - Returns: - EstimationTableEntry: A new table entry populated from the result. - """ - return cls( - qubits=result.qubits, - runtime=result.runtime, - error=result.error, - source=InstructionSource.from_isa(ctx, result.isa), - factories=result.factories.copy(), - properties=result.properties.copy(), - ) - - -@dataclass(slots=True) -class EstimationTableStats: - """Statistics for a single estimation run. - - Attributes: - num_traces (int): Number of traces evaluated. - num_isas (int): Number of ISAs evaluated. - total_jobs (int): Total estimation jobs executed. - successful_estimates (int): Number of jobs that produced a result. - pareto_results (int): Number of Pareto-optimal results retained. 
- """ - - num_traces: int = 0 - num_isas: int = 0 - total_jobs: int = 0 - successful_estimates: int = 0 - pareto_results: int = 0 - - -# Mapping from runtime unit name to its value in nanoseconds. -_TIME_UNITS: dict[str, float] = { - "ns": 1, - "µs": 1e3, - "us": 1e3, - "ms": 1e6, - "s": 1e9, - "min": 60e9, - "hours": 3600e9, - "days": 86_400e9, - "weeks": 604_800e9, - "months": 31 * 86_400e9, - "years": 365 * 86_400e9, - "decades": 10 * 365 * 86_400e9, - "centuries": 100 * 365 * 86_400e9, -} - -# Ordered subset of _TIME_UNITS used for default x-axis tick labels. -_TICK_UNITS: list[tuple[str, float]] = [ - ("1 ns", _TIME_UNITS["ns"]), - ("1 µs", _TIME_UNITS["µs"]), - ("1 ms", _TIME_UNITS["ms"]), - ("1 s", _TIME_UNITS["s"]), - ("1 min", _TIME_UNITS["min"]), - ("1 hour", _TIME_UNITS["hours"]), - ("1 day", _TIME_UNITS["days"]), - ("1 week", _TIME_UNITS["weeks"]), - ("1 month", _TIME_UNITS["months"]), - ("1 year", _TIME_UNITS["years"]), - ("1 decade", _TIME_UNITS["decades"]), - ("1 century", _TIME_UNITS["centuries"]), -] - - -def plot_estimates( - data: EstimationTable | Iterable[EstimationTable], - *, - runtime_unit: Optional[str] = None, - figsize: tuple[float, float] = (15, 8), - scatter_args: dict[str, Any] = {"marker": "x"}, -): - """Plot estimation results displaying qubits vs runtime. - - Creates a log-log scatter plot where the x-axis shows the total runtime and - the y-axis shows the total number of physical qubits. - - *data* may be a single ``EstimationTable`` or an iterable of tables. When - multiple tables are provided, each is plotted as a separate series. If a - table has a ``EstimationTable.name`` (set via the *name* parameter of - ``estimate``), it is used as the legend label for that series. - - When *runtime_unit* is ``None`` (the default), the x-axis uses - human-readable time-unit tick labels spanning nanoseconds to centuries. - When a unit string is given (e.g. 
``"hours"``), all runtimes are scaled to - that unit and the x-axis label includes the unit while the ticks are plain - numbers. - - Supported *runtime_unit* values: ``"ns"``, ``"µs"`` (or ``"us"``), ``"ms"``, - ``"s"``, ``"min"``, ``"hours"``, ``"days"``, ``"weeks"``, ``"months"``, - ``"years"``. - - Args: - data: A single EstimationTable or an iterable of - EstimationTable objects to plot. - runtime_unit: Optional time unit to scale the x-axis to. - figsize: Figure dimensions in inches as ``(width, height)``. - scatter_args: Additional keyword arguments to pass to - ``matplotlib.axes.Axes.scatter`` when plotting the points. - - Returns: - matplotlib.figure.Figure: The figure containing the plot. - - Raises: - ImportError: If matplotlib is not installed. - ValueError: If all tables are empty or *runtime_unit* is not - recognised. - """ - try: - import matplotlib.pyplot as plt - except ImportError: - raise ImportError( - "Missing optional 'matplotlib' dependency. To install run: " - "pip install matplotlib" - ) - - # Normalize to a list of tables - if isinstance(data, EstimationTable): - tables = [data] - else: - tables = list(data) - - if not tables or all(len(t) == 0 for t in tables): - raise ValueError("Cannot plot an empty EstimationTable.") - - if runtime_unit is not None and runtime_unit not in _TIME_UNITS: - raise ValueError( - f"Unknown runtime_unit {runtime_unit!r}. 
" - f"Supported units: {', '.join(_TIME_UNITS)}" - ) - - fig, ax = plt.subplots(figsize=figsize) - ax.set_ylabel("Physical qubits") - ax.set_xscale("log") - ax.set_yscale("log") - - all_xs: list[float] = [] - has_labels = False - - for table in tables: - if len(table) == 0: - continue - - ys = [entry.qubits for entry in table] - - if runtime_unit is not None: - scale = _TIME_UNITS[runtime_unit] - xs = [entry.runtime / scale for entry in table] - else: - xs = [float(entry.runtime) for entry in table] - - all_xs.extend(xs) - - label = table.name - if label is not None: - has_labels = True - - ax.scatter(x=xs, y=ys, label=label, **scatter_args) - - if runtime_unit is not None: - ax.set_xlabel(f"Runtime ({runtime_unit})") - else: - ax.set_xlabel("Runtime") - - time_labels, time_units = zip(*_TICK_UNITS) - - cutoff = ( - next( - (i for i, x in enumerate(time_units) if x > max(all_xs)), - len(time_units) - 1, - ) - + 1 - ) - - ax.set_xticks(time_units[:cutoff]) - ax.set_xticklabels(time_labels[:cutoff], rotation=90) - - if has_labels: - ax.legend() - - plt.close(fig) - - return fig diff --git a/source/pip/qsharp/qre/_trace.py b/source/pip/qsharp/qre/_trace.py deleted file mode 100644 index 49974e80d9..0000000000 --- a/source/pip/qsharp/qre/_trace.py +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from __future__ import annotations -from abc import ABC, abstractmethod -from dataclasses import dataclass, KW_ONLY, field -from itertools import product -from types import NoneType -from typing import Any, Optional, Generator, Type, TYPE_CHECKING - -if TYPE_CHECKING: - from ._application import _Context -from ._enumeration import _enumerate_instances -from ._qre import PSSPC as _PSSPC, LatticeSurgery as _LatticeSurgery, Trace - - -class TraceTransform(ABC): - """Abstract base class for trace transformations.""" - - @abstractmethod - def transform(self, trace: Trace) -> Optional[Trace]: - """Apply this transformation to a trace. - - Args: - trace (Trace): The input trace. - - Returns: - Optional[Trace]: The transformed trace, or None if the - transformation is not applicable. - """ - ... - - @classmethod - def q(cls, **kwargs) -> TraceQuery: - """Create a trace query for this transform type. - - Args: - **kwargs: Domain overrides for parameter enumeration. - - Returns: - TraceQuery: A trace query wrapping this transform type. - """ - return TraceQuery(cls, **kwargs) - - -@dataclass -class PSSPC(TraceTransform): - """Pauli-based computation trace transform (PSSPC). - - Converts rotation gates and optionally CCX gates into T-state-based - operations suitable for lattice surgery resource estimation. - - Attributes: - num_ts_per_rotation (int): Number of T states used per rotation - gate. Default is 20. - ccx_magic_states (bool): If True, CCX gates are treated as magic - states rather than being decomposed into T gates. Default is - False. - """ - - _: KW_ONLY - num_ts_per_rotation: int = field( - default=20, metadata={"domain": list(range(5, 21))} - ) - ccx_magic_states: bool = field(default=False) - - def __post_init__(self): - self._psspc = _PSSPC(self.num_ts_per_rotation, self.ccx_magic_states) - - def transform(self, trace: Trace) -> Optional[Trace]: - """Apply the PSSPC transformation to a trace. - - Args: - trace (Trace): The input trace. 
- - Returns: - Optional[Trace]: The transformed trace. - """ - return self._psspc.transform(trace) - - -@dataclass -class LatticeSurgery(TraceTransform): - """Lattice surgery trace transform. - - Converts a trace into a form suitable for lattice-surgery-based - resource estimation. - - Attributes: - slow_down_factor (float): Multiplicative factor applied to the - trace depth. Default is 1.0. - """ - - _: KW_ONLY - slow_down_factor: float = field(default=1.0, metadata={"domain": [1.0]}) - - def __post_init__(self): - self._lattice_surgery = _LatticeSurgery(self.slow_down_factor) - - def transform(self, trace: Trace) -> Optional[Trace]: - """Apply the lattice surgery transformation to a trace. - - Args: - trace (Trace): The input trace. - - Returns: - Optional[Trace]: The transformed trace. - """ - return self._lattice_surgery.transform(trace) - - -class _Node(ABC): - """Abstract base class for trace enumeration nodes.""" - - @abstractmethod - def enumerate(self, ctx: _Context) -> Generator[Trace, None, None]: ... - - -class TraceQuery(_Node): - """A query that enumerates transformed traces from an application. - - A trace query chains a sequence of trace transforms, each with optional - keyword arguments to override their default parameter domains. - """ - - # This is a sequence of trace transforms together with possible kwargs to - # override their default domains. The first element might be - sequence: list[tuple[Type, dict[str, Any]]] - - def __init__(self, t: Type, **kwargs): - self.sequence = [(t, kwargs)] - - def enumerate( - self, ctx: _Context, track_parameters: bool = False - ) -> Generator[Trace | tuple[Any, Trace], None, None]: - """Enumerate transformed traces from the application context. - - Args: - ctx (_Context): The application enumeration context. - track_parameters (bool): If True, yield ``(parameters, trace)`` - tuples instead of plain traces. Default is False. 
- - Yields: - Trace | tuple[Any, Trace]: A transformed trace, or a - ``(parameters, trace)`` tuple when *track_parameters* is True. - """ - sequence = self.sequence - kwargs = {} - if len(sequence) > 0 and sequence[0][0] is NoneType: - kwargs = sequence[0][1] - sequence = sequence[1:] - - if track_parameters: - source = ctx.application.enumerate_traces_with_parameters(**kwargs) - else: - source = ((None, t) for t in ctx.application.enumerate_traces(**kwargs)) - - for params, trace in source: - if not sequence: - yield (params, trace) if track_parameters else trace - continue - - transformer_instances = [] - - for t, transformer_kwargs in sequence: - instances = _enumerate_instances(t, **transformer_kwargs) - transformer_instances.append(instances) - - for combination in product(*transformer_instances): - transformed = trace - for transformer in combination: - transformed = transformer.transform(transformed) - yield (params, transformed) if track_parameters else transformed - - def __mul__(self, other: TraceQuery) -> TraceQuery: - """Chain another trace query onto this one. - - Args: - other (TraceQuery): The trace query to append. - - Returns: - TraceQuery: A new query with the combined transform sequence. - - Raises: - ValueError: If *other* begins with a None transform. - """ - new_query = TraceQuery.__new__(TraceQuery) - - if len(other.sequence) > 0 and other.sequence[0][0] is NoneType: - raise ValueError( - "Cannot multiply with a TraceQuery that has a None transform at the beginning of its sequence." - ) - - new_query.sequence = self.sequence + other.sequence - return new_query diff --git a/source/pip/qsharp/qre/application/__init__.py b/source/pip/qsharp/qre/application/__init__.py deleted file mode 100644 index f6ee4c9f08..0000000000 --- a/source/pip/qsharp/qre/application/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from ._cirq import CirqApplication -from ._qir import QIRApplication -from ._qsharp import QSharpApplication -from ._openqasm import OpenQASMApplication - -__all__ = [ - "CirqApplication", - "QIRApplication", - "QSharpApplication", - "OpenQASMApplication", -] diff --git a/source/pip/qsharp/qre/application/_cirq.py b/source/pip/qsharp/qre/application/_cirq.py deleted file mode 100644 index a49c58e317..0000000000 --- a/source/pip/qsharp/qre/application/_cirq.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - - -from __future__ import annotations - -from dataclasses import dataclass - -import cirq - -from ... import telemetry_events -from .._application import Application -from .._qre import Trace -from ..interop import trace_from_cirq - - -@dataclass -class CirqApplication(Application[None]): - """Application that produces a resource estimation trace from a Cirq circuit. - - Accepts either a Cirq ``Circuit`` object or an OpenQASM string. When a - QASM string is provided, it is parsed into a circuit using - ``cirq.contrib.qasm_import`` (requires the optional ``ply`` dependency). - - Args: - circuit_or_qasm: A Cirq Circuit or an OpenQASM string. - classical_control_probability: Probability that a classically - controlled operation is included in the trace. Defaults to 0.5. - """ - - circuit_or_qasm: str | cirq.CIRCUIT_LIKE - classical_control_probability: float = 0.5 - - def __post_init__(self): - telemetry_events.on_qre_application_created("CirqApplication") - if isinstance(self.circuit_or_qasm, str): - try: - from cirq.contrib.qasm_import import circuit_from_qasm - - self._circuit = circuit_from_qasm(self.circuit_or_qasm) - except ImportError: - raise ImportError( - "Missing optional 'ply' dependency. 
To install run: " - "pip install ply" - ) - else: - self._circuit = self.circuit_or_qasm - - def get_trace(self, parameters: None = None) -> Trace: - """Return the resource estimation trace for the Cirq circuit. - - Args: - parameters (None): Unused. Defaults to None. - - Returns: - Trace: The resource estimation trace. - """ - return trace_from_cirq(self._circuit) diff --git a/source/pip/qsharp/qre/application/_openqasm.py b/source/pip/qsharp/qre/application/_openqasm.py deleted file mode 100644 index 53c525e54e..0000000000 --- a/source/pip/qsharp/qre/application/_openqasm.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - - -from __future__ import annotations - - -import random -from dataclasses import dataclass -from typing import Callable - -from ... import code -from ... import telemetry_events -from ...estimator import LogicalCounts -from .._qre import Trace -from .._application import Application -from ..interop import trace_from_entry_expr - - -@dataclass -class OpenQASMApplication(Application[None]): - """Application that produces a resource estimation trace from OpenQASM code. - - Accepts an OpenQASM program string or a callable. - - Attributes: - program (str | Callable): The OpenQASM program as string or callable. - args (tuple): The arguments to pass to the callable, if one is - provided. Default is an empty tuple. - """ - - program: str | Callable | LogicalCounts - args: tuple = () - - def __post_init__(self): - """Log telemetry for OpenQASMApplication creation.""" - telemetry_events.on_qre_application_created("OpenQASMApplication") - - def get_trace(self, parameters: None = None) -> Trace: - """Return the resource estimation trace for the OpenQASM program. - - Args: - parameters (None): Unused. Defaults to None. - - Returns: - Trace: The resource estimation trace. 
- """ - if isinstance(self.program, str): - from qsharp.openqasm import import_openqasm, ProgramType - - name_found = False - for _ in range(1_000): - name = f"openqasm{random.randint(0, 1_000_000)}" - if not hasattr(code, "qasm_import") or not hasattr( - code.qasm_import, name - ): - name_found = True - break - - if not name_found: - raise RuntimeError( - "Failed to find a unique name for the OpenQASM program." - ) - - import_openqasm(self.program, name=name, program_type=ProgramType.File) - self.program = getattr(code.qasm_import, name) - - return trace_from_entry_expr(self.program, *self.args) diff --git a/source/pip/qsharp/qre/application/_qir.py b/source/pip/qsharp/qre/application/_qir.py deleted file mode 100644 index 3536468fe7..0000000000 --- a/source/pip/qsharp/qre/application/_qir.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - - -from __future__ import annotations - -from dataclasses import dataclass - -from ... import telemetry_events -from .._qre import Trace -from .._application import Application -from ..interop import trace_from_qir - - -@dataclass -class QIRApplication(Application[None]): - """Application that produces a resource estimation trace from base profile QIR code. - - Accepts QIR input as LLVM IR text or bitcode. The QIR input must adhere to - the base profile. - - Attributes: - input (str | bytes): QIR input as LLVM IR text (str) or - bitcode (bytes). - """ - - input: str | bytes - - def __post_init__(self): - """Log telemetry for QIRApplication creation.""" - telemetry_events.on_qre_application_created("QIRApplication") - - def get_trace(self, parameters: None = None) -> Trace: - """Return the resource estimation trace for the QIR program. - - Args: - parameters (None): Unused. Defaults to None. - - Returns: - Trace: The resource estimation trace. 
- """ - return trace_from_qir(self.input) diff --git a/source/pip/qsharp/qre/application/_qsharp.py b/source/pip/qsharp/qre/application/_qsharp.py deleted file mode 100644 index bfc11d1a98..0000000000 --- a/source/pip/qsharp/qre/application/_qsharp.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - - -from __future__ import annotations - -from pathlib import Path -from dataclasses import dataclass, field -from typing import Callable - -from ...estimator import LogicalCounts -from ... import telemetry_events -from .._qre import Trace -from .._application import Application -from ..interop import trace_from_entry_expr_cached - - -@dataclass -class QSharpApplication(Application[None]): - """Application that produces a resource estimation trace from Q# code. - - Accepts a Q# entry expression string, a callable, or pre-computed - ``LogicalCounts``. - - Attributes: - entry_expr (str | Callable | LogicalCounts): The Q# entry - expression, a callable returning logical counts, or - pre-computed logical counts. - args (tuple): The arguments to pass to the callable, if one is - provided. Default is an empty tuple. - cache_dir (Path): Directory for caching compiled traces. - use_cache (bool): Whether to use the trace cache. Default is False. - """ - - entry_expr: str | Callable | LogicalCounts - args: tuple = () - cache_dir: Path = field( - default=Path.home() / ".cache" / "re3" / "qsharp", repr=False - ) - use_cache: bool = field(default=False, repr=False) - - def __post_init__(self): - """Log telemetry for QSharpApplication creation.""" - telemetry_events.on_qre_application_created("QSharpApplication") - - def get_trace(self, parameters: None = None) -> Trace: - """Return the resource estimation trace for the Q# program. - - Args: - parameters (None): Unused. Defaults to None. - - Returns: - Trace: The resource estimation trace. 
- """ - if self.use_cache and isinstance(self.entry_expr, str): - cache_path = self.cache_dir / f"{self.entry_expr}.json" - else: - cache_path = None - - return trace_from_entry_expr_cached(self.entry_expr, cache_path, *self.args) diff --git a/source/pip/qsharp/qre/instruction_ids.py b/source/pip/qsharp/qre/instruction_ids.py index cec4a9c070..7cb87e21c0 100644 --- a/source/pip/qsharp/qre/instruction_ids.py +++ b/source/pip/qsharp/qre/instruction_ids.py @@ -1,10 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -# pyright: reportAttributeAccessIssue=false - - -from .._native import instruction_ids - -for name in instruction_ids.__all__: - globals()[name] = getattr(instruction_ids, name) +# Deprecated: use qdk.qre.instruction_ids instead. +from qdk.qre.instruction_ids import * # noqa: F401,F403 diff --git a/source/pip/qsharp/qre/instruction_ids.pyi b/source/pip/qsharp/qre/instruction_ids.pyi deleted file mode 100644 index 240146ec76..0000000000 --- a/source/pip/qsharp/qre/instruction_ids.pyi +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -# Paulis -PAULI_I: int -PAULI_X: int -PAULI_Y: int -PAULI_Z: int - -# Clifford gates -H: int -H_XZ: int -H_XY: int -H_YZ: int -SQRT_X: int -SQRT_X_DAG: int -SQRT_Y: int -SQRT_Y_DAG: int -S: int -SQRT_Z: int -S_DAG: int -SQRT_Z_DAG: int -CNOT: int -CX: int -CY: int -CZ: int -SWAP: int - -# State preparation -PREP_X: int -PREP_Y: int -PREP_Z: int - -# Generic Cliffords -ONE_QUBIT_CLIFFORD: int -TWO_QUBIT_CLIFFORD: int -N_QUBIT_CLIFFORD: int - -# Measurements -MEAS_X: int -MEAS_Y: int -MEAS_Z: int -MEAS_RESET_X: int -MEAS_RESET_Y: int -MEAS_RESET_Z: int -MEAS_XX: int -MEAS_YY: int -MEAS_ZZ: int -MEAS_XZ: int -MEAS_XY: int -MEAS_YZ: int - -# Non-Clifford gates -SQRT_SQRT_X: int -SQRT_SQRT_X_DAG: int -SQRT_SQRT_Y: int -SQRT_SQRT_Y_DAG: int -SQRT_SQRT_Z: int -T: int -SQRT_SQRT_Z_DAG: int -T_DAG: int -CCX: int -CCY: int -CCZ: int -CSWAP: int -AND: int -AND_DAG: int -RX: int -RY: int -RZ: int -CRX: int -CRY: int -CRZ: int -RXX: int -RYY: int -RZZ: int - -# Generic unitary gates -ONE_QUBIT_UNITARY: int -TWO_QUBIT_UNITARY: int - -# Multi-qubit Pauli measurement -MULTI_PAULI_MEAS: int - -# Some generic logical instructions -LATTICE_SURGERY: int - -# Memory/compute operations (used in compute parts of memory-compute layouts) -READ_FROM_MEMORY: int -WRITE_TO_MEMORY: int -MEMORY: int - -# Some special hardware physical instructions -CYCLIC_SHIFT: int -PHYSICAL_MOVE: int -HAND_OFF: int - -# Generic operation (for unified RE) -GENERIC: int diff --git a/source/pip/qsharp/qre/interop/__init__.py b/source/pip/qsharp/qre/interop/__init__.py deleted file mode 100644 index 52917a3a42..0000000000 --- a/source/pip/qsharp/qre/interop/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from ._cirq import ( - PeakUsageGreedyQubitManager, - PopBlock, - PushBlock, - QubitType, - ReadFromMemoryGate, - TypedQubit, - WriteToMemoryGate, - assert_qubits_type, - read_from_memory, - trace_from_cirq, - write_to_memory, -) -from ._qir import trace_from_qir -from ._qsharp import trace_from_entry_expr, trace_from_entry_expr_cached - -__all__ = [ - "trace_from_cirq", - "trace_from_entry_expr", - "trace_from_entry_expr_cached", - "trace_from_qir", - "PushBlock", - "PopBlock", - "QubitType", - "TypedQubit", - "PeakUsageGreedyQubitManager", - "ReadFromMemoryGate", - "WriteToMemoryGate", - "write_to_memory", - "read_from_memory", - "assert_qubits_type", -] diff --git a/source/pip/qsharp/qre/interop/_cirq.py b/source/pip/qsharp/qre/interop/_cirq.py deleted file mode 100644 index 85808006ee..0000000000 --- a/source/pip/qsharp/qre/interop/_cirq.py +++ /dev/null @@ -1,822 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from __future__ import annotations - -import random -from dataclasses import dataclass -from enum import Enum -from math import pi -from typing import Iterable, Iterator, Sequence, cast - -import cirq -from cirq import ( - CCXPowGate, - CCZPowGate, - ClassicallyControlledOperation, - CXPowGate, - CZPowGate, - GateOperation, - HPowGate, - MeasurementGate, - PhaseGradientGate, - ResetChannel, - SwapPowGate, - XPowGate, - YPowGate, - ZPowGate, -) - -from qsharp.qre import Block, Trace -from qsharp.qre.instruction_ids import ( - CCX, - CCZ, - CX, - CZ, - MEAS_Z, - PAULI_X, - PAULI_Y, - PAULI_Z, - READ_FROM_MEMORY, - RX, - RY, - RZ, - S_DAG, - SQRT_SQRT_X, - SQRT_SQRT_X_DAG, - SQRT_SQRT_Y, - SQRT_SQRT_Y_DAG, - SQRT_X, - SQRT_X_DAG, - SQRT_Y, - SQRT_Y_DAG, - SWAP, - T_DAG, - WRITE_TO_MEMORY, - H, - S, - T, -) - -_TOLERANCE = 1e-8 - - -def _approx_eq(a: float, b: float) -> bool: - """Check whether two floats are approximately equal.""" - return abs(a - b) <= _TOLERANCE - - -def trace_from_cirq( - circuit: 
cirq.CIRCUIT_LIKE, - *, - classical_control_probability: float = 0.5, - rotation_threshold: float = 1e-6, - track_memory_qubits: bool = True, -) -> Trace: - """Convert a Cirq circuit into a resource estimation Trace. - - Iterates through all moments and operations in the circuit, converting - each gate into trace operations. Gates with a ``_to_trace`` method are - converted directly; others are recursively decomposed via Cirq's - ``_decompose_with_context_`` or ``_decompose_`` protocols. - - Args: - circuit: The Cirq circuit to convert. - classical_control_probability: Probability that a classically - controlled operation is included in the trace. Defaults to 0.5. - rotation_threshold: Rotation exponents with absolute value below - this threshold are treated as identity and omitted from the - trace. This applies to single-qubit rotations (RX, RY, RZ) as - well as to the rotation components of controlled-Z - decompositions. Defaults to 1e-6. - track_memory_qubits (bool): When True, memory qubits are tracked - separately from compute qubits. When False, all qubits are treated - as compute qubits. Also, if True, read-from-memory and - write-to-memory instructions are preserved in the trace, otherwise, - they are decompsed into SWAP and RESET instructions. Defaults to - True. - - Returns: - Trace: A Trace representing an execution profile of the circuit. - """ - - if isinstance(circuit, cirq.Circuit): - # circuit is already in the expected format, so we can process it directly. 
- pass - elif isinstance(circuit, cirq.Gate): - circuit = cirq.Circuit(circuit.on(*cirq.LineQid.for_gate(circuit))) - else: - # circuit is OP_TREE - circuit = cirq.Circuit(circuit) - - context = _CirqTraceBuilder( - circuit, classical_control_probability, rotation_threshold, track_memory_qubits - ) - - for moment in circuit: - for op in moment.operations: - context.handle_op(op) - - return context.trace - - -class _CirqTraceBuilder: - """Builds a resource estimation ``Trace`` from a Cirq circuit. - - This class walks the operations produced by ``trace_from_cirq`` and - translates each one into trace instructions. It maintains the state - needed during the conversion: - - * A ``Trace`` instance that accumulates the result. - * A stack of ``Block`` objects so that ``PushBlock`` / ``PopBlock`` - markers can create nested repeated sections. - * A qubit-id mapping (``_QidToTraceId``) that assigns each Cirq qubit - a sequential integer index. - * A Cirq ``DecompositionContext`` for gates that need recursive - decomposition. - - Args: - circuit: The Cirq circuit being converted. - classical_control_probability: Probability that a classically - controlled operation is included in the trace. - rotation_threshold: Rotation exponents with absolute value below - this threshold are treated as identity. 
- """ - - def __init__( - self, - circuit: cirq.Circuit, - classical_control_probability: float, - rotation_threshold: float, - track_memory_qubits: bool = True, - ): - self._circuit = circuit - self._trace = Trace(0) - self._classical_control_probability = classical_control_probability - self._rotation_threshold = rotation_threshold - self._track_memory_qubits = track_memory_qubits - self._blocks = [self._trace.root_block()] - self._q_to_id = _QidToTraceId(circuit.all_qubits()) - self._decomp_context = cirq.DecompositionContext( - qubit_manager=PeakUsageGreedyQubitManager( - "trace_from_cirq", size=0, maximize_reuse=True - ) - ) - - def push_block(self, repetitions: int): - """Open a new repeated block with the given number of repetitions.""" - block = self.block.add_block(repetitions) - self._blocks.append(block) - - def pop_block(self): - """Close the current repeated block, returning to the parent.""" - self._blocks.pop() - - @property - def trace(self) -> Trace: - """Determine compute and memory qubits from the circuit's qubits as well - as from the qubit manager before returning the trace.""" - - qm = cast(PeakUsageGreedyQubitManager, self._decomp_context.qubit_manager) - num_memory_qubits, num_compute_qubits = 0, 0 - - for q in self._circuit.all_qubits(): - if ( - self._track_memory_qubits - and isinstance(q, TypedQubit) - and q.qubit_type == QubitType.MEMORY - ): - num_memory_qubits += 1 - else: - # Untyped qubits are considered COMPUTE by default. 
- num_compute_qubits += 1 - - if self._track_memory_qubits: - num_memory_qubits += qm.memory_qubit_count() - else: - num_compute_qubits += qm.memory_qubit_count() - num_compute_qubits += qm.compute_qubit_count() - - self._trace.compute_qubits = num_compute_qubits - if self._track_memory_qubits and num_memory_qubits > 0: - self._trace.memory_qubits = num_memory_qubits - - return self._trace - - @property - def block(self) -> Block: - """The innermost open block in the trace.""" - return self._blocks[-1] - - @property - def q_to_id(self) -> _QidToTraceId: - """Mapping from Cirq ``Qid`` to integer trace qubit index.""" - return self._q_to_id - - @property - def classical_control_probability(self) -> float: - """Probability used to stochastically include classically controlled - operations.""" - return self._classical_control_probability - - @property - def rotation_threshold(self) -> float: - """Rotation exponents with absolute value below this threshold are - treated as identity.""" - return self._rotation_threshold - - @property - def decomp_context(self) -> cirq.DecompositionContext: - """Cirq decomposition context shared across all recursive - decompositions.""" - return self._decomp_context - - def handle_op( - self, - op: cirq.OP_TREE | TraceGate | PushBlock | PopBlock, - ) -> None: - """Recursively convert a single operation into trace instructions. - - Supported operation forms: - - - ``TraceGate``: A raw trace instruction, added directly to the - current block. - - ``PushBlock`` / ``PopBlock``: Control block nesting with - repetitions. - - ``GateOperation``: Dispatched via ``_to_trace`` if available on - the gate, otherwise decomposed via - ``_decompose_with_context_`` or ``_decompose_``. - - ``ClassicallyControlledOperation``: Included with the probability - given by ``classical_control_probability``. - - ``list`` / iterable: Each element is handled recursively. - - Any other ``cirq.Operation``: Decomposed via - ``_decompose_with_context_``. 
- - Args: - op: The operation to convert. - """ - if isinstance(op, TraceGate): - qs = [ - self.q_to_id[q] - for q in ([op.qubits] if isinstance(op.qubits, cirq.Qid) else op.qubits) - ] - - if op.params is None: - self.block.add_operation(op.id, qs) - else: - self.block.add_operation( - op.id, qs, op.params if isinstance(op.params, list) else [op.params] - ) - elif isinstance(op, PushBlock): - self.push_block(op.repetitions) - elif isinstance(op, PopBlock): - self.pop_block() - elif isinstance(op, cirq.Operation): - if isinstance(op, GateOperation): - gate = op.gate - - if hasattr(gate, "_to_trace"): - for sub_op in gate._to_trace(self, op): # type: ignore - self.handle_op(sub_op) - elif hasattr(gate, "_decompose_with_context_"): - for sub_op in gate._decompose_with_context_(op.qubits, self.decomp_context): # type: ignore - self.handle_op(sub_op) - elif hasattr(gate, "_decompose_"): - # decompose the gate and handle the resulting operations recursively - for sub_op in gate._decompose_(op.qubits): # type: ignore - self.handle_op(sub_op) - else: - for sub_op in op._decompose_with_context_(self.decomp_context): # type: ignore - self.handle_op(sub_op) - elif isinstance(op, ClassicallyControlledOperation): - if random.random() < self.classical_control_probability: - self.handle_op(op.without_classical_controls()) - elif isinstance(op, cirq.CircuitOperation): - if isinstance(op.repetitions, int): - self.push_block(op.repetitions) - for sub_op in op.circuit: # type: ignore - self.handle_op(sub_op) - self.pop_block() - else: - raise ValueError( - "Only integer repetitions are supported for CircuitOperation." - ) - else: - for sub_op in op._decompose_with_context_(self.decomp_context): # type: ignore - self.handle_op(sub_op) - else: - # op is Iterable[OP_TREE] - for sub_op in op: - self.handle_op(sub_op) - - -@dataclass(frozen=True, slots=True) -class PushBlock: - """Signals the start of a repeated block in the trace. 
- - Args: - repetitions: Number of times the block is repeated. - """ - - repetitions: int - - -@dataclass(frozen=True, slots=True) -class PopBlock: - """Signals the end of the current repeated block in the trace.""" - - ... - - -@dataclass(frozen=True, slots=True) -class TraceGate: - """A raw trace instruction emitted during Cirq circuit conversion. - - Attributes: - id (int): The instruction ID. - qubits (list[cirq.Qid] | cirq.Qid): The target qubits. - params (list[float] | float | None): Optional gate parameters. - """ - - id: int - qubits: list[cirq.Qid] | cirq.Qid - params: list[float] | float | None = None - - -class _QidToTraceId(dict): - """Mapping from Cirq qubits to integer trace qubit indices. - - Initialized with a set of known qubits. If an unknown qubit is looked - up, it is automatically assigned the next available index. - """ - - def __init__(self, init: Iterable[cirq.Qid]): - super().__init__({q: i for i, q in enumerate(init)}) - - def __getitem__(self, key: cirq.Qid) -> int: - """ - If the key is not present, add it to the mapping with the next available id. 
- """ - - if key not in self: - self[key] = len(self) - return super().__getitem__(key) - - -def h_pow_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): - """Convert an HPowGate into trace instructions.""" - if _approx_eq(abs(self.exponent), 1): - yield TraceGate(H, [op.qubits[0]]) - else: - yield from op._decompose_with_context_(context.decomp_context) # type: ignore - - -def x_pow_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): - """Convert an XPowGate into trace instructions.""" - q = [op.qubits[0]] - exp = self.exponent - if _approx_eq(exp, 1) or _approx_eq(exp, -1): - yield TraceGate(PAULI_X, q) - elif _approx_eq(exp, 0.5): - yield TraceGate(SQRT_X, q) - elif _approx_eq(exp, -0.5): - yield TraceGate(SQRT_X_DAG, q) - elif _approx_eq(exp, 0.25): - yield TraceGate(SQRT_SQRT_X, q) - elif _approx_eq(exp, -0.25): - yield TraceGate(SQRT_SQRT_X_DAG, q) - else: - if abs(exp) >= context.rotation_threshold: - yield TraceGate(RX, q, exp * pi) - - -def y_pow_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): - """Convert a YPowGate into trace instructions.""" - q = [op.qubits[0]] - exp = self.exponent - if _approx_eq(exp, 1) or _approx_eq(exp, -1): - yield TraceGate(PAULI_Y, q) - elif _approx_eq(exp, 0.5): - yield TraceGate(SQRT_Y, q) - elif _approx_eq(exp, -0.5): - yield TraceGate(SQRT_Y_DAG, q) - elif _approx_eq(exp, 0.25): - yield TraceGate(SQRT_SQRT_Y, q) - elif _approx_eq(exp, -0.25): - yield TraceGate(SQRT_SQRT_Y_DAG, q) - else: - if abs(exp) >= context.rotation_threshold: - yield TraceGate(RY, q, exp * pi) - - -def z_pow_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): - """Convert a ZPowGate into trace instructions.""" - q = [op.qubits[0]] - exp = self.exponent - if _approx_eq(exp, 1) or _approx_eq(exp, -1): - yield TraceGate(PAULI_Z, q) - elif _approx_eq(exp, 0.5): - yield TraceGate(S, q) - elif _approx_eq(exp, -0.5): - yield TraceGate(S_DAG, q) - elif _approx_eq(exp, 0.25): - yield 
TraceGate(T, q) - elif _approx_eq(exp, -0.25): - yield TraceGate(T_DAG, q) - else: - if abs(exp) >= context.rotation_threshold: - yield TraceGate(RZ, q, exp * pi) - - -def cx_pow_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): - """Convert a CXPowGate into trace instructions.""" - if _approx_eq(abs(self.exponent), 1): - yield TraceGate(CX, [op.qubits[0], op.qubits[1]]) - else: - yield from op._decompose_with_context_(context.decomp_context) # type: ignore - - -def cz_pow_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): - """Convert a CZPowGate into trace instructions.""" - exp = self.exponent - c, t = op.qubits[0], op.qubits[1] - if _approx_eq(abs(exp), 1): - yield TraceGate(CZ, [c, t]) - elif _approx_eq(exp, 0.5): - # controlled S gate - yield TraceGate(T, [c]) - yield TraceGate(T, [t]) - yield TraceGate(CZ, [c, t]) - yield TraceGate(T_DAG, [t]) - yield TraceGate(CZ, [c, t]) - elif _approx_eq(exp, -0.5): - # controlled S† gate - yield TraceGate(T_DAG, [c]) - yield TraceGate(T_DAG, [t]) - yield TraceGate(CZ, [c, t]) - yield TraceGate(T, [t]) - yield TraceGate(CZ, [c, t]) - else: - half_exp = exp / 2 - if abs(half_exp) >= context.rotation_threshold: - rads = half_exp * pi - yield TraceGate(RZ, [c], [rads]) - yield TraceGate(RZ, [t], [rads]) - yield TraceGate(CZ, [c, t]) - yield TraceGate(RZ, [t], [-rads]) - yield TraceGate(CZ, [c, t]) - - -def swap_pow_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): - """Convert a SwapPowGate into trace instructions.""" - if _approx_eq(abs(self.exponent), 1): - yield TraceGate(SWAP, [op.qubits[0], op.qubits[1]]) - else: - yield from op._decompose_with_context_(context.decomp_context) # type: ignore - - -def ccx_pow_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): - """Convert a CCXPowGate into trace instructions.""" - if _approx_eq(abs(self.exponent), 1): - yield TraceGate(CCX, [op.qubits[0], op.qubits[1], op.qubits[2]]) - else: - yield from 
op._decompose_with_context_(context.decomp_context) # type: ignore - - -def ccz_pow_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): - """Convert a CCZPowGate into trace instructions.""" - if _approx_eq(abs(self.exponent), 1): - yield TraceGate(CCZ, [op.qubits[0], op.qubits[1], op.qubits[2]]) - else: - yield from op._decompose_with_context_(context.decomp_context) # type: ignore - - -def measurement_gate_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): - """Convert a MeasurementGate into trace instructions.""" - for q in op.qubits: - yield TraceGate(MEAS_Z, [q]) - - -def reset_channel_to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation): - """Convert a ResetChannel into trace instructions (no-op).""" - yield from () - - -# Attach _to_trace methods to Cirq gate classes so that handle_op can -# convert them directly into trace instructions without decomposition. -HPowGate._to_trace = h_pow_gate_to_trace -XPowGate._to_trace = x_pow_gate_to_trace -YPowGate._to_trace = y_pow_gate_to_trace -ZPowGate._to_trace = z_pow_gate_to_trace -CXPowGate._to_trace = cx_pow_gate_to_trace -CZPowGate._to_trace = cz_pow_gate_to_trace -SwapPowGate._to_trace = swap_pow_gate_to_trace -CCXPowGate._to_trace = ccx_pow_gate_to_trace -CCZPowGate._to_trace = ccz_pow_gate_to_trace -MeasurementGate._to_trace = measurement_gate_to_trace -ResetChannel._to_trace = reset_channel_to_trace - -# Decomposition overrides - - -def phase_gradient_decompose(self, qubits): - """Override PhaseGradientGate._decompose_ to skip rotations with very small angles. - - The original implementation may lead to floating-point overflows for - large values of i. - """ - - for i, q in enumerate(qubits): - exp = self.exponent / 2**i - if abs(exp) < 1e-6: - break - yield cirq.Z(q) ** exp - - -PhaseGradientGate._decompose_ = phase_gradient_decompose - - -class QubitType(Enum): - """Qubit type. - - Each logical qubit can be either a compute or memory qubit. 
Compute qubits - can be used normally. - - Memory qubits have a restriction that gates cannot be applied to them. The - only allowed operations on memory qubits are reads/writes, where state is - moved from memory to compute gate or from compute to memory gate. - - We assume that when error correction is applied, memory qubits are encoded - with a more efficient error correction scheme requiring less resources, but - not allowing gate application (e.g. Yoked surface codes, - https://arxiv.org/abs/2312.04522). - """ - - COMPUTE = 1 - MEMORY = 2 - - -class TypedQubit(cirq.Qid): - """Qubit with type.""" - - def __init__( - self, - qubit: cirq.Qid, - qubit_type: QubitType, - ): - """Initializes typed qubit.""" - self._qubit = qubit - self.qubit_type = qubit_type - - def _comparison_key(self) -> object: - """Comparison key.""" - return self._qubit._comparison_key() - - @property - def dimension(self) -> int: - """Dimension.""" - return cast("int", self._qubit.dimension) - - def __repr__(self) -> str: - """String representation of the qubit.""" - return repr(self._qubit) - - -def _as_typed_qubit(q: cirq.Qid) -> TypedQubit: - """Converts qubit to TypedQubit.""" - assert isinstance(q, TypedQubit) - return q - - -def assert_qubits_type(qs: Sequence[cirq.Qid], qubit_type: QubitType) -> None: - """Asserts that qubits have specified type, but only if they are TypedQubits.""" - if len(qs) == 0 or not isinstance(qs[0], TypedQubit): - return - - for q in qs: - actual_type = _as_typed_qubit(q).qubit_type - assert ( - actual_type == qubit_type - ), f"{q} expected to be {qubit_type}, was {actual_type}." - - -class _TypedQubitManager(cirq.GreedyQubitManager): - """Qubit manager managing qubits of specified type. - - All allocated qubits will have specified type. - Tracks current and peak number of qubits. 
- """ - - def __init__( - self, prefix: str, qubit_type: QubitType, *, size: int, maximize_reuse: bool - ): - """Initialize the manager.""" - prefix = prefix + "_" + qubit_type.name[0] - super().__init__(prefix, size=size, maximize_reuse=maximize_reuse) - self.qubit_type = qubit_type - self.current_in_use = 0 - self.peak_in_use = 0 - - def _allocate_qid(self, name: str, dim: int) -> cirq.Qid: - """Allocates single qubit.""" - return TypedQubit(super()._allocate_qid(name, dim), self.qubit_type) - - def qalloc(self, n: int, dim: int) -> list[cirq.Qid]: - """Allocate ``n`` qubits and update the usage counters.""" - qs = super().qalloc(n, dim) - self.current_in_use += len(qs) - self.peak_in_use = max(self.peak_in_use, self.current_in_use) - return cast("list[cirq.Qid]", qs) - - def qfree(self, qubits: Iterable[cirq.Qid]) -> None: - """Free the given qubits and update the usage counters.""" - super().qfree(qubits) - self.current_in_use -= len(set(qubits)) - - -class PeakUsageGreedyQubitManager(cirq.QubitManager): - """A qubit manager tracking compute and memory qubits separately. - - It consists of two independent qubit managers for each qubit type. Each manager - uses greedy allocation strategy from ``cirq.GreedyQubitManager``. - - Qubits of one type, after freed, cannot be reused as qubits of different type. - Therefore, peak qubit count is equal to sum of peak qubit counts for each type. - """ - - def __init__(self, prefix: str, *, size: int, maximize_reuse: bool): - """Initialize the PeakUsageGreedyQubitManager. - - Args: - prefix: Naming prefix for allocated qubits. - size: Initial pool size passed through to ``cirq.GreedyQubitManager``. - Example: 0. - maximize_reuse: Flag to control qubit reuse strategy. If ``False``, this - mode uses a FIFO (First in First out) strategy s.t. next allocated qubit - is one which was freed the earliest. If ``True``, this mode uses a LIFO - (Last in First out) strategy s.t. 
the next allocated qubit is one which - was freed the latest. - - """ - self.typed_managers = { - qubit_type: _TypedQubitManager( - prefix, qubit_type, size=size, maximize_reuse=maximize_reuse - ) - for qubit_type in QubitType - } - - def qalloc( - self, n: int, dim: int, qubit_type: QubitType = QubitType.COMPUTE - ) -> list[cirq.Qid]: - """Allocate ``n`` qubits and update the usage counters. - - Args: - n: Number of qubits to allocate. - dim: Dimension of each qubit. Example: 2 for qubits. - qubit_type: Type of qubits (COMPUTE or MEMORY). - - Returns: - List of allocated qubits. - - """ - return self.typed_managers[qubit_type].qalloc(n, dim) - - def qborrow(self, n: int, dim: int = 2) -> list[cirq.Qid]: - """Borrow qubits (not supported).""" - raise NotImplementedError("qborrow is not supported.") - - def qfree(self, qubits: Iterable[cirq.Qid]) -> None: - """Free the given qubits.""" - qubits_by_type: dict[QubitType, list[cirq.Qid]] = {t: [] for t in QubitType} - for q in qubits: - qubits_by_type[_as_typed_qubit(q).qubit_type].append(q) - for qubit_type, qs in qubits_by_type.items(): - if len(qs) > 0: - self.typed_managers[qubit_type].qfree(qs) - - def current_in_use(self) -> int: - """Number of qubits currently in use.""" - return sum(qm.current_in_use for qm in self.typed_managers.values()) - - def qubit_count(self) -> int: - """Returns the peak number of qubits of all types. - - It is equal to sum of peak counts for each type, because qubits of one type - cannot be reused as qubits of a different type. 
- """ - return self.compute_qubit_count() + self.memory_qubit_count() - - def compute_qubit_count(self) -> int: - """Returns the peak number of simultaneously in-use COMPUTE qubits.""" - return self.typed_managers[QubitType.COMPUTE].peak_in_use - - def memory_qubit_count(self) -> int: - """Returns the peak number of simultaneously in-use MEMORY qubits.""" - return self.typed_managers[QubitType.MEMORY].peak_in_use - - -class ReadFromMemoryGate(cirq.Gate): - """Moves qubit states from MEMORY register to COMPUTE register. - - Assumes COMPUTE qubits are prepared in 0 state. Leaves MEMORY qubits in 0 state. - """ - - def __init__(self, n: int): - """Initializes ReadFromMemoryGate.""" - self.n = n - - def _num_qubits_(self) -> int: - """Number of qubits passed in to this gate.""" - return 2 * self.n - - def _decompose_(self, qubits: Sequence[cirq.Qid]) -> Iterator[cirq.Operation]: - """Decomposes this gate into equivalent SWAP gates.""" - comp_qs, mem_qs = self._get_qubits(qubits) - for i in range(self.n): - yield cirq.reset(comp_qs[i]) - yield cirq.SWAP(mem_qs[i], comp_qs[i]) - - def _to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation, **_kwargs): - """Convert this gate into trace instructions.""" - if context._track_memory_qubits: - comp_qs, mem_qs = self._get_qubits(op.qubits) - for i in range(self.n): - yield TraceGate(READ_FROM_MEMORY, [mem_qs[i], comp_qs[i]]) - else: - yield from self._decompose_(op.qubits) - - def _get_qubits( - self, qubits: Sequence[cirq.Qid] - ) -> tuple[Sequence[cirq.Qid], Sequence[cirq.Qid]]: - """Get qubits for this gate partitioned into compute and memory - qubits.""" - assert len(qubits) == 2 * self.n - mem_qs = qubits[0 : self.n] - comp_qs = qubits[self.n : 2 * self.n] - assert_qubits_type(mem_qs, QubitType.MEMORY) - assert_qubits_type(comp_qs, QubitType.COMPUTE) - return comp_qs, mem_qs - - -class WriteToMemoryGate(cirq.Gate): - """Moves qubit states from COMPUTE register to MEMORY register. 
- - Assumes MEMORY qubits are prepared in 0 state. Leaves COMPUTE qubits in 0 state. - """ - - def __init__(self, n: int): - """Initializes WriteToMemoryGate.""" - self.n = n - - def _num_qubits_(self) -> int: - """Number of qubits passed in to this gate.""" - return 2 * self.n - - def _decompose_(self, qubits: Sequence[cirq.Qid]) -> Iterator[cirq.Operation]: - """Decomposes this gate into equivalent SWAP gates.""" - comp_qs, mem_qs = self._get_qubits(qubits) - for i in range(self.n): - yield cirq.reset(mem_qs[i]) - yield cirq.SWAP(mem_qs[i], comp_qs[i]) - - def _to_trace(self, context: _CirqTraceBuilder, op: cirq.Operation, **_kwargs): - """Convert this gate into trace instructions.""" - if context._track_memory_qubits: - comp_qs, mem_qs = self._get_qubits(op.qubits) - for i in range(self.n): - yield TraceGate(WRITE_TO_MEMORY, [comp_qs[i], mem_qs[i]]) - else: - yield from self._decompose_(op.qubits) - - def _get_qubits( - self, qubits: Sequence[cirq.Qid] - ) -> tuple[Sequence[cirq.Qid], Sequence[cirq.Qid]]: - assert len(qubits) == 2 * self.n - mem_qs = qubits[0 : self.n] - comp_qs = qubits[self.n : 2 * self.n] - assert_qubits_type(mem_qs, QubitType.MEMORY) - assert_qubits_type(comp_qs, QubitType.COMPUTE) - - return comp_qs, mem_qs - - -def write_to_memory( - memory_qubits: Sequence[cirq.Qid], compute_qubits: Sequence[cirq.Qid] -) -> cirq.Operation: - """Operation to write qubits to memory.""" - assert_qubits_type(memory_qubits, QubitType.MEMORY) - assert_qubits_type(compute_qubits, QubitType.COMPUTE) - n = len(memory_qubits) - assert n == len(compute_qubits) - return WriteToMemoryGate(n).on(*memory_qubits, *compute_qubits) - - -def read_from_memory( - memory_qubits: Sequence[cirq.Qid], compute_qubits: Sequence[cirq.Qid] -) -> cirq.Operation: - """Operation to read qubits from memory.""" - assert_qubits_type(memory_qubits, QubitType.MEMORY) - assert_qubits_type(compute_qubits, QubitType.COMPUTE) - n = len(memory_qubits) - assert n == len(compute_qubits) - return 
ReadFromMemoryGate(n).on(*memory_qubits, *compute_qubits) diff --git a/source/pip/qsharp/qre/interop/_qir.py b/source/pip/qsharp/qre/interop/_qir.py deleted file mode 100644 index ebfb9559d1..0000000000 --- a/source/pip/qsharp/qre/interop/_qir.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from __future__ import annotations - -import pyqir - -from ..._native import QirInstructionId -from ..._simulation import AggregateGatesPass -from .. import instruction_ids as ids -from .._qre import Trace - -# Maps QirInstructionId to (instruction_id, arity) where arity is: -# 1 = single-qubit gate: tuple is (op, qubit) -# 2 = two-qubit gate: tuple is (op, qubit1, qubit2) -# 3 = three-qubit gate: tuple is (op, qubit1, qubit2, qubit3) -# -1 = single-qubit rotation: tuple is (op, angle, qubit) -# -2 = two-qubit rotation: tuple is (op, angle, qubit1, qubit2) -_GATE_MAP: list[tuple[QirInstructionId, int, int]] = [ - # Single-qubit gates - (QirInstructionId.I, ids.PAULI_I, 1), - (QirInstructionId.H, ids.H, 1), - (QirInstructionId.X, ids.PAULI_X, 1), - (QirInstructionId.Y, ids.PAULI_Y, 1), - (QirInstructionId.Z, ids.PAULI_Z, 1), - (QirInstructionId.S, ids.S, 1), - (QirInstructionId.SAdj, ids.S_DAG, 1), - (QirInstructionId.SX, ids.SQRT_X, 1), - (QirInstructionId.SXAdj, ids.SQRT_X_DAG, 1), - (QirInstructionId.T, ids.T, 1), - (QirInstructionId.TAdj, ids.T_DAG, 1), - # Two-qubit gates - (QirInstructionId.CNOT, ids.CNOT, 2), - (QirInstructionId.CX, ids.CX, 2), - (QirInstructionId.CY, ids.CY, 2), - (QirInstructionId.CZ, ids.CZ, 2), - (QirInstructionId.SWAP, ids.SWAP, 2), - # Three-qubit gates - (QirInstructionId.CCX, ids.CCX, 3), - # Single-qubit rotations (op, angle, qubit) - (QirInstructionId.RX, ids.RX, -1), - (QirInstructionId.RY, ids.RY, -1), - (QirInstructionId.RZ, ids.RZ, -1), - # Two-qubit rotations (op, angle, qubit1, qubit2) - (QirInstructionId.RXX, ids.RXX, -2), - (QirInstructionId.RYY, ids.RYY, -2), - 
(QirInstructionId.RZZ, ids.RZZ, -2), -] - -_MEAS_MAP: list[tuple[QirInstructionId, int]] = [ - (QirInstructionId.M, ids.MEAS_Z), - (QirInstructionId.MZ, ids.MEAS_Z), - (QirInstructionId.MResetZ, ids.MEAS_RESET_Z), -] - -_SKIP = ( - # Resets qubit to |0⟩ without measuring; we do not currently account for - # that in resource estimation - QirInstructionId.RESET, - # Runtime qubit state transfer; an implementation detail, not a logical operation - QirInstructionId.Move, - # Reads a measurement result from classical memory; purely classical I/O - QirInstructionId.ReadResult, - # The following are classical output recording operations that do not represent - # quantum operations and have no impact on resource estimation. - QirInstructionId.ResultRecordOutput, - QirInstructionId.BoolRecordOutput, - QirInstructionId.IntRecordOutput, - QirInstructionId.DoubleRecordOutput, - QirInstructionId.TupleRecordOutput, - QirInstructionId.ArrayRecordOutput, -) - - -def trace_from_qir(input: str | bytes) -> Trace: - """Convert a QIR program into a resource-estimation Trace. - - Parses the QIR module, extracts quantum gates, and builds a Trace that - can be used for resource estimation. Conditional branches are resolved - by always following the false path (assuming measurement results are Zero). - - Args: - input: QIR input as LLVM IR text (str) or bitcode (bytes). - - Returns: - A Trace containing the quantum operations from the QIR program. 
- """ - context = pyqir.Context() - - if isinstance(input, str): - mod = pyqir.Module.from_ir(context, input) - else: - mod = pyqir.Module.from_bitcode(context, input) - - gates, num_qubits, _ = AggregateGatesPass().run(mod) - - trace = Trace(compute_qubits=num_qubits) - - for gate in gates: - # NOTE: AggregateGatesPass does not return QirInstruction objects - assert isinstance(gate, tuple) - _add_gate(trace, gate) - - return trace - - -def _add_gate(trace: Trace, gate: tuple) -> None: - """Add a single QIR gate tuple to the trace.""" - op = gate[0] - - for qir_id, instr_id, arity in _GATE_MAP: - if op == qir_id: - if arity == 1: - trace.add_operation(instr_id, [gate[1]]) - elif arity == 2: - trace.add_operation(instr_id, [gate[1], gate[2]]) - elif arity == 3: - trace.add_operation(instr_id, [gate[1], gate[2], gate[3]]) - elif arity == -1: - trace.add_operation(instr_id, [gate[2]], [gate[1]]) - elif arity == -2: - trace.add_operation(instr_id, [gate[2], gate[3]], [gate[1]]) - return - - for qir_id, instr_id in _MEAS_MAP: - if op == qir_id: - trace.add_operation(instr_id, [gate[1]]) - return - - for skip_id in _SKIP: - if op == skip_id: - return - - # The only unhandled QirInstructionId is CorrelatedNoise - assert op == QirInstructionId.CorrelatedNoise, f"Unexpected QIR instruction: {op}" - raise NotImplementedError(f"Unsupported QIR instruction: {op}") diff --git a/source/pip/qsharp/qre/interop/_qsharp.py b/source/pip/qsharp/qre/interop/_qsharp.py deleted file mode 100644 index 83c2cee60d..0000000000 --- a/source/pip/qsharp/qre/interop/_qsharp.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from __future__ import annotations - -from pathlib import Path -import time -from typing import Callable, Optional - -from ..._qsharp import logical_counts -from ...estimator import LogicalCounts -from .._qre import Trace -from ..instruction_ids import CCX, MEAS_Z, RZ, T, READ_FROM_MEMORY, WRITE_TO_MEMORY -from ..property_keys import ( - EVALUATION_TIME, - ALGORITHM_COMPUTE_QUBITS, - ALGORITHM_MEMORY_QUBITS, -) - - -def _bucketize_rotation_counts( - rotation_count: int, rotation_depth: int -) -> list[tuple[int, int]]: - """ - Return a list of (count, depth) pairs representing the rotation layers in - the trace. - - The following properties hold for the returned list ``result``: - - sum(depth for _, depth in result) == rotation_depth - - sum(count * depth for count, depth in result) == rotation_count - - count > 0 for each (count, _) in result - - count <= qubit_count for each (count, _) in result holds by definition - when rotation_count <= rotation_depth * qubit_count - - Args: - rotation_count: Total number of rotations. - rotation_depth: Total depth of the rotation layers. - - Returns: - A list of (count, depth) pairs, where 'count' is the number of - rotations in a layer and 'depth' is the depth of that layer. - """ - if rotation_depth == 0: - return [] - - base = rotation_count // rotation_depth - extra = rotation_count % rotation_depth - - result: list[tuple[int, int]] = [] - if extra > 0: - result.append((base + 1, extra)) - if rotation_depth - extra > 0: - result.append((base, rotation_depth - extra)) - return result - - -def trace_from_entry_expr(entry_expr: str | Callable | LogicalCounts, *args) -> Trace: - """Convert a Q# entry expression into a resource-estimation Trace. - - Evaluates the entry expression to obtain logical counts, then builds - a trace containing the corresponding quantum operations. - - Args: - entry_expr (str | Callable | LogicalCounts): A Q# entry expression - string, a callable, or pre-computed logical counts. 
- *args: The arguments to pass to the callable, if one is provided. - - Returns: - Trace: A trace representing the resource profile of the program. - """ - - start = time.time_ns() - counts = ( - logical_counts(entry_expr, *args) - if not isinstance(entry_expr, LogicalCounts) - else entry_expr - ) - evaluation_time = time.time_ns() - start - - ccx_count = counts.get("cczCount", 0) + counts.get("ccixCount", 0) - - # Q# logical counts report total number of qubits (compute + memory) - num_qubits = counts.get("numQubits", 0) - # Compute qubits may be reported separately - compute_qubits = counts.get("numComputeQubits", num_qubits) - memory_qubits = num_qubits - compute_qubits - - trace = Trace(compute_qubits) - - rotation_count = counts.get("rotationCount", 0) - rotation_depth = counts.get("rotationDepth", rotation_count) - - if rotation_count != 0 and rotation_depth != 0: - for count, depth in _bucketize_rotation_counts(rotation_count, rotation_depth): - block = trace.add_block(repetitions=depth) - for i in range(count): - block.add_operation(RZ, [i]) - - if t_count := counts.get("tCount", 0): - block = trace.add_block(repetitions=t_count) - block.add_operation(T, [0]) - - if ccx_count: - block = trace.add_block(repetitions=ccx_count) - block.add_operation(CCX, [0, 1, 2]) - - if meas_count := counts.get("measurementCount", 0): - block = trace.add_block(repetitions=meas_count) - block.add_operation(MEAS_Z, [0]) - - if memory_qubits != 0: - trace.memory_qubits = memory_qubits - - if rfm_count := counts.get("readFromMemoryCount", 0): - block = trace.add_block(repetitions=rfm_count) - block.add_operation(READ_FROM_MEMORY, [0, compute_qubits]) - - if wtm_count := counts.get("writeToMemoryCount", 0): - block = trace.add_block(repetitions=wtm_count) - block.add_operation(WRITE_TO_MEMORY, [0, compute_qubits]) - - trace.set_property(EVALUATION_TIME, evaluation_time) - trace.set_property(ALGORITHM_COMPUTE_QUBITS, compute_qubits) - trace.set_property(ALGORITHM_MEMORY_QUBITS, 
memory_qubits) - return trace - - -def trace_from_entry_expr_cached( - entry_expr: str | Callable | LogicalCounts, cache_path: Optional[Path], *args -) -> Trace: - """Convert a Q# entry expression into a Trace, with optional caching. - - If *cache_path* is provided and exists, the trace is loaded from disk. - Otherwise, the trace is computed via ``trace_from_entry_expr`` and - optionally written to *cache_path*. - - Args: - entry_expr (str | Callable | LogicalCounts): A Q# entry expression - string, a callable, or pre-computed logical counts. - cache_path (Optional[Path]): Path for reading/writing the cached - trace. If None, caching is disabled. - - Returns: - Trace: A trace representing the resource profile of the program. - """ - if cache_path and cache_path.exists(): - return Trace.from_json(cache_path.read_text()) - - trace = trace_from_entry_expr(entry_expr, *args) - - if cache_path: - cache_path.parent.mkdir(parents=True, exist_ok=True) - cache_path.write_text(trace.to_json()) - - return trace diff --git a/source/pip/qsharp/qre/models/__init__.py b/source/pip/qsharp/qre/models/__init__.py deleted file mode 100644 index 3da76797ac..0000000000 --- a/source/pip/qsharp/qre/models/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from .factories import Litinski19Factory, MagicUpToClifford, RoundBasedFactory -from .qec import ( - SurfaceCode, - ThreeAux, - OneDimensionalYokedSurfaceCode, - TwoDimensionalYokedSurfaceCode, -) -from .qubits import GateBased, Majorana - -__all__ = [ - "GateBased", - "Litinski19Factory", - "Majorana", - "MagicUpToClifford", - "RoundBasedFactory", - "SurfaceCode", - "ThreeAux", - "OneDimensionalYokedSurfaceCode", - "TwoDimensionalYokedSurfaceCode", -] diff --git a/source/pip/qsharp/qre/models/factories/__init__.py b/source/pip/qsharp/qre/models/factories/__init__.py deleted file mode 100644 index e652dfc983..0000000000 --- a/source/pip/qsharp/qre/models/factories/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from ._litinski import Litinski19Factory -from ._round_based import RoundBasedFactory -from ._utils import MagicUpToClifford - -__all__ = ["Litinski19Factory", "MagicUpToClifford", "RoundBasedFactory"] diff --git a/source/pip/qsharp/qre/models/factories/_litinski.py b/source/pip/qsharp/qre/models/factories/_litinski.py deleted file mode 100644 index ffe4b2558d..0000000000 --- a/source/pip/qsharp/qre/models/factories/_litinski.py +++ /dev/null @@ -1,395 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from __future__ import annotations - -from dataclasses import dataclass -from math import ceil -from typing import Generator - -from ..._architecture import ISAContext -from ..._qre import ISARequirements, ConstraintBound, ISA -from ..._instruction import ISATransform, constraint, LOGICAL -from ...instruction_ids import T, CNOT, H, MEAS_Z, CCZ - - -@dataclass -class Litinski19Factory(ISATransform): - """ - T and CCZ factories based on the paper - [arXiv:1905.06903](https://arxiv.org/abs/1905.06903). - - It contains two categories of estimates. 
If the input T error rate is - similar to the Clifford error, it produces magic state instructions based on - Table 1 in the paper. If the input T error rate is at most 10 times higher - than the Clifford error rate, it produces magic state instructions based on - Table 2 in the paper. - - It requires Clifford error rates of at most 0.1% for CNOT, H, and MEAS_Z - instructions. If these instructions have different error rates, the maximum - error rate is assumed. - - References: - - - Daniel Litinski: Magic state distillation: not as costly as you think, - [arXiv:1905.06903](https://arxiv.org/abs/1905.06903) - """ - - def __post_init__(self): - self._initialize_entries() - - @staticmethod - def required_isa() -> ISARequirements: - return ISARequirements( - # T error rate may be at least 10x higher than Clifford error rates - constraint(T, error_rate=ConstraintBound.le(1e-2)), - constraint(H, error_rate=ConstraintBound.le(1e-3)), - constraint(CNOT, arity=2, error_rate=ConstraintBound.le(1e-3)), - constraint(MEAS_Z, error_rate=ConstraintBound.le(1e-3)), - ) - - def provided_isa( - self, impl_isa: ISA, ctx: ISAContext - ) -> Generator[ISA, None, None]: - """Yield ISAs with T and CCZ factory instructions. - - Args: - impl_isa (ISA): The implementation ISA providing physical gates. - ctx (ISAContext): The enumeration context. - - Yields: - ISA: An ISA containing distilled T and/or CCZ instructions. 
- """ - h = impl_isa[H] - cnot = impl_isa[CNOT] - meas_z = impl_isa[MEAS_Z] - t = impl_isa[T] - - clifford_error_rate = max( - h.expect_error_rate(), - cnot.expect_error_rate(), - meas_z.expect_error_rate(), - ) - - t_error_rate = t.expect_error_rate() - - entries_by_state = None - - if clifford_error_rate <= 1e-4: - if t_error_rate <= 1e-4: - entries_by_state = self._entries[1e-4][0] - elif t_error_rate <= 1e-3: - entries_by_state = self._entries[1e-4][1] - else: - # NOTE: This assertion is valid due to the constraint bound in the - # required_isa method - assert clifford_error_rate <= 1e-3 - if t_error_rate <= 1e-3: - entries_by_state = self._entries[1e-3][0] - elif t_error_rate <= 1e-2: - entries_by_state = self._entries[1e-3][1] - - if entries_by_state is None: - return - - t_entries = entries_by_state.get(T, []) - ccz_entries = entries_by_state.get(CCZ, []) - - syndrome_extraction_time = ( - 4 * impl_isa[CNOT].expect_time() - + impl_isa[H].expect_time() - + impl_isa[MEAS_Z].expect_time() - ) - - def make_node(entry: _Entry) -> int: - # Convert cycles (number of syndrome extraction cycles) to time - # based on fast surface code - time = ceil(syndrome_extraction_time * entry.cycles) - - # NOTE: If the protocol outputs multiple states, we assume that the - # space cost is divided by the number of output states. This is a - # simplification that allows us to fit all protocols in the ISA, but - # it may not be accurate for all protocols. 
- return ctx.add_instruction( - entry.state, - arity=3 if entry.state == CCZ else 1, - encoding=LOGICAL, - space=ceil(entry.space / entry.output_states), - time=time, - error_rate=entry.error_rate, - transform=self, - source=[cnot, h, meas_z, t], - ) - - # Yield combinations of T and CCZ entries - if ccz_entries: - for t_entry in t_entries: - for ccz_entry in ccz_entries: - yield ctx.make_isa( - make_node(t_entry), - make_node(ccz_entry), - ) - else: - # Table 2 scenarios: only T gates available - for t_entry in t_entries: - yield ctx.make_isa(make_node(t_entry)) - - def _initialize_entries(self): - """Initialize the distillation protocol lookup tables.""" - self._entries = { - # Assuming a Clifford error rate of at most 1e-4: - 1e-4: ( - # Assuming a T error rate of at most 1e-4 (Table 1): - { - T: [ - _Entry(_Protocol(15, 1, 7, 3, 3), 4.4e-8, 810, 18.1), - _Entry(_Protocol(15, 1, 9, 3, 3), 9.3e-10, 1_150, 18.1), - _Entry(_Protocol(15, 1, 11, 5, 5), 1.9e-11, 2_070, 30.0), - _Entry( - [ - (_Protocol(15, 1, 9, 3, 3), 4), - (_Protocol(20, 4, 15, 7, 9), 1), - ], - 2.4e-15, - 16_400, - 90.3, - ), - _Entry( - [ - (_Protocol(15, 1, 9, 3, 3), 4), - (_Protocol(15, 1, 25, 9, 9), 1), - ], - 6.3e-25, - 18_600, - 67.8, - ), - _Entry(_Protocol(15, 1, 9, 3, 3), 1.5e-9, 762, 36.2), - ], - CCZ: [ - _Entry( - [ - (_Protocol(15, 1, 7, 3, 3), 4), - (_Protocol(8, 1, 15, 7, 9, CCZ), 1), - ], - 7.2e-14, - 12_400, - 36.1, - ), - ], - }, - # Assuming a T error rate of at most 1e-3 (10x higher than Clifford, Table 2): - { - T: [ - _Entry(_Protocol(15, 1, 9, 3, 3), 2.1e-8, 1_150, 18.2), - _Entry( - [ - (_Protocol(15, 1, 7, 3, 3), 6), - (_Protocol(20, 4, 13, 5, 7), 1), - ], - 1.4e-12, - 13_200, - 70, - ), - _Entry( - [ - (_Protocol(15, 1, 9, 3, 3), 4), - (_Protocol(20, 4, 15, 7, 9), 1), - ], - 6.6e-15, - 16_400, - 91.2, - ), - _Entry( - [ - (_Protocol(15, 1, 9, 3, 3), 4), - (_Protocol(15, 1, 25, 9, 9), 1), - ], - 4.2e-22, - 18_600, - 68.4, - ), - ], - CCZ: [], - }, - ), - # Assuming a 
Clifford error rate of at most 1e-3: - 1e-3: ( - # Assuming a T error rate of at most 1e-3 (Table 1): - { - T: [ - _Entry(_Protocol(15, 1, 17, 7, 7), 4.5e-8, 4_620, 42.6), - _Entry( - [ - (_Protocol(15, 1, 13, 5, 5), 6), - (_Protocol(20, 4, 23, 11, 13), 1), - ], - 1.4e-10, - 43_300, - 130, - ), - _Entry( - [ - (_Protocol(15, 1, 13, 5, 5), 4), - (_Protocol(20, 4, 27, 13, 15), 1), - ], - 2.6e-11, - 46_800, - 157, - ), - _Entry( - [ - (_Protocol(15, 1, 11, 5, 5), 6), - (_Protocol(15, 1, 25, 11, 11), 1), - ], - 2.7e-12, - 30_700, - 82.5, - ), - _Entry( - [ - (_Protocol(15, 1, 13, 5, 5), 6), - (_Protocol(15, 1, 29, 11, 13), 1), - ], - 3.3e-14, - 39_100, - 97.5, - ), - _Entry( - [ - (_Protocol(15, 1, 15, 7, 7), 6), - (_Protocol(15, 1, 41, 17, 17), 1), - ], - 4.5e-20, - 73_400, - 128, - ), - ], - CCZ: [ - _Entry( - [ - (_Protocol(15, 1, 13, 7, 7), 6), - (_Protocol(8, 1, 25, 15, 15, CCZ), 1), - ], - 5.2e-11, - 47_000, - 60, - ), - ], - }, - # Assuming a T error rate of at most 1e-2 (10x higher than Clifford, Table 2): - { - T: [ - _Entry( - [ - (_Protocol(15, 1, 13, 5, 5), 6), - (_Protocol(20, 4, 21, 11, 13), 1), - ], - 5.7e-9, - 40_700, - 130, - ), - _Entry( - [ - (_Protocol(15, 1, 11, 5, 5), 6), - (_Protocol(15, 1, 21, 9, 11), 1), - ], - 2.1e-10, - 27_400, - 85.7, - ), - _Entry( - [ - (_Protocol(15, 1, 11, 5, 5), 6), - (_Protocol(15, 1, 23, 11, 11), 1), - ], - 2.5e-11, - 29_500, - 85.7, - ), - _Entry( - [ - (_Protocol(15, 1, 11, 5, 5), 6), - (_Protocol(15, 1, 25, 11, 11), 1), - ], - 6.4e-12, - 30_700, - 85.7, - ), - _Entry( - [ - (_Protocol(15, 1, 13, 7, 7), 8), - (_Protocol(15, 1, 29, 13, 13), 1), - ], - 1.5e-13, - 52_400, - 97.5, - ), - ], - CCZ: [], - }, - ), - } - - -@dataclass(frozen=True, slots=True) -class _Entry: - """A single distillation protocol entry from the Litinski tables. - - Attributes: - protocol (list[tuple[_Protocol, int]] | _Protocol): The distillation - protocol or pipeline of protocols. - error_rate (float): Output error rate of the protocol. 
- space (int): Space cost in physical qubits. - cycles (float): Number of syndrome extraction cycles. - """ - - protocol: list[tuple[_Protocol, int]] | _Protocol - error_rate: float - # Space estimation in number of physical qubits - space: int - # Number of code cycles to estimate time; a code cycle corresponds to - # measuring all surface-code check operators exactly once. - cycles: float - - @property - def output_states(self) -> int: - """Return the number of output magic states.""" - if isinstance(self.protocol, list): - return self.protocol[-1][0].output_states - else: - return self.protocol.output_states - - @property - def state(self) -> int: - """Return the magic state instruction ID (T or CCZ).""" - if isinstance(self.protocol, list): - return self.protocol[-1][0].state - else: - return self.protocol.state - - -@dataclass(frozen=True, slots=True) -class _Protocol: - """Parameters for a single distillation protocol. - - Attributes: - input_states (int): Number of input T states. - output_states (int): Number of output T states. - d_x (int): Spatial X distance. - d_z (int): Spatial Z distance. - d_m (int): Temporal distance. - state (int): Magic state instruction ID. Default is T. - """ - - # Number of input T states in protocol - input_states: int - # Number of output T states in protocol - output_states: int - # Spatial X distance (arXiv:1905.06903, Section 2) - d_x: int - # Spatial Z distance (arXiv:1905.06903, Section 2) - d_z: int - # Temporal distance (arXiv:1905.06903, Section 2) - d_m: int - # Magic state - state: int = T diff --git a/source/pip/qsharp/qre/models/factories/_round_based.py b/source/pip/qsharp/qre/models/factories/_round_based.py deleted file mode 100644 index 982ce78cec..0000000000 --- a/source/pip/qsharp/qre/models/factories/_round_based.py +++ /dev/null @@ -1,461 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from __future__ import annotations - -import copy -import hashlib -import logging -from dataclasses import dataclass, field -from itertools import combinations_with_replacement -from math import ceil -from pathlib import Path -from typing import Callable, Generator, Iterable, Optional, Sequence - -from ..._qre import ( - ISA, - InstructionFrontier, - ISARequirements, - Instruction, - _binom_ppf, - _ProvenanceGraph, -) -from ..._instruction import ( - LOGICAL, - PHYSICAL, - ISAQuery, - ISATransform, - constraint, -) -from ..._architecture import ISAContext -from ...instruction_ids import CNOT, LATTICE_SURGERY, T, MEAS_ZZ -from ..qec import SurfaceCode - - -logger = logging.getLogger(__name__) - - -@dataclass(frozen=True) -class RoundBasedFactory(ISATransform): - """ - A magic state factory that produces T gate instructions using round-based - distillation pipelines. - - This factory explores combinations of distillation units (such as "15-to-1 - RM prep" and "15-to-1 space efficient") to find optimal configurations that - minimize time and space while achieving target error rates. It supports - both physical-level distillation (when the input T gate is physically - encoded) and logical-level distillation (using lattice surgery via surface - codes). - - In order to account for the success probability of distillation rounds, the - factory models the pipeline using a failure probability requirement - (defaulting to 1%) that each round must meet. The number of distillation - units per round is adjusted to meet this requirement, which in turn affects - the overall space requirements. - - Space requirements are calculated using a user-provided function that - aggregates per-round space (e.g., sum or max). The ``sum`` function models - the case in which qubits are not reused across rounds, while the ``max`` - function models the case in which qubits are reused across rounds. 
- - For the enumeration of logical-level distillation units, the factory relies - on a user-provided ``ISAQuery`` (defaulting to ``SurfaceCode.q()``) to explore - different surface code configurations and their corresponding lattice - surgery instructions. These need to be provided by the user and cannot - automatically be derived from the provided implementation ISA, as they can - only contain a subset of the required instructions. The user needs to - ensure that the provided query matches the architecture for which this - factory is being used. - - Results are cached to disk for efficiency. - - Attributes: - code_query: ISAQuery - Query to enumerate QEC codes for logical distillation units. - Defaults to SurfaceCode.q(). - physical_qubit_calculation: Callable[[Iterable], int] - Function to calculate total physical qubits from per-round space - requirements, e.g., sum or max. Defaults to sum. - cache_dir: Path - Directory for caching computed factory configurations. Defaults to - ~/.cache/re3/round_based. - use_cache: bool - Whether to use cached results. Defaults to True. - - References: - - - Sergei Bravyi, Alexei Kitaev: Universal Quantum Computation with ideal - Clifford gates and noisy ancillas, - [arXiv:quant-ph/0403025](https://arxiv.org/abs/quant-ph/0403025) - - Michael E. Beverland, Prakash Murali, Matthias Troyer, Krysta M. 
Svore, - Torsten Hoefler, Vadym Kliuchnikov, Guang Hao Low, Mathias Soeken, Aarthi - Sundaram, Alexander Vaschillo: Assessing requirements to scale to - practical quantum advantage, - [arXiv:2211.07629](https://arxiv.org/pdf/2211.07629) - """ - - code_query: ISAQuery = field(default_factory=lambda: SurfaceCode.q()) - physical_qubit_calculation: Callable[[Iterable], int] = field(default=sum) - # optional: make cache directory configurable - cache_dir: Path = field( - default=Path.home() / ".cache" / "re3" / "round_based", repr=False - ) - use_cache: bool = field(default=True, repr=False) - - @staticmethod - def required_isa() -> ISARequirements: - # NOTE: A T gate is required, but a CNOT is only required to explore - # physical units. - return ISARequirements( - constraint(T), - ) - - def provided_isa( - self, impl_isa: ISA, ctx: ISAContext - ) -> Generator[ISA, None, None]: - cache_path = self._cache_path(impl_isa) - - # 1) Try to load from cache - if self.use_cache and cache_path.exists(): - cached_states = InstructionFrontier.load(str(cache_path)) - for state in cached_states: - yield ctx.make_isa( - ctx.add_instruction(state, transform=self, source=[impl_isa[T]]) - ) - return - - # 2) Compute as before - t_gate_error = impl_isa[T].expect_error_rate() - - units: list[_DistillationUnit] = [] - initial_unit = [] - - # Physical units? 
- if impl_isa[T].encoding == PHYSICAL: - clifford_gate = impl_isa.get(CNOT) or impl_isa.get(MEAS_ZZ) - if clifford_gate is None: - raise ValueError( - "CNOT or MEAS_ZZ instruction is required for physical units" - ) - - gate_time = clifford_gate.expect_time() - clifford_error = clifford_gate.expect_error_rate() - units.extend(self._physical_units(gate_time, clifford_error)) - else: - initial_unit.append( - _DistillationUnit( - 1, - impl_isa[T].expect_time(), - impl_isa[T].expect_space(), - [1, 0], - [0], - ) - ) - - # create a fresh inner context of the given one - inner_ctx = copy.copy(ctx) - inner_ctx._provenance = _ProvenanceGraph() - for code_isa in self.code_query.enumerate(inner_ctx): - units.extend(self._logical_units(code_isa[LATTICE_SURGERY])) - - optimal_states = InstructionFrontier() - - for r in range(1, 4 - len(initial_unit)): - for k in combinations_with_replacement(units, r): - pipeline = _Pipeline.try_create( - initial_unit + list(k), - t_gate_error, - physical_qubit_calculation=self.physical_qubit_calculation, - ) - if pipeline is not None: - state = self._state_from_pipeline(pipeline) - optimal_states.insert(state) - logger.debug(f"Optimal states after {r} rounds: {len(optimal_states)}") - - # 3) Save to cache, then yield - if self.use_cache: - optimal_states.dump(str(cache_path)) - - for state in optimal_states: - yield ctx.make_isa( - ctx.add_instruction(state, transform=self, source=[impl_isa[T]]) - ) - - def _physical_units(self, gate_time, clifford_error) -> list[_DistillationUnit]: - """Return physical distillation units for the given gate parameters.""" - return [ - _DistillationUnit( - num_input_states=15, - time=24 * gate_time, - space=31, - error_rate_coeffs=[35, 0.0, 0.0, 7.1 * clifford_error], - failure_probability_coeffs=[15, 356 * clifford_error], - name="15-to-1 RM prep", - ), - _DistillationUnit( - num_input_states=15, - time=45 * gate_time, - space=12, - error_rate_coeffs=[35, 0.0, 0.0, 7.1 * clifford_error], - 
failure_probability_coeffs=[15, 356 * clifford_error], - name="15-to-1 space efficient", - ), - ] - - def _logical_units( - self, lattice_surgery_instruction: Instruction - ) -> list[_DistillationUnit]: - """Return logical distillation units derived from a lattice surgery instruction.""" - logical_cycle_time = lattice_surgery_instruction.expect_time(1) - logical_error = lattice_surgery_instruction.expect_error_rate(1) - - return [ - _DistillationUnit( - num_input_states=15, - time=11 * logical_cycle_time, - space=lattice_surgery_instruction.expect_space(31), - error_rate_coeffs=[35, 0.0, 0.0, 7.1 * logical_error], - failure_probability_coeffs=[15, 356 * logical_error], - name="15-to-1 RM prep", - ), - _DistillationUnit( - num_input_states=15, - time=13 * logical_cycle_time, - space=lattice_surgery_instruction.expect_space(20), - error_rate_coeffs=[35, 0.0, 0.0, 7.1 * logical_error], - failure_probability_coeffs=[15, 356 * logical_error], - name="15-to-1 space efficient", - ), - ] - - def _state_from_pipeline(self, pipeline: _Pipeline) -> Instruction: - """Create a T-gate instruction from a distillation pipeline.""" - return Instruction.fixed_arity( - T, - int(LOGICAL), - 1, - pipeline.time, - pipeline.space, - None, - pipeline.error_rate, - ) - - def _cache_key(self, impl_isa: ISA) -> str: - """Build a deterministic key from factory configuration and impl_isa.""" - parts = [ - f"factory={type(self).__qualname__}", - f"code_query={repr(self.code_query)}", - f"physical_qubit_calculation={self.physical_qubit_calculation.__name__}", - ] - - # Include full instruction details, sorted by id for determinism - for instr in sorted(impl_isa, key=lambda i: i.id): - parts.append( - f"id={instr.id}|encoding={instr.encoding}|arity={instr.arity}" - f"|time={instr.time()}|space={instr.space()}" - f"|error_rate={instr.error_rate()}" - ) - - data = "\n".join(parts).encode("utf-8") - return hashlib.sha256(data).hexdigest() - - def _cache_path(self, impl_isa: ISA) -> Path: - """Return 
the cache file path for the given implementation ISA.""" - self.cache_dir.mkdir(parents=True, exist_ok=True) - return self.cache_dir / f"{self._cache_key(impl_isa)}.json" - - -class _Pipeline: - """A multi-round distillation pipeline.""" - - def __init__( - self, - units: Sequence[_DistillationUnit], - initial_input_error_rate: float, - *, - failure_probability_requirement: float = 0.01, - physical_qubit_calculation: Callable[[Iterable], int] = sum, - ): - self.failure_probability_requirement = failure_probability_requirement - self.rounds: list["_DistillationRound"] = [] - self.output_error_rate: float = initial_input_error_rate - self.physical_qubit_calculation = physical_qubit_calculation - - self._add_rounds(units) - - @classmethod - def try_create( - cls, - units: Sequence[_DistillationUnit], - initial_input_error_rate: float, - *, - failure_probability_requirement: float = 0.01, - physical_qubit_calculation: Callable[[Iterable], int] = sum, - ) -> Optional[_Pipeline]: - """Create a pipeline if the configuration is feasible. - - Returns: - Optional[_Pipeline]: The pipeline, or None if the required - number of units per round is infeasible. 
- """ - pipeline = cls( - units, - initial_input_error_rate, - failure_probability_requirement=failure_probability_requirement, - physical_qubit_calculation=physical_qubit_calculation, - ) - if not pipeline._compute_units_per_round(): - return None - return pipeline - - def _compute_units_per_round(self) -> bool: - """Adjust the number of units per round to meet output requirements.""" - if len(self.rounds) > 0: - states_needed_next = self.rounds[-1].unit.num_output_states - - for dist_round in reversed(self.rounds): - if not dist_round.adjust_num_units_to(states_needed_next): - return False - states_needed_next = dist_round.num_input_states - - return True - - def _add_rounds(self, units: Sequence[_DistillationUnit]): - """Append distillation rounds from the given units.""" - per_round_failure_prob_req = self.failure_probability_requirement / len(units) - - for unit in units: - self.rounds.append( - _DistillationRound( - unit, - per_round_failure_prob_req, - self.output_error_rate, - ) - ) - self.output_error_rate = unit.error_rate(self.output_error_rate) - - @property - def space(self) -> int: - """Total physical-qubit space of the pipeline.""" - return self.physical_qubit_calculation(round.space for round in self.rounds) - - @property - def time(self) -> int: - """Total time of the pipeline in nanoseconds.""" - return sum(round.unit.time for round in self.rounds) - - @property - def error_rate(self) -> float: - """Output error rate of the pipeline.""" - return self.output_error_rate - - @property - def num_output_states(self) -> int: - """Number of output magic states produced by the pipeline.""" - return self.rounds[-1].compute_num_output_states() - - -@dataclass(slots=True) -class _DistillationUnit: - """A single distillation unit with fixed input/output characteristics.""" - - num_input_states: int - time: int - space: int - error_rate_coeffs: Sequence[float] - failure_probability_coeffs: Sequence[float] - name: Optional[str] = None - num_output_states: int = 
1 - - def error_rate(self, input_error_rate: float) -> float: - """Compute the output error rate for a given input error rate.""" - result = 0.0 - for c in self.error_rate_coeffs: - result = result * input_error_rate + c - return result - - def failure_probability(self, input_error_rate: float) -> float: - """Compute the failure probability for a given input error rate.""" - result = 0.0 - for c in self.failure_probability_coeffs: - result = result * input_error_rate + c - return result - - -@dataclass(slots=True) -class _DistillationRound: - """A single round in a distillation pipeline.""" - - unit: _DistillationUnit - failure_probability_requirement: float - input_error_rate: float - num_units: int = 1 - failure_probability: float = field(init=False) - - def __post_init__(self): - self.failure_probability = self.unit.failure_probability(self.input_error_rate) - - def adjust_num_units_to(self, output_states_needed_next: int) -> bool: - """Adjust the number of units to produce at least the required output states.""" - if self.failure_probability == 0.0: - self.num_units = output_states_needed_next - return True - - # Binary search to find the minimal number of units needed - self.num_units = ceil(output_states_needed_next / self.max_num_output_states) - - while True: - num_output_states = self.compute_num_output_states() - if num_output_states < output_states_needed_next: - self.num_units *= 2 - - # Distillation round requires unreasonably high number of units - if self.num_units >= 1_000_000_000_000_000: - return False - else: - break - - upper = self.num_units - lower = self.num_units // 2 - while lower < upper: - self.num_units = (lower + upper) // 2 - num_output_states = self.compute_num_output_states() - if num_output_states >= output_states_needed_next: - upper = self.num_units - else: - lower = self.num_units + 1 - self.num_units = upper - - return True - - @property - def space(self) -> int: - """Total physical-qubit space for this round.""" - return 
self.num_units * self.unit.space - - @property - def num_input_states(self) -> int: - """Total number of input states consumed by this round.""" - return self.num_units * self.unit.num_input_states - - @property - def max_num_output_states(self) -> int: - """Maximum number of output states this round can produce.""" - return self.num_units * self.unit.num_output_states - - def compute_num_output_states(self) -> int: - """Compute the expected number of output states accounting for failure probability.""" - failure_prob = self.failure_probability - - if failure_prob <= 1e-8: - return self.num_units * self.unit.num_output_states - - # A replacement for SciPy's binom.ppf that is faster - k = _binom_ppf( - self.failure_probability_requirement, - self.num_units, - 1.0 - failure_prob, - ) - - return int(k) * self.unit.num_output_states diff --git a/source/pip/qsharp/qre/models/factories/_utils.py b/source/pip/qsharp/qre/models/factories/_utils.py deleted file mode 100644 index 0fbec26ed7..0000000000 --- a/source/pip/qsharp/qre/models/factories/_utils.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from typing import Generator - -from ..._architecture import ISAContext -from ..._qre import ISARequirements, ISA -from ..._instruction import ISATransform -from ...instruction_ids import ( - SQRT_SQRT_X, - SQRT_SQRT_X_DAG, - SQRT_SQRT_Y, - SQRT_SQRT_Y_DAG, - SQRT_SQRT_Z, - SQRT_SQRT_Z_DAG, - CCX, - CCY, - CCZ, -) - - -class MagicUpToClifford(ISATransform): - """ - An ISA transform that adds Clifford equivalent representations of magic - states. For example, if the input ISA contains a T gate, the provided ISA - will also contain ``SQRT_SQRT_X``, ``SQRT_SQRT_X_DAG``, ``SQRT_SQRT_Y``, - ``SQRT_SQRT_Y_DAG``, and ``T_DAG``. The same is applied for ``CCZ`` gates and - their Clifford equivalents. - - Example: - - .. 
code-block:: python - app = SomeApplication() - arch = SomeArchitecture() - - # This will contain CCX states - trace_query = PSSPC.q(ccx_magic_states=True) * LatticeSurgery.q() - - # This will contain CCZ states - isa_query = SurfaceCode.q() * Litinski19Factory.q() - - # There will be no results from the estimation because there is no - # instruction to support CCX magic states in the query - results = estimate(app, arch, isa_query, trace_query) - assert len(results) == 0 - - # We solve this by wrapping the Litinski19Factory with the - # MagicUpToClifford transform, which transforms the CCZ states in the - # provided ISA into CCX states. - isa_query = SurfaceCode.q() * MagicUpToClifford.q(source=Litinski19Factory.q()) - - # Now we will get results - results = estimate(app, arch, isa_query, trace_query) - assert len(results) != 0 - """ - - @staticmethod - def required_isa() -> ISARequirements: - return ISARequirements() - - def provided_isa(self, impl_isa, ctx: ISAContext) -> Generator[ISA, None, None]: - # Families of equivalent gates under Clifford conjugation. - families = [ - [ - SQRT_SQRT_X, - SQRT_SQRT_X_DAG, - SQRT_SQRT_Y, - SQRT_SQRT_Y_DAG, - SQRT_SQRT_Z, - SQRT_SQRT_Z_DAG, - ], - [CCX, CCY, CCZ], - ] - - # For each family, if any member of the family is present in the input ISA, add all members of the family to the provided ISA. - for family in families: - for id in family: - if id in impl_isa: - instr = impl_isa[id] - for equivalent_id in family: - if equivalent_id != id: - node_idx = ctx.add_instruction( - instr.with_id(equivalent_id), - transform=self, - source=[instr], - ) - impl_isa.add_node(equivalent_id, node_idx) - break # Check next family - - yield impl_isa diff --git a/source/pip/qsharp/qre/models/qec/__init__.py b/source/pip/qsharp/qre/models/qec/__init__.py deleted file mode 100644 index 4e4cf816f7..0000000000 --- a/source/pip/qsharp/qre/models/qec/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) Microsoft Corporation. 
-# Licensed under the MIT License. - -from ._surface_code import SurfaceCode -from ._three_aux import ThreeAux -from ._yoked import OneDimensionalYokedSurfaceCode, TwoDimensionalYokedSurfaceCode - -__all__ = [ - "SurfaceCode", - "ThreeAux", - "OneDimensionalYokedSurfaceCode", - "TwoDimensionalYokedSurfaceCode", -] diff --git a/source/pip/qsharp/qre/models/qec/_surface_code.py b/source/pip/qsharp/qre/models/qec/_surface_code.py deleted file mode 100644 index 079187635a..0000000000 --- a/source/pip/qsharp/qre/models/qec/_surface_code.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from __future__ import annotations -from dataclasses import KW_ONLY, dataclass, field -from typing import Generator, Optional -from ..._instruction import ( - ISA, - ISARequirements, - ISATransform, - constraint, - ConstraintBound, - LOGICAL, -) -from ..._isa_enumeration import ISAContext -from ..._qre import linear_function -from ...instruction_ids import CNOT, H, LATTICE_SURGERY, MEAS_Z -from ...property_keys import ( - SURFACE_CODE_ONE_QUBIT_TIME_FACTOR, - SURFACE_CODE_TWO_QUBIT_TIME_FACTOR, -) - - -@dataclass -class SurfaceCode(ISATransform): - """ - This class models the gate-based rotated surface code. - - Attributes: - crossing_prefactor: float - The prefactor for logical error rate due to error correction - crossings. (Default is 0.03, see Eq. (11) in - [arXiv:1208.0928](https://arxiv.org/abs/1208.0928)) - error_correction_threshold: float - The error correction threshold for the surface code. (Default is - 0.01 (1%), see [arXiv:1009.3686](https://arxiv.org/abs/1009.3686)) - one_qubit_gate_depth: int - The depth of one-qubit gates in each syndrome extraction cycle. - (Default is 1, see Fig. 2 in [arXiv:1009.3686](https://arxiv.org/abs/1009.3686)) - two_qubit_gate_depth: int - The depth of two-qubit gates in each syndrome extraction cycle. - (Default is 4, see Fig. 
2 in [arXiv:1009.3686](https://arxiv.org/abs/1009.3686)) - code_cycle_override: Optional[int] - If provided, this value will be used as the time for each syndrome - extraction cycle instead of the default calculation based on gate - times and depths. (Default is None) - code_cycle_offset: int - An additional time offset to add to the syndrome extraction cycle - time. (Default is 0) - - Hyper parameters: - distance: int - The code distance of the surface code. - - References: - - - Dominic Horsman, Austin G. Fowler, Simon Devitt, Rodney Van Meter: Surface - code quantum computing by lattice surgery, - [arXiv:1111.4022](https://arxiv.org/abs/1111.4022) - - Austin G. Fowler, Matteo Mariantoni, John M. Martinis, Andrew N. Cleland: - Surface codes: Towards practical large-scale quantum computation, - [arXiv:1208.0928](https://arxiv.org/abs/1208.0928) - - David S. Wang, Austin G. Fowler, Lloyd C. L. Hollenberg: Quantum computing - with nearest neighbor interactions and error rates over 1%, - [arXiv:1009.3686](https://arxiv.org/abs/1009.3686) - """ - - crossing_prefactor: float = 0.03 - error_correction_threshold: float = 0.01 - one_qubit_gate_depth: int = 1 - two_qubit_gate_depth: int = 4 - code_cycle_override: Optional[int] = None - code_cycle_offset: int = 0 - _: KW_ONLY - distance: int = field(default=3, metadata={"domain": range(3, 26, 2)}) - - @staticmethod - def required_isa() -> ISARequirements: - return ISARequirements( - constraint(H, error_rate=ConstraintBound.lt(0.01)), - constraint(CNOT, arity=2, error_rate=ConstraintBound.lt(0.01)), - constraint(MEAS_Z, error_rate=ConstraintBound.lt(0.01)), - ) - - def provided_isa( - self, impl_isa: ISA, ctx: ISAContext - ) -> Generator[ISA, None, None]: - cnot = impl_isa[CNOT] - h = impl_isa[H] - meas_z = impl_isa[MEAS_Z] - - cnot_time = cnot.expect_time() - h_time = h.expect_time() - meas_time = meas_z.expect_time() - - physical_error_rate = max( - cnot.expect_error_rate(), - h.expect_error_rate(), - 
meas_z.expect_error_rate(), - ) - - # There are d^2 data qubits and (d^2 - 1) ancilla qubits in the rotated - # surface code. (See Section 7.1 in arXiv:1111.4022) - space_formula = linear_function(2 * self.distance**2 - 1) - - # Each syndrome extraction cycle consists of ancilla preparation, 4 - # rounds of CNOTs, and measurement. (See Fig. 2 in arXiv:1009.3686); - # these may be modified by the one_qubit_gate_depth and - # two_qubit_gate_depth parameters, or scaled by the time factors - # provided in the instruction properties. The syndrome extraction cycle - # is repeated d times for a distance-d code. - one_qubit_gate_depth = self.one_qubit_gate_depth * h.get_property_or( - SURFACE_CODE_ONE_QUBIT_TIME_FACTOR, 1 - ) - two_qubit_gate_depth = self.two_qubit_gate_depth * cnot.get_property_or( - SURFACE_CODE_TWO_QUBIT_TIME_FACTOR, 1 - ) - - if self.code_cycle_override is not None: - code_cycle_time = self.code_cycle_override - else: - code_cycle_time = ( - one_qubit_gate_depth * h_time - + two_qubit_gate_depth * cnot_time - + meas_time - ) - code_cycle_time += self.code_cycle_offset - time_value = code_cycle_time * self.distance - - # See Eqs. 
(10) and (11) in arXiv:1208.0928 - error_formula = linear_function( - self.crossing_prefactor - * ( - (physical_error_rate / self.error_correction_threshold) - ** ((self.distance + 1) // 2) - ) - ) - - # We provide a generic lattice surgery instruction (See Section 3 in - # arXiv:1111.4022) - yield ctx.make_isa( - ctx.add_instruction( - LATTICE_SURGERY, - encoding=LOGICAL, - arity=None, - space=space_formula, - time=time_value, - error_rate=error_formula, - transform=self, - source=[cnot, h, meas_z], - distance=self.distance, - code_cycle_time=code_cycle_time, - ), - ) diff --git a/source/pip/qsharp/qre/models/qec/_three_aux.py b/source/pip/qsharp/qre/models/qec/_three_aux.py deleted file mode 100644 index 80136e4126..0000000000 --- a/source/pip/qsharp/qre/models/qec/_three_aux.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from __future__ import annotations - -from dataclasses import KW_ONLY, dataclass, field -from typing import Generator - -from ..._architecture import ISAContext -from ..._instruction import ( - LOGICAL, - ISATransform, - constraint, -) -from ..._qre import ( - ISA, - ISARequirements, - linear_function, -) -from ...instruction_ids import ( - LATTICE_SURGERY, - MEAS_X, - MEAS_XX, - MEAS_Z, - MEAS_ZZ, -) - - -@dataclass -class ThreeAux(ISATransform): - """ - This class models the pairwise measurement-based surface code with three - auxiliary qubits per stabilizer measurement. - - Hyper parameters: - distance: int - The code distance of the surface code. - single_rail: bool - Whether to use single-rail encoding. - - References: - - - Linnea Grans-Samuelsson, Ryan V. Mishmash, David Aasen, Christina Knapp, - Bela Bauer, Brad Lackey, Marcus P. 
da Silva, Parsa Bonderson: Improved - Pairwise Measurement-Based Surface Code, - [arXiv:2310.12981](https://arxiv.org/abs/2310.12981) - """ - - _: KW_ONLY - distance: int = field(default=3, metadata={"domain": range(3, 26, 2)}) - single_rail: bool = field(default=False) - - @staticmethod - def required_isa() -> ISARequirements: - return ISARequirements( - constraint(MEAS_X), - constraint(MEAS_Z), - constraint(MEAS_XX, arity=2), - constraint(MEAS_ZZ, arity=2), - ) - - def provided_isa( - self, impl_isa: ISA, ctx: ISAContext - ) -> Generator[ISA, None, None]: - meas_x = impl_isa[MEAS_X] - meas_z = impl_isa[MEAS_Z] - meas_xx = impl_isa[MEAS_XX] - meas_zz = impl_isa[MEAS_ZZ] - - gate_time = max(meas_xx.expect_time(), meas_zz.expect_time()) - - physical_error_rate = max( - meas_x.expect_error_rate(), - meas_z.expect_error_rate(), - meas_xx.expect_error_rate(), - meas_zz.expect_error_rate(), - ) - - # See arXiv:2310.12981, Table 1 and Figs. 2, 3, 4, 6, and 7 - depth = 5 if self.single_rail else 4 - - # See arXiv:2310.12981, Table 1 - error_correction_threshold = 0.0051 if self.single_rail else 0.0066 - - # See arXiv:2310.12981, Fig. 23 - crossing_prefactor = 0.05 - - # d^2 data qubits and 3 qubits for each of the d^2 - 1 stabilizer - # measurements - space_formula = linear_function(4 * self.distance**2 - 3) - - # The measurement circuits do not overlap perfectly, so there is an - # additional 4 steps that need to be accounted for independent of the - # distance (see Section 2 between Eqs. 
(2) and (3) in arXiv:2310.12981) - time_value = gate_time * (depth * self.distance + 4) - - # Typical fitting curve for surface code logical error (see - # arXiv:1208.0928) - error_formula = linear_function( - crossing_prefactor - * ( - (physical_error_rate / error_correction_threshold) - ** ((self.distance + 1) // 2) - ) - ) - - yield ctx.make_isa( - ctx.add_instruction( - LATTICE_SURGERY, - encoding=LOGICAL, - arity=None, - space=space_formula, - time=time_value, - error_rate=error_formula, - transform=self, - source=[meas_x, meas_z, meas_xx, meas_zz], - distance=self.distance, - code_cycle_time=gate_time * depth * self.distance, - ) - ) diff --git a/source/pip/qsharp/qre/models/qec/_yoked.py b/source/pip/qsharp/qre/models/qec/_yoked.py deleted file mode 100644 index 9cb1b26527..0000000000 --- a/source/pip/qsharp/qre/models/qec/_yoked.py +++ /dev/null @@ -1,243 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from dataclasses import dataclass -from math import ceil -from typing import Generator - -from ..._instruction import ISATransform, constraint, LOGICAL -from ..._qre import ISA, ISARequirements, generic_function -from ..._architecture import ISAContext -from ...instruction_ids import LATTICE_SURGERY, MEMORY -from ...property_keys import DISTANCE - - -@dataclass -class OneDimensionalYokedSurfaceCode(ISATransform): - """ - This class models the Yoked surface code to provide a generic memory - instruction based on lattice surgery instructions from a surface code like - error correction code. - - Attributes: - crossing_prefactor: float - The prefactor for logical error rate (Default is 0.016) - error_correction_threshold: float - The error correction threshold for the surface code (Default is - 0.064) - - Hyper parameters: - shape_heuristic: ShapeHeuristic - The heuristic to determine the shape of the surface code patch for a - given number of logical qubits. 
(Default is ShapeHeuristic.MIN_AREA) - - References: - - - Craig Gidney, Michael Newman, Peter Brooks, Cody Jones: Yoked surface - codes, [arXiv:2312.04522](https://arxiv.org/abs/2312.04522) - """ - - # NOTE: The crossing_prefactor is relative to that of the underlying surface - # code. That is if the surface code model is p(SC) = - # A*(p(phy)/th(SC))^((d+1)/2), then multiplier for its yoked extension is - # crossing_prefactor*A - crossing_prefactor: float = 8 / 15 - - # NOTE: The threshold is relative to that of the underlying surface code. - # Namely, as the yoking doubles the distance, one would expect the yoked - # surface code to have a threshold of sqrt(th(SC)). However modeling shows - # it falls short of this. - error_correction_threshold: float = 64 / 10 - - @staticmethod - def required_isa() -> ISARequirements: - # We require a lattice surgery instruction that also provides the code - # distance as a property. This is necessary to compute the time - # and error rate formulas for the provided memory instruction. 
- return ISARequirements( - constraint(LATTICE_SURGERY, LOGICAL, arity=None, distance=True), - ) - - def provided_isa( - self, impl_isa: ISA, ctx: ISAContext - ) -> Generator[ISA, None, None]: - lattice_surgery = impl_isa[LATTICE_SURGERY] - distance = lattice_surgery.get_property(DISTANCE) - assert distance is not None - - def space(arity: int) -> int: - a, b = self._min_area_shape(arity) - return lattice_surgery.expect_space(a * b) - - space_fn = generic_function(space) - - def time(arity: int) -> int: - a, b = self._min_area_shape(arity) - s = lattice_surgery.expect_time(a * b) - return s * (8 * distance * (a - 1) + 2 * distance) - - time_fn = generic_function(time) - - def error_rate(arity: int) -> float: - a, b = self._min_area_shape(arity) - rounds = 2 * (a - 2) - # logical error rate on a single surface code patch - p = lattice_surgery.expect_error_rate(1) - return ( - rounds**2 - * (a * b) ** 2 - * self.crossing_prefactor - * p - * (1 / self.error_correction_threshold) ** ((distance + 1) // 2) - ) - - error_rate_fn = generic_function(error_rate) - - yield ctx.make_isa( - ctx.add_instruction( - MEMORY, - arity=None, - encoding=LOGICAL, - space=space_fn, - time=time_fn, - error_rate=error_rate_fn, - transform=self, - source=[lattice_surgery], - distance=distance, - ) - ) - - @staticmethod - def _min_area_shape(num_qubits: int) -> tuple[int, int]: - """ - Given a number of qubits num_qubits, returns numbers (a + 1) and (b + 2) - such that a * b >= num_qubits and a * b is as small as possible. 
- """ - - best_a = None - best_b = None - best_qubits = num_qubits**2 - - for a in range(1, num_qubits): - # Compute required number of columns to reach the required number - # of logical qubits - b = ceil(num_qubits / a) - - qubits = (a + 1) * (b + 2) - if qubits < best_qubits: - best_qubits = qubits - best_a = a - best_b = b - - assert best_a is not None - assert best_b is not None - return best_a + 1, best_b + 2 - - -@dataclass -class TwoDimensionalYokedSurfaceCode(ISATransform): - """ - This class models the Yoked surface code to provide a generic memory - instruction based on lattice surgery instructions from a surface code like - error correction code. - - Attributes: - crossing_prefactor: float - The prefactor for logical error rate (Default is 0.016) - error_correction_threshold: float - The error correction threshold for the surface code (Default is - 0.064) - - Hyper parameters: - shape_heuristic: ShapeHeuristic - The heuristic to determine the shape of the surface code patch for a - given number of logical qubits. (Default is ShapeHeuristic.MIN_AREA) - - References: - - - Craig Gidney, Michael Newman, Peter Brooks, Cody Jones: Yoked surface - codes, [arXiv:2312.04522](https://arxiv.org/abs/2312.04522) - """ - - # NOTE: The crossing_prefactor is relative to that of the underlying surface - # code. That is if the surface code model is p(SC) = - # A*(p(phy)/th(SC))^((d+1)/2), then multiplier for its yoked extension is - # crossing_prefactor*A - crossing_prefactor: float = 5 / 600 - - # NOTE: The threshold is relative to that of the underlying surface code. - # Namely, as the yoking doubles the distance, one would expect the yoked - # surface code to have a threshold of sqrt(th(SC)). However modeling shows - # it falls short of this. - error_correction_threshold: float = 2500 / 10 - - @staticmethod - def required_isa() -> ISARequirements: - # We require a lattice surgery instruction that also provides the code - # distance as a property. 
This is necessary to compute the time - # and error rate formulas for the provided memory instruction. - return ISARequirements( - constraint(LATTICE_SURGERY, LOGICAL, arity=None, distance=True), - ) - - def provided_isa( - self, impl_isa: ISA, ctx: ISAContext - ) -> Generator[ISA, None, None]: - lattice_surgery = impl_isa[LATTICE_SURGERY] - distance = lattice_surgery.get_property(DISTANCE) - assert distance is not None - - def space(arity: int) -> int: - a, b = self._square_shape(arity) - return lattice_surgery.expect_space(a * b) - - space_fn = generic_function(space) - - def time(arity: int) -> int: - a, b = self._square_shape(arity) - s = lattice_surgery.expect_time(a * b) - return s * (8 * distance * max(a - 2, b - 2) + 2 * distance) - - time_fn = generic_function(time) - - def error_rate(arity: int) -> float: - a, b = self._square_shape(arity) - rounds = 2 * max(a - 3, b - 3) - # logical error rate on a single surface code patch - p = lattice_surgery.expect_error_rate(1) - return ( - rounds**4 - * (a * b) ** 2 - * self.crossing_prefactor - * p - * (1 / self.error_correction_threshold) ** ((distance + 1) // 2) - ) - - error_rate_fn = generic_function(error_rate) - - yield ctx.make_isa( - ctx.add_instruction( - MEMORY, - arity=None, - encoding=LOGICAL, - space=space_fn, - time=time_fn, - error_rate=error_rate_fn, - transform=self, - source=[lattice_surgery], - distance=distance, - ) - ) - - @staticmethod - def _square_shape(num_qubits: int) -> tuple[int, int]: - """ - Given a number of qubits num_qubits, returns numbers (a + 2) and (b + 2) - such that a * b >= num_qubits and a and b are as close as possible. 
- """ - - a = int(num_qubits**0.5) - while num_qubits % a != 0: - a -= 1 - b = num_qubits // a - return a + 2, b + 2 diff --git a/source/pip/qsharp/qre/models/qubits/__init__.py b/source/pip/qsharp/qre/models/qubits/__init__.py deleted file mode 100644 index ab7887faf3..0000000000 --- a/source/pip/qsharp/qre/models/qubits/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from ._gate_based import GateBased -from ._msft import Majorana - -__all__ = ["GateBased", "Majorana"] diff --git a/source/pip/qsharp/qre/models/qubits/_gate_based.py b/source/pip/qsharp/qre/models/qubits/_gate_based.py deleted file mode 100644 index d9ee589485..0000000000 --- a/source/pip/qsharp/qre/models/qubits/_gate_based.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from dataclasses import KW_ONLY, dataclass, field -from typing import Optional - -from ..._architecture import Architecture, ISAContext -from ..._instruction import ISA, Encoding -from ...instruction_ids import ( - CNOT, - CZ, - MEAS_X, - MEAS_Y, - MEAS_Z, - PAULI_I, - PAULI_X, - PAULI_Y, - PAULI_Z, - RX, - RY, - RZ, - S_DAG, - SQRT_X, - SQRT_X_DAG, - SQRT_Y, - SQRT_Y_DAG, - SQRT_SQRT_X, - SQRT_SQRT_X_DAG, - SQRT_SQRT_Y, - SQRT_SQRT_Y_DAG, - T_DAG, - H, - S, - T, -) - - -@dataclass -class GateBased(Architecture): - """ - A generic gate-based architecture. The error rate can be set arbitrarily - and is either 1e-3 or 1e-4 in the reference. - - Args: - error_rate: The error rate for all gates. Defaults to 1e-4. - gate_time: The time (in ns) for single-qubit gates. - measurement_time: The time (in ns) for measurement operations. - two_qubit_gate_time: The time (in ns) for two-qubit gates (CNOT, CZ). - If not provided, defaults to the value of ``gate_time``. - - References: - - - Michael E. Beverland, Prakash Murali, Matthias Troyer, Krysta M. 
Svore, - Torsten Hoefler, Vadym Kliuchnikov, Guang Hao Low, Mathias Soeken, Aarthi - Sundaram, Alexander Vaschillo: Assessing requirements to scale to - practical quantum advantage, - [arXiv:2211.07629](https://arxiv.org/abs/2211.07629) - - Jens Koch, Terri M. Yu, Jay Gambetta, A. A. Houck, D. I. Schuster, J. - Majer, Alexandre Blais, M. H. Devoret, S. M. Girvin, R. J. Schoelkopf: - Charge insensitive qubit design derived from the Cooper pair box, - [arXiv:cond-mat/0703002](https://arxiv.org/abs/cond-mat/0703002) - """ - - _: KW_ONLY - error_rate: float = field(default=1e-4) - gate_time: int - measurement_time: int - two_qubit_gate_time: Optional[int] = field(default=None) - - def __post_init__(self): - if self.two_qubit_gate_time is None: - self.two_qubit_gate_time = self.gate_time - - def provided_isa(self, ctx: ISAContext) -> ISA: - # Value is initialized in __post_init__ - assert self.two_qubit_gate_time is not None - - # NOTE: This can be improved with instruction coercion once implemented. 
- instructions = [] - - # Single-qubit gates - single = [ - PAULI_I, - PAULI_X, - PAULI_Y, - PAULI_Z, - H, - SQRT_X, - SQRT_X_DAG, - SQRT_Y, - SQRT_Y_DAG, - S, - S_DAG, - SQRT_SQRT_X, - SQRT_SQRT_X_DAG, - SQRT_SQRT_Y, - SQRT_SQRT_Y_DAG, - T, - T_DAG, - RX, - RY, - RZ, - ] - - for instr in single: - instructions.append( - ctx.add_instruction( - instr, - encoding=Encoding.PHYSICAL, - arity=1, - time=self.gate_time, - error_rate=self.error_rate, - ) - ) - - for instr in [MEAS_X, MEAS_Y, MEAS_Z]: - instructions.append( - ctx.add_instruction( - instr, - encoding=Encoding.PHYSICAL, - arity=1, - time=self.measurement_time, - error_rate=self.error_rate, - ) - ) - - # Two-qubit gates - for instr in [CNOT, CZ]: - instructions.append( - ctx.add_instruction( - instr, - encoding=Encoding.PHYSICAL, - arity=2, - time=self.two_qubit_gate_time, - error_rate=self.error_rate, - ) - ) - - return ctx.make_isa(*instructions) diff --git a/source/pip/qsharp/qre/models/qubits/_msft.py b/source/pip/qsharp/qre/models/qubits/_msft.py deleted file mode 100644 index 1d74300e3e..0000000000 --- a/source/pip/qsharp/qre/models/qubits/_msft.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from dataclasses import KW_ONLY, dataclass, field - -from ..._architecture import Architecture, ISAContext -from ...instruction_ids import ( - T, - PREP_X, - PREP_Z, - MEAS_XX, - MEAS_ZZ, - MEAS_X, - MEAS_Z, -) -from ..._instruction import ISA - - -@dataclass -class Majorana(Architecture): - """ - This class models physical instructions that may be relevant for future - Majorana qubits. For these qubits, we assume that measurements - and the physical T gate each take 1 µs. Owing to topological protection in - the hardware, we assume single and two-qubit measurement error rates - (Clifford error rates) in $10^{-4}$, $10^{-5}$, and $10^{-6}$ as a range - between realistic and optimistic targets. 
Non-Clifford operations in this - architecture do not have topological protection, so we assume a 5%, 1.5%, - and 1% error rate for non-Clifford physical T gates for the three cases, - respectively. - - References: - - - Torsten Karzig, Christina Knapp, Roman M. Lutchyn, Parsa Bonderson, - Matthew B. Hastings, Chetan Nayak, Jason Alicea, Karsten Flensberg, - Stephan Plugge, Yuval Oreg, Charles M. Marcus, Michael H. Freedman: - Scalable Designs for Quasiparticle-Poisoning-Protected Topological Quantum - Computation with Majorana Zero Modes, - [arXiv:1610.05289](https://arxiv.org/abs/1610.05289) - - Alexei Kitaev: Unpaired Majorana fermions in quantum wires, - [arXiv:cond-mat/0010440](https://arxiv.org/abs/cond-mat/0010440) - - Sankar Das Sarma, Michael Freedman, Chetan Nayak: Majorana Zero Modes and - Topological Quantum Computation, - [arXiv:1501.02813](https://arxiv.org/abs/1501.02813) - """ - - _: KW_ONLY - error_rate: float = field(default=1e-5, metadata={"domain": [1e-4, 1e-5, 1e-6]}) - - def provided_isa(self, ctx: ISAContext) -> ISA: - if abs(self.error_rate - 1e-4) <= 1e-8: - t_error_rate = 0.05 - elif abs(self.error_rate - 1e-5) <= 1e-8: - t_error_rate = 0.015 - elif abs(self.error_rate - 1e-6) <= 1e-8: - t_error_rate = 0.01 - - return ctx.make_isa( - ctx.add_instruction(PREP_X, time=1000, error_rate=self.error_rate), - ctx.add_instruction(PREP_Z, time=1000, error_rate=self.error_rate), - ctx.add_instruction( - MEAS_XX, arity=2, time=1000, error_rate=self.error_rate - ), - ctx.add_instruction( - MEAS_ZZ, arity=2, time=1000, error_rate=self.error_rate - ), - ctx.add_instruction(MEAS_X, time=1000, error_rate=self.error_rate), - ctx.add_instruction(MEAS_Z, time=1000, error_rate=self.error_rate), - ctx.add_instruction(T, time=1000, error_rate=t_error_rate), - ) diff --git a/source/pip/qsharp/qre/property_keys.py b/source/pip/qsharp/qre/property_keys.py index 917e25ca0d..0d2afc07f4 100644 --- a/source/pip/qsharp/qre/property_keys.py +++ 
b/source/pip/qsharp/qre/property_keys.py @@ -1,10 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -# pyright: reportAttributeAccessIssue=false - - -from .._native import property_keys - -for name in property_keys.__all__: - globals()[name] = getattr(property_keys, name) +# Deprecated: use qdk.qre.property_keys instead. +from qdk.qre.property_keys import * # noqa: F401,F403 diff --git a/source/pip/qsharp/qre/property_keys.pyi b/source/pip/qsharp/qre/property_keys.pyi deleted file mode 100644 index f4a097f3f7..0000000000 --- a/source/pip/qsharp/qre/property_keys.pyi +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -DISTANCE: int -SURFACE_CODE_ONE_QUBIT_TIME_FACTOR: int -SURFACE_CODE_TWO_QUBIT_TIME_FACTOR: int -ACCELERATION: int -NUM_TS_PER_ROTATION: int -EXPECTED_SHOTS: int -RUNTIME_SINGLE_SHOT: int -EVALUATION_TIME: int -PHYSICAL_COMPUTE_QUBITS: int -PHYSICAL_FACTORY_QUBITS: int -PHYSICAL_MEMORY_QUBITS: int -MOLECULE: int -LOGICAL_COMPUTE_QUBITS: int -LOGICAL_MEMORY_QUBITS: int -ALGORITHM_COMPUTE_QUBITS: int -ALGORITHM_MEMORY_QUBITS: int -NAME: int -LOSS: int -LOGICAL_CYCLE_TIME: int -CODE_CYCLE_TIME: int diff --git a/source/pip/qsharp/telemetry.py b/source/pip/qsharp/telemetry.py index 4114c69878..ca22eb5c38 100644 --- a/source/pip/qsharp/telemetry.py +++ b/source/pip/qsharp/telemetry.py @@ -1,310 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -""" -This module sends telemetry directly to Azure Monitor using a similar mechanism and -format to the Azure Monitor OpenTelemetry Python SDK. It only supports custom metrics of -type "counter" and "histogram" for now. It's goal is to be minimal in size and dependencies, -and easy to read to understand exactly what data is being sent. - -To use this API, simply call `log_telemetry` with the metric name, value, and any other -optional properties. 
The telemetry will be batched and sent at a regular intervals (60 sec), -and when the process is about to exit. - -Disable qsharp Python telemetry by setting the environment variable `QSHARP_PYTHON_TELEMETRY=none`. -""" - -import atexit -import json -import locale -import logging -import os -import platform -import time -import urllib.request -import warnings - -from datetime import datetime, timezone -from queue import SimpleQueue, Empty -from threading import Thread -from typing import Any, Dict, Literal, List, TypedDict, Union - -logger = logging.getLogger(__name__) - -QSHARP_VERSION = "0.0.0.dev0" - -AIKEY = os.environ.get("QSHARP_PYTHON_AI_KEY") or "95d25b22-8b6d-448e-9677-78ad4047a95a" -AIURL = ( - os.environ.get("QSHARP_PYTHON_AI_URL") - or "https://westus2-2.in.applicationinsights.azure.com//v2.1/track" -) - -# If explicitly disabled via either environment variable, disable telemetry. This takes precedence. -# If explicitly enabled via either environment variable, enable telemetry. -# Otherwise, enable telemetry only in release builds. -_disable_values = {"0", "false", "disabled", "none"} -_enable_values = {"1", "true", "enabled"} -_env_values = { - (os.environ.get("QSHARP_PYTHON_TELEMETRY") or "").lower(), - (os.environ.get("QDK_PYTHON_TELEMETRY") or "").lower(), -} - -# `&` here is set intersection: it yields the common values between sets. -# `not _env_values & _disable_values` is True iff no disable value is present. -# `bool(_env_values & _enable_values)` is True iff any enable value is present. 
-TELEMETRY_ENABLED = not _env_values & _disable_values and ( - bool(_env_values & _enable_values) or "dev" not in QSHARP_VERSION -) - -BATCH_INTERVAL_SEC = int(os.environ.get("QSHARP_PYTHON_TELEMETRY_INTERVAL") or 60) - - -# The below is taken from the Azure Monitor Python SDK -def _getlocale() -> str: - try: - with warnings.catch_warnings(): - # Workaround for https://github.com/python/cpython/issues/82986 by continuing to use getdefaultlocale() even though it has been deprecated. - # Ignore the deprecation warnings to reduce noise - warnings.simplefilter("ignore", category=DeprecationWarning) - return locale.getdefaultlocale()[0] or "" - except AttributeError: - # Use this as a fallback if locale.getdefaultlocale() doesn't exist (>Py3.13) - return locale.getlocale()[0] or "" - - -# Minimal device information to include with telemetry -AI_DEVICE_LOCALE = _getlocale() -AI_DEVICE_OS_VERSION = platform.version() - - -class Metric(TypedDict): - """Used internally for objects in the telemetry queue""" - - name: str - value: float - count: int - properties: Dict[str, Any] - type: str - - -class PendingMetric(Metric): - """Used internally to aggregate metrics before sending""" - - min: float - max: float - - -# Maintain a collection of custom metrics to log, stored by metric name with a list entry -# for each unique set of properties per metric name -pending_metrics: Dict[str, List[PendingMetric]] = {} - -# The telemetry queue is used to send telemetry from the main thread to the telemetry thread -# This simplifies any thread-safety concerns, and avoids the need for locks, etc. -telemetry_queue: Any = SimpleQueue() # type 'Any' until we get off Python 3.8 builds - - -def log_telemetry( - name: str, - value: float, - count: int = 1, - properties: Dict[str, Any] = {}, - type: Literal["counter", "histogram"] = "counter", -) -> None: - """ - Logs a custom metric with the name provided. 
Properties are optional and can be used to - capture additional context about the metric (but should be a relatively static set of values, as - each unique set of properties will be sent as a separate metric and creates a separate 'dimension' - in the backend telemetry store). - - The type can be either 'counter' or 'histogram'. A 'counter' is a simple value that is summed - over time, such as how many times an event occurs, while a 'histogram' is used to track 'quantative' - values, such as the distribution of values over time, e.g., the duration of an operation. - - Example usage for a counter: - - log_telemetry("qir_generated", 1, properties={"profile": "base", "qsharp.version": "1.9.0"}) - - Example usage for a histogram: - - log_telemetry("simulation_duration", 123.45, type="histogram") - - """ - if not TELEMETRY_ENABLED: - return - - obj: Metric = { - "name": name, - "value": value, - "count": count, - "properties": {**properties, "qsharp.version": QSHARP_VERSION}, - "type": type, - } - - logger.debug("Queuing telemetry: %s", obj) - telemetry_queue.put(obj) - - -def _add_to_pending(metric: Metric): - """Used by the telemetry thread to aggregate metrics before sending""" - - if metric["type"] not in ["counter", "histogram"]: - raise Exception("Metric must be of type counter or histogram") - - # Get or create the entry list for this name - name_entries = pending_metrics.setdefault(metric["name"], []) - - # Try to find the entry with matching properties - # This relies on the fact dicts with matching keys/values compare equal in Python - prop_entry = next( - ( - entry - for entry in name_entries - if entry["properties"] == metric["properties"] - ), - None, - ) - if prop_entry is None: - new_entry: PendingMetric = { - **metric, - "min": metric["value"], - "max": metric["value"], - } - name_entries.append(new_entry) - else: - if prop_entry["type"] != metric["type"]: - raise Exception("Cannot mix counter and histogram for the same metric name") - 
prop_entry["value"] += metric["value"] - prop_entry["count"] += metric["count"] - prop_entry["min"] = min(prop_entry["min"], metric["value"]) - prop_entry["max"] = max(prop_entry["max"], metric["value"]) - - -def _pending_to_payload() -> List[Dict[str, Any]]: - """Converts the pending metrics to the JSON payload for Azure Monitor""" - - result_array: List[Dict[str, Any]] = [] - formatted_time = ( - datetime.now(timezone.utc) - .isoformat(timespec="microseconds") - .replace("+00:00", "Z") - ) - for name in pending_metrics: - for unique_props in pending_metrics[name]: - # The below matches the entry format for Azure Monitor REST API - entry: Dict[str, Any] = { - "ver": 1, - "name": "Microsoft.ApplicationInsights.Metric", - "time": formatted_time, - "sampleRate": 100.0, - "iKey": AIKEY, - "tags": { - "ai.device.locale": AI_DEVICE_LOCALE, - "ai.device.osVersion": AI_DEVICE_OS_VERSION, - }, - "data": { - "baseType": "MetricData", - "baseData": { - "ver": 2, - "metrics": [ - { - "name": unique_props["name"], - "value": unique_props["value"], - "count": unique_props["count"], - } - ], - "properties": unique_props["properties"], - }, - }, - } - # Histogram values differ only in that they have min/max values also - if unique_props["type"] == "histogram": - entry["data"]["baseData"]["metrics"][0]["min"] = unique_props["min"] - entry["data"]["baseData"]["metrics"][0]["max"] = unique_props["max"] - - result_array.append(entry) - - return result_array - - -def _post_telemetry() -> bool: - """Posts the pending telemetry to Azure Monitor""" - - if len(pending_metrics) == 0: - return True - - payload = json.dumps(_pending_to_payload()).encode("utf-8") - logger.debug("Sending telemetry request: %s", payload) - try: - request = urllib.request.Request(AIURL, data=payload, method="POST") - request.add_header("Content-Type", "application/json") - with urllib.request.urlopen(request, timeout=10) as response: - logger.debug("Telemetry response: %s", response.status) - # On a successful 
post, clear the pending list. (Else they will be included on the next retry) - pending_metrics.clear() - return True - - except Exception: - logger.debug( - "Failed to post telemetry. Pending metrics will be retried at the next interval." - ) - return False - - -# This is the thread that aggregates and posts telemetry at a regular interval. -# The main thread will signal the thread loop to exit when the process is about to exit. -def _telemetry_thread_start(): - next_post_sec: Union[float, None] = None - - def on_metric(msg: Metric): - nonlocal next_post_sec - - # Add to the pending batch to send next - _add_to_pending(msg) - - # Schedule the next post if we don't have one scheduled - if next_post_sec == None: - next_post_sec = time.monotonic() + BATCH_INTERVAL_SEC - - while True: - try: - # Block if no timeout, else wait a maximum of time until the next post is due - timeout: Union[float, None] = None - if next_post_sec: - timeout = max(next_post_sec - time.monotonic(), 0) - msg = telemetry_queue.get(timeout=timeout) - - if msg == "exit": - logger.debug("Exiting telemetry thread") - if not _post_telemetry(): - logger.debug("Failed to post telemetry on exit") - return - else: - on_metric(msg) - # Loop until the queue has been drained. This will cause the 'Empty' exception - # below once the queue is empty and it's time to post - continue - except Empty: - # No more telemetry within timeout, so write what we have pending - _ = _post_telemetry() - - # If we get here, it's after a post attempt. Pending will still have items if the attempt - # failed, so updated the time for the next attempt in that case. 
- if len(pending_metrics) == 0: - next_post_sec = None - else: - next_post_sec = time.monotonic() + BATCH_INTERVAL_SEC - - -# When the process is about to exit, notify the telemetry thread to flush, and wait max 3 sec before exiting anyway -def _on_exit(): - logger.debug("In on_exit handler") - telemetry_queue.put("exit") - # Wait at most 3 seconds for the telemetry thread to flush and exit - telemetry_thread.join(timeout=3) - - -# Mark the telemetry thread as a daemon thread, else it will keep the process alive when the main thread exits -if TELEMETRY_ENABLED: - telemetry_thread = Thread(target=_telemetry_thread_start, daemon=True) - telemetry_thread.start() - atexit.register(_on_exit) +# Deprecated: use qdk.telemetry instead. +from qdk.telemetry import * # noqa: F401,F403 diff --git a/source/pip/qsharp/telemetry_events.py b/source/pip/qsharp/telemetry_events.py index edffb17585..86dd1c96ce 100644 --- a/source/pip/qsharp/telemetry_events.py +++ b/source/pip/qsharp/telemetry_events.py @@ -1,357 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -from .telemetry import log_telemetry -import math -from typing import Union - -# For metrics such as duration, we want to capture things like how many shots or qubits in -# the additional properties. However properties shouldn't be 'continuous' values, as they -# create new 'dimensions' on the backend, which is limited, thus we want to bucket these properties. - -# See some of the notes at: https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/metrics-custom-overview#design-limitations-and-considerations - - -def get_next_power_of_ten_bucket(value: int) -> int: - if value <= 1: - return 1 - elif value >= 1000000: - # Limit the buckets upper bound - return 1000000 - else: - # Bucket into nearest (rounded up) power of 10, e.g. 75 -> 100, 450 -> 1000, etc. 
- return 10 ** math.ceil(math.log10(value)) - - -# gets the order of magnitude for the number of qubits -def get_qubits_bucket(qubits: Union[str, int]) -> str: - if qubits == "unknown": - return "unknown" - qubits = int(qubits) - if qubits <= 1: - return "1" - elif qubits >= 50: - return "50" - else: - # integer divide by 5 to get nearest 5 - return str(qubits // 5 * 5) - - -def on_import() -> None: - log_telemetry("qsharp.import", 1) - - -def on_qdk_import() -> None: - log_telemetry("qdk.import", 1) - - -def on_run(shots: int, noise: bool, qubit_loss: bool) -> None: - log_telemetry( - "qsharp.run", - 1, - properties={ - "shots": get_next_power_of_ten_bucket(shots), - "noise": noise, - "qubit_loss": qubit_loss, - }, - ) - - -def on_run_end(durationMs: float, shots: int) -> None: - log_telemetry( - "qsharp.run.durationMs", - durationMs, - properties={"shots": get_next_power_of_ten_bucket(shots)}, - type="histogram", - ) - - -def on_run_qasm(shots: int, noise: bool, qubit_loss: bool) -> None: - log_telemetry( - "qsharp.run_qasm", - 1, - properties={ - "shots": get_next_power_of_ten_bucket(shots), - "noise": noise, - "qubit_loss": qubit_loss, - }, - ) - - -def on_run_qasm_end(durationMs: float, shots: int) -> None: - log_telemetry( - "qsharp.run_qasm.durationMs", - durationMs, - properties={"shots": get_next_power_of_ten_bucket(shots)}, - type="histogram", - ) - - -def on_eval() -> None: - log_telemetry( - "qsharp.eval", - 1, - ) - - -def on_eval_end(durationMs: float) -> None: - log_telemetry( - "qsharp.eval.durationMs", - durationMs, - type="histogram", - ) - - -def on_import_qasm() -> None: - log_telemetry( - "qsharp.import_qasm", - 1, - ) - - -def on_import_qasm_end(durationMs: float) -> None: - log_telemetry( - "qsharp.import_qasm.durationMs", - durationMs, - type="histogram", - ) - - -def on_run_cell() -> None: - log_telemetry( - "qsharp.run.cell", - 1, - ) - - -def on_run_cell_end(durationMs: float) -> None: - log_telemetry( - "qsharp.run.cell.durationMs", - 
durationMs, - type="histogram", - ) - - -def on_compile(profile: str) -> None: - log_telemetry("qsharp.compile", 1, properties={"profile": profile}) - - -def on_compile_end(durationMs: float, profile: str) -> None: - log_telemetry( - "qsharp.compile.durationMs", - durationMs, - properties={"profile": profile}, - type="histogram", - ) - - -def on_compile_qasm(profile: str) -> None: - log_telemetry("qsharp.compile_qasm", 1, properties={"profile": profile}) - - -def on_compile_qasm_end(durationMs: float, profile: str) -> None: - log_telemetry( - "qsharp.compile_qasm.durationMs", - durationMs, - properties={"profile": profile}, - type="histogram", - ) - - -def on_estimate() -> None: - log_telemetry( - "qsharp.estimate", - 1, - ) - - -def on_estimate_end(durationMs: float, qubits: Union[str, int]) -> None: - log_telemetry( - "qsharp.estimate.durationMs", - durationMs, - properties={"qubits": get_qubits_bucket(qubits)}, - type="histogram", - ) - - -def on_estimate_qasm() -> None: - log_telemetry( - "qsharp.estimate_qasm", - 1, - ) - - -def on_estimate_qasm_end(durationMs: float, qubits: Union[str, int]) -> None: - log_telemetry( - "qsharp.estimate_qasm.durationMs", - durationMs, - properties={"qubits": get_qubits_bucket(qubits)}, - type="histogram", - ) - - -def on_circuit() -> None: - log_telemetry( - "qsharp.circuit", - 1, - ) - - -def on_circuit_end(durationMs: float) -> None: - log_telemetry( - "qsharp.circuit.durationMs", - durationMs, - type="histogram", - ) - - -def on_circuit_qasm() -> None: - log_telemetry( - "qsharp.circuit_qasm", - 1, - ) - - -def on_circuit_qasm_end(durationMs: float) -> None: - log_telemetry( - "qsharp.circuit_qasm.durationMs", - durationMs, - type="histogram", - ) - - -# Qiskit telemetry events - - -def on_qiskit_run(shots: int, num_circuits: int) -> None: - log_telemetry( - "qiskit.run", - 1, - properties={ - "shots": get_next_power_of_ten_bucket(shots), - "circuits": get_next_power_of_ten_bucket(num_circuits), - }, - ) - - -def 
on_qiskit_run_end(shots: int, num_circuits: int, duration_ms: float) -> None: - log_telemetry( - "qiskit.run.durationMs", - duration_ms, - properties={ - "shots": get_next_power_of_ten_bucket(shots), - "circuits": get_next_power_of_ten_bucket(num_circuits), - }, - type="histogram", - ) - - -def on_qiskit_run_re() -> None: - log_telemetry( - "qiskit.run.re", - 1, - ) - - -def on_qiskit_run_re_end(duration_ms: float) -> None: - log_telemetry( - "qiskit.run.re.durationMs", - duration_ms, - type="histogram", - ) - - -def on_neutral_atom_init(default_layout: bool) -> None: - log_telemetry( - "neutral_atom.device.init", - 1, - properties={"default_layout": default_layout}, - ) - - -def on_neutral_atom_compile() -> None: - log_telemetry( - "neutral_atom.device.compile", - 1, - ) - - -def on_neutral_atom_compile_end(duration_ms: float) -> None: - log_telemetry( - "neutral_atom.device.compile.durationMs", - duration_ms, - type="histogram", - ) - - -def on_neutral_atom_trace() -> None: - log_telemetry( - "neutral_atom.device.trace", - 1, - ) - - -def on_neutral_atom_trace_end(duration_ms: float) -> None: - log_telemetry( - "neutral_atom.device.trace.durationMs", - duration_ms, - type="histogram", - ) - - -def on_neutral_atom_cpu_fallback() -> None: - log_telemetry( - "neutral_atom.device.cpu_fallback", - 1, - ) - - -def on_neutral_atom_simulate(shots: int, noise: bool, type: str) -> None: - log_telemetry( - "neutral_atom.device.simulate", - 1, - properties={ - "shots": get_next_power_of_ten_bucket(shots), - "noise": noise, - "type": type, - }, - ) - - -def on_neutral_atom_simulate_end( - duration_ms: float, shots: int, noise: bool, type: str -) -> None: - log_telemetry( - "neutral_atom.device.simulate.durationMs", - duration_ms, - properties={ - "shots": get_next_power_of_ten_bucket(shots), - "noise": noise, - "type": type, - }, - type="histogram", - ) - - -# QRE telemetry events - - -def on_qre_estimate(post_process: bool, use_graph: bool) -> None: - log_telemetry( - 
"qsharp.qre.estimate", - 1, - properties={ - "post_process": post_process, - "use_graph": use_graph, - }, - ) - - -def on_qre_application_created(application_type: str) -> None: - log_telemetry( - "qsharp.qre.application.created", - 1, - properties={ - "application_type": application_type, - }, - ) +# Deprecated: use qdk.telemetry_events instead. +from qdk.telemetry_events import * # noqa: F401,F403 diff --git a/source/pip/qsharp/utils/__init__.py b/source/pip/qsharp/utils/__init__.py index 03d71482e7..2eb60148d5 100644 --- a/source/pip/qsharp/utils/__init__.py +++ b/source/pip/qsharp/utils/__init__.py @@ -1,8 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -from ._utils import dump_operation - -__all__ = [ - "dump_operation", -] +# Deprecated: use qdk.utils instead. +from qdk.utils import * # noqa: F401,F403 diff --git a/source/pip/qsharp/utils/_utils.py b/source/pip/qsharp/utils/_utils.py deleted file mode 100644 index 26984dc4db..0000000000 --- a/source/pip/qsharp/utils/_utils.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from .._qsharp import run -from typing import List -import math - - -def dump_operation(operation: str, num_qubits: int) -> List[List[complex]]: - """ - Returns a square matrix of complex numbers representing the operation performed. - - :param operation: The operation to be performed, which must operate on a list of qubits. - :param num_qubits: The number of qubits to be used. - - :return: The matrix representing the operation. 
- :rtype: List[List[complex]] - """ - code = f"""{{ - let op = {operation}; - use (targets, extra) = (Qubit[{num_qubits}], Qubit[{num_qubits}]); - for i in 0..{num_qubits}-1 {{ - H(targets[i]); - CNOT(targets[i], extra[i]); - }} - operation ApplyOp (op : (Qubit[] => Unit), targets : Qubit[]) : Unit {{ op(targets); }} - ApplyOp(op, targets); - Microsoft.Quantum.Diagnostics.DumpMachine(); - ResetAll(targets + extra); - }}""" - result = run(code, shots=1, save_events=True)[0] - state = result["events"][-1].state_dump().get_dict() - num_entries = pow(2, num_qubits) - factor = math.sqrt(num_entries) - ndigits = 6 - matrix = [] - for i in range(num_entries): - matrix += [[]] - for j in range(num_entries): - entry = state.get(i * num_entries + j) - if entry is None: - matrix[i] += [complex(0, 0)] - else: - matrix[i] += [ - complex( - round(factor * entry.real, ndigits), - round(factor * entry.imag, ndigits), - ) - ] - return matrix From a500e0a4aab6579f2287abe053db3db69adf90f0 Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Wed, 29 Apr 2026 12:17:02 -0700 Subject: [PATCH 04/25] Started to deal with tests --- build.py | 107 ++++++++------- source/pip/qsharp/__init__.py | 1 + source/pip/qsharp/_adaptive_bytecode.py | 4 + source/pip/qsharp/_adaptive_pass.py | 4 + source/pip/qsharp/_device/_atom/_decomp.py | 4 + source/pip/qsharp/_device/_atom/_validate.py | 4 + source/pip/qsharp/_native.py | 4 + .../qsharp/applications/magnets/__init__.py | 4 + source/pip/qsharp/qre/_architecture.py | 4 + source/pip/qsharp/qre/_enumeration.py | 4 + source/pip/qsharp/qre/_estimation.py | 4 + source/pip/qsharp/qre/_instruction.py | 4 + source/pip/qsharp/qre/_isa_enumeration.py | 4 + source/pip/qsharp/qre/_qre.py | 4 + source/pip/qsharp/qre/application/__init__.py | 4 + source/pip/qsharp/qre/interop/__init__.py | 4 + source/pip/qsharp/qre/interop/_cirq.py | 4 + source/pip/qsharp/qre/models/__init__.py | 4 + source/qdk_package/tests/conftest.py | 9 +- source/qdk_package/tests/mocks.py | 126 
+----------------- source/qdk_package/tests/test_extras.py | 35 ++--- source/qdk_package/tests/test_reexports.py | 73 +++++----- 22 files changed, 172 insertions(+), 243 deletions(-) create mode 100644 source/pip/qsharp/_adaptive_bytecode.py create mode 100644 source/pip/qsharp/_adaptive_pass.py create mode 100644 source/pip/qsharp/_device/_atom/_decomp.py create mode 100644 source/pip/qsharp/_device/_atom/_validate.py create mode 100644 source/pip/qsharp/_native.py create mode 100644 source/pip/qsharp/applications/magnets/__init__.py create mode 100644 source/pip/qsharp/qre/_architecture.py create mode 100644 source/pip/qsharp/qre/_enumeration.py create mode 100644 source/pip/qsharp/qre/_estimation.py create mode 100644 source/pip/qsharp/qre/_instruction.py create mode 100644 source/pip/qsharp/qre/_isa_enumeration.py create mode 100644 source/pip/qsharp/qre/_qre.py create mode 100644 source/pip/qsharp/qre/application/__init__.py create mode 100644 source/pip/qsharp/qre/interop/__init__.py create mode 100644 source/pip/qsharp/qre/interop/_cirq.py create mode 100644 source/pip/qsharp/qre/models/__init__.py diff --git a/build.py b/build.py index de197c3b50..706d6df7da 100755 --- a/build.py +++ b/build.py @@ -346,7 +346,7 @@ def install_python_test_requirements(cwd, interpreter, check: bool = True): subprocess.run(command_args, check=check, text=True, cwd=cwd) -def build_qsharp_wheel(cwd, interpreter, pip_env): +def build_maturin_wheel(cwd, interpreter, pip_env): # Read the build dependencies out of the pyproject.toml and install them first. with open(os.path.join(cwd, "pyproject.toml"), "rb") as f: requires = tomllib.load(f)["build-system"]["requires"] @@ -438,18 +438,74 @@ def run_ci_historic_benchmark(): f.write(result.stdout) +if build_qdk: + step_start("Building the qdk python package") + + # Reuse (or create) the pip environment so qdk wheel can be built/installed consistently. 
+ (python_bin, pip_env) = use_python_env(qdk_python_src) + + # Build the qdk wheel with maturin (it now owns the native extension). + build_maturin_wheel(qdk_python_src, python_bin, pip_env) + step_end() + + if run_tests: + step_start("Running tests for the qdk python package") + # Install per-package test requirements (pytest, etc.) + install_python_test_requirements(qdk_python_src, python_bin) + + # Install qdk from the freshly built wheel. + install_args = [ + python_bin, + "-m", + "pip", + "install", + "--force-reinstall", + "--no-index", + "--find-links=" + wheels_dir, + "qdk", + ] + subprocess.run(install_args, check=True, text=True, cwd=qdk_python_src) + + # Run its test suite + run_python_tests(os.path.join(qdk_python_src, "tests"), python_bin, pip_env) + step_end() + if build_pip: step_start("Building the pip package") (python_bin, pip_env) = use_python_env(pip_src) - build_qsharp_wheel(pip_src, python_bin, pip_env) + # qsharp is now a pure-Python shim depending on qdk. + # Build with setuptools (no maturin needed). + pip_build_args = [ + python_bin, + "-m", + "build", + "--wheel", + "-v", + "--outdir", + wheels_dir, + pip_src, + ] + subprocess.run(pip_build_args, check=True, text=True, cwd=pip_src, env=pip_env) step_end() if run_tests: step_start("Running tests for the pip package") install_python_test_requirements(pip_src, python_bin) + # Install qdk first (qsharp depends on it) + install_args = [ + python_bin, + "-m", + "pip", + "install", + "--force-reinstall", + "--no-index", + "--find-links=" + wheels_dir, + "qdk", + ] + subprocess.run(install_args, check=True, text=True, cwd=pip_src) install_qsharp_python_package(pip_src, wheels_dir, python_bin) run_python_tests(os.path.join(pip_src, "tests"), python_bin, pip_env) @@ -484,53 +540,6 @@ def run_ci_historic_benchmark(): step_end() -if build_qdk: - step_start("Building the qdk python package") - - # Reuse (or create) the pip environment so qsharp wheel can be built/installed consistently. 
- (python_bin, pip_env) = use_python_env(qdk_python_src) - - # Build the qdk wheel (no dependency build needed; it's a thin meta-package) - qdk_build_args = [ - python_bin, - "-m", - "build", - "--wheel", - "-v", - "--outdir", - wheels_dir, - qdk_python_src, - ] - subprocess.run(qdk_build_args, check=True, text=True, cwd=qdk_python_src) - step_end() - - if run_tests: - step_start("Running tests for the qdk python package") - # Install per-package test requirements (pytest, etc.) - install_python_test_requirements(qdk_python_src, python_bin) - - # Install qsharp wheel first so dependency resolution is offline & version-synced. - install_qsharp_python_package(qdk_python_src, wheels_dir, python_bin) - - # Install qdk itself from the freshly built wheel (force to ensure isolation) - install_args = [ - python_bin, - "-m", - "pip", - "install", - "--force-reinstall", - "--no-index", - "--no-deps", - "--find-links=" + wheels_dir, - "qdk", - "qsharp", - ] - subprocess.run(install_args, check=True, text=True, cwd=qdk_python_src) - - # Run its test suite - run_python_tests(os.path.join(qdk_python_src, "tests"), python_bin, pip_env) - step_end() - if build_widgets: step_start("Building the Python widgets") diff --git a/source/pip/qsharp/__init__.py b/source/pip/qsharp/__init__.py index 9559b7b333..af20955d63 100644 --- a/source/pip/qsharp/__init__.py +++ b/source/pip/qsharp/__init__.py @@ -46,6 +46,7 @@ from qdk._native import Result, Pauli, QSharpError, TargetProfile, estimate_custom from qdk import telemetry_events + telemetry_events.on_import() # IPython notebook specific features diff --git a/source/pip/qsharp/_adaptive_bytecode.py b/source/pip/qsharp/_adaptive_bytecode.py new file mode 100644 index 0000000000..4a5193e367 --- /dev/null +++ b/source/pip/qsharp/_adaptive_bytecode.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# Deprecation shim – delegates to qdk._adaptive_bytecode +from qdk._adaptive_bytecode import * diff --git a/source/pip/qsharp/_adaptive_pass.py b/source/pip/qsharp/_adaptive_pass.py new file mode 100644 index 0000000000..883f58b136 --- /dev/null +++ b/source/pip/qsharp/_adaptive_pass.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Deprecation shim – delegates to qdk._adaptive_pass +from qdk._adaptive_pass import * diff --git a/source/pip/qsharp/_device/_atom/_decomp.py b/source/pip/qsharp/_device/_atom/_decomp.py new file mode 100644 index 0000000000..a839752945 --- /dev/null +++ b/source/pip/qsharp/_device/_atom/_decomp.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Deprecation shim – delegates to qdk._device._atom._decomp +from qdk._device._atom._decomp import * diff --git a/source/pip/qsharp/_device/_atom/_validate.py b/source/pip/qsharp/_device/_atom/_validate.py new file mode 100644 index 0000000000..04e88244a9 --- /dev/null +++ b/source/pip/qsharp/_device/_atom/_validate.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Deprecation shim – delegates to qdk._device._atom._validate +from qdk._device._atom._validate import * diff --git a/source/pip/qsharp/_native.py b/source/pip/qsharp/_native.py new file mode 100644 index 0000000000..55a3073bcd --- /dev/null +++ b/source/pip/qsharp/_native.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Deprecation shim – delegates to qdk._native +from qdk._native import * # type: ignore diff --git a/source/pip/qsharp/applications/magnets/__init__.py b/source/pip/qsharp/applications/magnets/__init__.py new file mode 100644 index 0000000000..2956378270 --- /dev/null +++ b/source/pip/qsharp/applications/magnets/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# Deprecation shim – delegates to qdk.applications.magnets +from qdk.applications.magnets import * diff --git a/source/pip/qsharp/qre/_architecture.py b/source/pip/qsharp/qre/_architecture.py new file mode 100644 index 0000000000..371fc6fdf3 --- /dev/null +++ b/source/pip/qsharp/qre/_architecture.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Deprecation shim – delegates to qdk.qre._architecture +from qdk.qre._architecture import * diff --git a/source/pip/qsharp/qre/_enumeration.py b/source/pip/qsharp/qre/_enumeration.py new file mode 100644 index 0000000000..17c065d12d --- /dev/null +++ b/source/pip/qsharp/qre/_enumeration.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Deprecation shim – delegates to qdk.qre._enumeration +from qdk.qre._enumeration import * diff --git a/source/pip/qsharp/qre/_estimation.py b/source/pip/qsharp/qre/_estimation.py new file mode 100644 index 0000000000..4daafc7141 --- /dev/null +++ b/source/pip/qsharp/qre/_estimation.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Deprecation shim – delegates to qdk.qre._estimation +from qdk.qre._estimation import * diff --git a/source/pip/qsharp/qre/_instruction.py b/source/pip/qsharp/qre/_instruction.py new file mode 100644 index 0000000000..c4a762b14f --- /dev/null +++ b/source/pip/qsharp/qre/_instruction.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Deprecation shim – delegates to qdk.qre._instruction +from qdk.qre._instruction import * diff --git a/source/pip/qsharp/qre/_isa_enumeration.py b/source/pip/qsharp/qre/_isa_enumeration.py new file mode 100644 index 0000000000..4552186044 --- /dev/null +++ b/source/pip/qsharp/qre/_isa_enumeration.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# Deprecation shim – delegates to qdk.qre._isa_enumeration +from qdk.qre._isa_enumeration import * diff --git a/source/pip/qsharp/qre/_qre.py b/source/pip/qsharp/qre/_qre.py new file mode 100644 index 0000000000..68a89c531c --- /dev/null +++ b/source/pip/qsharp/qre/_qre.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Deprecation shim – delegates to qdk.qre._qre +from qdk.qre._qre import * diff --git a/source/pip/qsharp/qre/application/__init__.py b/source/pip/qsharp/qre/application/__init__.py new file mode 100644 index 0000000000..f47b24c95a --- /dev/null +++ b/source/pip/qsharp/qre/application/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Deprecation shim – delegates to qdk.qre.application +from qdk.qre.application import * diff --git a/source/pip/qsharp/qre/interop/__init__.py b/source/pip/qsharp/qre/interop/__init__.py new file mode 100644 index 0000000000..2ed7a9b952 --- /dev/null +++ b/source/pip/qsharp/qre/interop/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Deprecation shim – delegates to qdk.qre.interop +from qdk.qre.interop import * diff --git a/source/pip/qsharp/qre/interop/_cirq.py b/source/pip/qsharp/qre/interop/_cirq.py new file mode 100644 index 0000000000..eae736aee9 --- /dev/null +++ b/source/pip/qsharp/qre/interop/_cirq.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# Deprecation shim – delegates to qdk.qre.interop._cirq +from qdk.qre.interop._cirq import * diff --git a/source/pip/qsharp/qre/models/__init__.py b/source/pip/qsharp/qre/models/__init__.py new file mode 100644 index 0000000000..a84ac70e66 --- /dev/null +++ b/source/pip/qsharp/qre/models/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# Deprecation shim – delegates to qdk.qre.models +from qdk.qre.models import * diff --git a/source/qdk_package/tests/conftest.py b/source/qdk_package/tests/conftest.py index 4013081ef4..b06646dcab 100644 --- a/source/qdk_package/tests/conftest.py +++ b/source/qdk_package/tests/conftest.py @@ -4,14 +4,11 @@ import sys from pathlib import Path -# Add local package root so 'qdk' can be imported without installation and add tests dir so 'mocks' is importable. +# Add local package root so 'qdk' can be imported without installation (useful +# after ``maturin develop``) and add tests dir so 'mocks' is importable. +# In CI the wheel is installed before tests run, so this is a convenience fallback. _root = Path(__file__).resolve().parent _pkg_root = _root.parent for p in (_pkg_root, _root): if str(p) not in sys.path: sys.path.insert(0, str(p)) - -# Ensure a qsharp stub (if real package absent) via centralized mocks helper. -import mocks - -mocks.mock_qsharp() diff --git a/source/qdk_package/tests/mocks.py b/source/qdk_package/tests/mocks.py index 430edaa761..a2124a198e 100644 --- a/source/qdk_package/tests/mocks.py +++ b/source/qdk_package/tests/mocks.py @@ -3,7 +3,7 @@ """Centralized mock helpers for tests. -Provides lightweight stand-ins for optional (and required during tests) dependencies. +Provides lightweight stand-ins for optional dependencies. Functions return a list of module names they created so callers can later clean them up using cleanup_modules(). This keeps test intent explicit. @@ -15,102 +15,7 @@ def _not_impl(*_a, **_k): - raise NotImplementedError("qsharp stub: real 'qsharp' package not installed") - - -def mock_qsharp() -> List[str]: - """Ensure a minimal 'qsharp' module exists. - - In real usage the qsharp package provides a compiled extension. Tests only - need the attribute surface that qdk re-exports (run/estimate presently used - for sanity checks). If the real package is installed this is a no-op. 
- """ - created: List[str] = [] - if "qsharp" not in sys.modules: - stub = types.ModuleType("qsharp") - - stub.run = _not_impl - stub.estimate = _not_impl - # Provide utility symbols expected to re-export at root - stub.code = object() - stub.set_quantum_seed = _not_impl - stub.set_classical_seed = _not_impl - stub.dump_machine = _not_impl - stub.init = _not_impl - - class _T: # placeholder types - pass - - stub.Result = _T - stub.TargetProfile = _T - stub.StateDump = _T - stub.ShotResult = _T - stub.PauliNoise = _T - stub.DepolarizingNoise = _T - stub.BitFlipNoise = _T - stub.PhaseFlipNoise = _T - stub.__all__ = [ - "run", - "estimate", - "code", - "set_quantum_seed", - "set_classical_seed", - "dump_machine", - "init", - "Result", - "TargetProfile", - "StateDump", - "ShotResult", - "PauliNoise", - "DepolarizingNoise", - "BitFlipNoise", - "PhaseFlipNoise", - "estimator", - "openqasm", - "utils", - ] - # Minimal submodules to back lifted shims - est = types.ModuleType("qsharp.estimator") - est.__doc__ = "mock estimator" - sys.modules["qsharp.estimator"] = est - stub.estimator = est - oq = types.ModuleType("qsharp.openqasm") - oq.__doc__ = "mock openqasm" - sys.modules["qsharp.openqasm"] = oq - stub.openqasm = oq - utils_mod = types.ModuleType("qsharp.utils") - utils_mod.dump_operation = _not_impl - sys.modules["qsharp.utils"] = utils_mod - stub.utils = utils_mod - - sys.modules["qsharp"] = stub - # Telemetry events package with on_qdk_import function expected by qdk import - telemetry_pkg = types.ModuleType("qsharp.telemetry_events") - - def on_qdk_import(): - return None - - telemetry_pkg.on_qdk_import = on_qdk_import - sys.modules["qsharp.telemetry_events"] = telemetry_pkg - # Interop namespace for qiskit shim expectations - interop = types.ModuleType("qsharp.interop") - sys.modules["qsharp.interop"] = interop - interop_qk = types.ModuleType("qsharp.interop.qiskit") - interop_qk.__doc__ = "mock qsharp interop qiskit" - sys.modules["qsharp.interop.qiskit"] = 
interop_qk - - created.extend( - [ - "qsharp", - "qsharp.estimator", - "qsharp.openqasm", - "qsharp.utils", - "qsharp.telemetry_events", - "qsharp.interop", - "qsharp.interop.qiskit", - ] - ) - return created + raise NotImplementedError("stub: dependency not installed") def mock_widgets() -> List[str]: @@ -153,33 +58,6 @@ def mock_azure() -> List[str]: return created -def mock_qiskit() -> List[str]: - created: List[str] = [] - if "qiskit" not in sys.modules: - qk = types.ModuleType("qiskit") - qk.transpile = _not_impl - sys.modules["qiskit"] = qk - created.append("qiskit") - return created - - -def mock_cirq() -> List[str]: - created: List[str] = [] - if "cirq" not in sys.modules: - cq = types.ModuleType("cirq") - sys.modules["cirq"] = cq - created.append("cirq") - if "qsharp.interop.cirq" not in sys.modules: - interop_cirq = types.ModuleType("qsharp.interop.cirq") - interop_cirq.__doc__ = "mock qsharp interop cirq" - sys.modules["qsharp.interop.cirq"] = interop_cirq - interop = sys.modules.get("qsharp.interop") - if interop is not None: - interop.cirq = interop_cirq - created.append("qsharp.interop.cirq") - return created - - def cleanup_modules(created: List[str]) -> None: """Remove synthetic modules created during a test if still present.""" for name in created: diff --git a/source/qdk_package/tests/test_extras.py b/source/qdk_package/tests/test_extras.py index 80b574805e..922a955525 100644 --- a/source/qdk_package/tests/test_extras.py +++ b/source/qdk_package/tests/test_extras.py @@ -1,19 +1,20 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -import pytest, importlib +"""Tests for the re-export shims that wrap optional third-party packages. -from mocks import ( - mock_widgets, - mock_azure, - mock_qiskit, - mock_cirq, - cleanup_modules, -) +Only ``qdk.widgets`` (wraps ``qsharp_widgets``) and ``qdk.azure`` (wraps +``azure.quantum``) are re-export shims. 
We mock the upstream packages and +verify that the shims surface the expected attributes. +""" +import importlib +import pytest -# Standard contract description for each extra we test. -EXTRAS = { +from mocks import mock_widgets, mock_azure, cleanup_modules + + +MOCK_EXTRAS = { "widgets": { "mock": mock_widgets, "module": "qdk.widgets", @@ -26,21 +27,11 @@ hasattr(mod, name) for name in ("target", "argument_types", "job") ), }, - "qiskit": { - "mock": mock_qiskit, - "module": "qdk.qiskit", - "post_assert": lambda mod: hasattr(mod, "__doc__"), - }, - "cirq": { - "mock": mock_cirq, - "module": "qdk.cirq", - "post_assert": lambda mod: hasattr(mod, "__doc__"), - }, } -@pytest.mark.parametrize("name,spec", EXTRAS.items()) -def test_direct_import_with_mock(name, spec): +@pytest.mark.parametrize("name,spec", MOCK_EXTRAS.items()) +def test_reexport_shim_with_mock(name, spec): created = spec["mock"]() try: imported = importlib.import_module(spec["module"]) diff --git a/source/qdk_package/tests/test_reexports.py b/source/qdk_package/tests/test_reexports.py index cbaf445a1a..b852723212 100644 --- a/source/qdk_package/tests/test_reexports.py +++ b/source/qdk_package/tests/test_reexports.py @@ -1,45 +1,34 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -import pytest, importlib - - -def test_qdk_qsharp_submodule_available(): - # Import the qsharp submodule explicitly. - qdk_qsharp = importlib.import_module("qdk.qsharp") - # Ensure a core API is reachable via submodule - assert hasattr(qdk_qsharp, "run"), "qsharp.run missing in submodule" - - -def test_estimator_and_openqasm_shims(): - est = importlib.import_module("qdk.estimator") - oq = importlib.import_module("qdk.openqasm") - assert hasattr(est, "__doc__") - assert hasattr(oq, "__doc__") - - -def test_missing_optional_direct_imports(): - # If optional extras truly not installed, importing their submodules should raise ImportError. - # We probe without using mocks here. 
- for mod in ("qdk.widgets", "qdk.azure", "qdk.qiskit", "qdk.cirq", "qdk.qre"): - base_dep = { - "qdk.widgets": "qsharp_widgets", - "qdk.azure": "azure.quantum", - "qdk.qiskit": "qiskit", - "qdk.cirq": "cirq", - "qdk.qre": "qre", - }[mod] - try: - importlib.import_module(base_dep) - dep_installed = True - except Exception: - dep_installed = False - if not dep_installed: - try: - importlib.import_module(mod) - except ImportError as e: - # Expected path: verify helpful hint present - assert "pip install qdk[" in str(e) - else: - # If it imported anyway, treat as environment providing the feature (e.g. via dev install) - pass +"""Tests for qdk re-export shims. + +Only ``qdk.widgets`` and ``qdk.azure`` are re-export shims wrapping third-party +packages. All other qdk submodules (estimator, openqasm, qiskit, cirq, qre, +etc.) now own their code directly and are covered by functional tests elsewhere. +""" + +import importlib +import pytest + + +# ---- Friendly error messages when optional deps are missing ---- + +_REEXPORT_SHIMS = { + "qdk.widgets": {"dep": "qsharp_widgets", "hint": "pip install qdk[jupyter]"}, + "qdk.azure": {"dep": "azure.quantum", "hint": "pip install qdk[azure]"}, +} + + +@pytest.mark.parametrize("mod,spec", _REEXPORT_SHIMS.items()) +def test_missing_optional_gives_helpful_error(mod, spec): + """When the upstream dep is absent, importing the shim should raise + ImportError containing a pip-install hint.""" + try: + importlib.import_module(spec["dep"]) + pytest.skip(f"{spec['dep']} is installed; cannot test missing-dep path") + except ImportError: + pass + + with pytest.raises(ImportError, match=spec["hint"]): + importlib.import_module(mod) From f61fcab9d12df21b694c9dd4e7e9b0cbfc557dc7 Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Wed, 29 Apr 2026 14:49:22 -0700 Subject: [PATCH 05/25] move non-integration tests into qdk --- build.py | 21 ++++----- source/qdk_package/LICENSE.txt | 21 +++++++++ source/qdk_package/pyproject.toml | 3 ++ 
source/qdk_package/test_requirements.txt | 3 ++ source/{pip => qdk_package}/tests/.gitignore | 0 .../tests/CliffordCalls.qs | 0 .../tests/CliffordIsing.qs | 0 .../tests/applications/__init__.py | 0 .../tests/applications/magnets/__init__.py | 0 .../applications/magnets/test_complete.py | 2 +- .../applications/magnets/test_hypergraph.py | 2 +- .../applications/magnets/test_lattice1d.py | 2 +- .../applications/magnets/test_lattice2d.py | 2 +- .../tests/applications/magnets/test_model.py | 2 +- .../tests/applications/magnets/test_pauli.py | 2 +- .../applications/magnets/test_trotter.py | 2 +- source/{pip => qdk_package}/tests/circuit.qsc | 0 source/qdk_package/tests/conftest.py | 46 +++++++++++++++---- .../csv_dir_test/test_noise_intrinsic.csv | 0 .../tests/qre/__init__.py | 0 .../tests/qre/conftest.py | 6 +-- .../tests/qre/test_application.py | 14 +++--- .../tests/qre/test_cirq_interop.py | 8 ++-- .../tests/qre/test_enumeration.py | 32 ++++++------- .../tests/qre/test_estimation.py | 8 ++-- .../tests/qre/test_estimation_table.py | 14 +++--- .../tests/qre/test_interop.py | 12 ++--- .../tests/qre/test_isa.py | 16 +++---- .../tests/qre/test_models.py | 16 +++---- .../qdk_package/tests/reexports/__init__.py | 0 .../qdk_package/tests/reexports/conftest.py | 10 ++++ .../tests/{ => reexports}/mocks.py | 11 ++++- .../tests/{ => reexports}/test_extras.py | 0 .../tests/{ => reexports}/test_reexports.py | 4 +- .../tests/test_adaptive_gpu_bytecode.py | 12 ++--- .../tests/test_adaptive_gpu_noise.py | 30 ++++++------ .../tests/test_adaptive_gpu_quantum_ops.py | 4 +- .../tests/test_adaptive_pass.py | 4 +- .../tests/test_callable_passing.py | 24 +++++----- .../tests/test_clifford_simulator.py | 12 ++--- .../tests/test_correlated_noise.py | 14 +++--- .../tests/test_cpu_simulator.py | 10 ++-- .../{pip => qdk_package}/tests/test_enums.py | 10 ++-- .../tests/test_generic_estimator.py | 2 +- .../tests/test_gpu_simulator.py | 12 ++--- .../tests/test_interpreter.py | 4 +- 
.../tests/test_noisy_config.py | 2 +- .../tests/test_noisy_simulator.py | 2 +- .../tests/test_project.py | 6 +-- .../{pip => qdk_package}/tests/test_qasm.py | 8 ++-- .../tests/test_qasm_io.py | 6 +-- .../{pip => qdk_package}/tests/test_qsharp.py | 18 ++++---- source/{pip => qdk_package}/tests/test_re.py | 4 +- .../tests/test_sparse_simulator.py | 10 ++-- 54 files changed, 260 insertions(+), 193 deletions(-) create mode 100644 source/qdk_package/LICENSE.txt rename source/{pip => qdk_package}/tests/.gitignore (100%) rename source/{pip => qdk_package}/tests/CliffordCalls.qs (100%) rename source/{pip => qdk_package}/tests/CliffordIsing.qs (100%) rename source/{pip => qdk_package}/tests/applications/__init__.py (100%) rename source/{pip => qdk_package}/tests/applications/magnets/__init__.py (100%) rename source/{pip => qdk_package}/tests/applications/magnets/test_complete.py (99%) rename source/{pip => qdk_package}/tests/applications/magnets/test_hypergraph.py (99%) mode change 100755 => 100644 rename source/{pip => qdk_package}/tests/applications/magnets/test_lattice1d.py (99%) rename source/{pip => qdk_package}/tests/applications/magnets/test_lattice2d.py (99%) rename source/{pip => qdk_package}/tests/applications/magnets/test_model.py (99%) mode change 100755 => 100644 rename source/{pip => qdk_package}/tests/applications/magnets/test_pauli.py (98%) rename source/{pip => qdk_package}/tests/applications/magnets/test_trotter.py (99%) rename source/{pip => qdk_package}/tests/circuit.qsc (100%) rename source/{pip => qdk_package}/tests/csv_dir_test/test_noise_intrinsic.csv (100%) rename source/{pip => qdk_package}/tests/qre/__init__.py (100%) rename source/{pip => qdk_package}/tests/qre/conftest.py (91%) rename source/{pip => qdk_package}/tests/qre/test_application.py (95%) rename source/{pip => qdk_package}/tests/qre/test_cirq_interop.py (97%) rename source/{pip => qdk_package}/tests/qre/test_enumeration.py (94%) rename source/{pip => 
qdk_package}/tests/qre/test_estimation.py (94%) rename source/{pip => qdk_package}/tests/qre/test_estimation_table.py (96%) rename source/{pip => qdk_package}/tests/qre/test_interop.py (96%) rename source/{pip => qdk_package}/tests/qre/test_isa.py (94%) rename source/{pip => qdk_package}/tests/qre/test_models.py (99%) create mode 100644 source/qdk_package/tests/reexports/__init__.py create mode 100644 source/qdk_package/tests/reexports/conftest.py rename source/qdk_package/tests/{ => reexports}/mocks.py (83%) rename source/qdk_package/tests/{ => reexports}/test_extras.py (100%) rename source/qdk_package/tests/{ => reexports}/test_reexports.py (85%) rename source/{pip => qdk_package}/tests/test_adaptive_gpu_bytecode.py (99%) rename source/{pip => qdk_package}/tests/test_adaptive_gpu_noise.py (94%) rename source/{pip => qdk_package}/tests/test_adaptive_gpu_quantum_ops.py (99%) rename source/{pip => qdk_package}/tests/test_adaptive_pass.py (99%) rename source/{pip => qdk_package}/tests/test_callable_passing.py (94%) rename source/{pip => qdk_package}/tests/test_clifford_simulator.py (96%) rename source/{pip => qdk_package}/tests/test_correlated_noise.py (90%) rename source/{pip => qdk_package}/tests/test_cpu_simulator.py (98%) rename source/{pip => qdk_package}/tests/test_enums.py (95%) rename source/{pip => qdk_package}/tests/test_generic_estimator.py (99%) rename source/{pip => qdk_package}/tests/test_gpu_simulator.py (98%) rename source/{pip => qdk_package}/tests/test_interpreter.py (99%) rename source/{pip => qdk_package}/tests/test_noisy_config.py (98%) rename source/{pip => qdk_package}/tests/test_noisy_simulator.py (99%) rename source/{pip => qdk_package}/tests/test_project.py (99%) rename source/{pip => qdk_package}/tests/test_qasm.py (99%) rename source/{pip => qdk_package}/tests/test_qasm_io.py (99%) rename source/{pip => qdk_package}/tests/test_qsharp.py (99%) rename source/{pip => qdk_package}/tests/test_re.py (99%) rename source/{pip => 
qdk_package}/tests/test_sparse_simulator.py (98%) diff --git a/build.py b/build.py index 706d6df7da..2f54b0c506 100755 --- a/build.py +++ b/build.py @@ -454,12 +454,15 @@ def run_ci_historic_benchmark(): install_python_test_requirements(qdk_python_src, python_bin) # Install qdk from the freshly built wheel. + # Use --no-deps because dependencies (pyqir, etc.) are already installed + # via test_requirements, and --no-index can't resolve them from PyPI. install_args = [ python_bin, "-m", "pip", "install", "--force-reinstall", + "--no-deps", "--no-index", "--find-links=" + wheels_dir, "qdk", @@ -490,10 +493,11 @@ def run_ci_historic_benchmark(): subprocess.run(pip_build_args, check=True, text=True, cwd=pip_src, env=pip_env) step_end() - if run_tests: - step_start("Running tests for the pip package") + if args.integration_tests: + step_start("Setting up for integration tests for the pip package") + test_dir = os.path.join(pip_src, "tests-integration") + install_python_test_requirements(test_dir, python_bin, check=False) - install_python_test_requirements(pip_src, python_bin) # Install qdk first (qsharp depends on it) install_args = [ python_bin, @@ -505,16 +509,7 @@ def run_ci_historic_benchmark(): "--find-links=" + wheels_dir, "qdk", ] - subprocess.run(install_args, check=True, text=True, cwd=pip_src) - install_qsharp_python_package(pip_src, wheels_dir, python_bin) - run_python_tests(os.path.join(pip_src, "tests"), python_bin, pip_env) - - step_end() - - if args.integration_tests: - step_start("Setting up for integration tests for the pip package") - test_dir = os.path.join(pip_src, "tests-integration") - install_python_test_requirements(test_dir, python_bin, check=False) + subprocess.run(install_args, check=True, text=True, cwd=test_dir) step_end() for version in QISKIT_VERSION_MATRIX: diff --git a/source/qdk_package/LICENSE.txt b/source/qdk_package/LICENSE.txt new file mode 100644 index 0000000000..9e841e7a26 --- /dev/null +++ b/source/qdk_package/LICENSE.txt @@ 
-0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/source/qdk_package/pyproject.toml b/source/qdk_package/pyproject.toml index a9dcf666fb..6d7ebed0cc 100644 --- a/source/qdk_package/pyproject.toml +++ b/source/qdk_package/pyproject.toml @@ -45,6 +45,9 @@ all = [ "qsharp-jupyterlab==0.0.0", ] +[tool.pytest.ini_options] +testpaths = ["tests"] + [tool.maturin] module-name = "qdk._native" diff --git a/source/qdk_package/test_requirements.txt b/source/qdk_package/test_requirements.txt index e13e58d3f3..eb3a1d28a5 100644 --- a/source/qdk_package/test_requirements.txt +++ b/source/qdk_package/test_requirements.txt @@ -1,2 +1,5 @@ pytest +expecttest==0.3.0 pyqir>=0.12.3,<0.13 +cirq==1.6.1; platform_system != 'Windows' or platform_machine == 'AMD64' +pandas>=2.1 diff --git a/source/pip/tests/.gitignore b/source/qdk_package/tests/.gitignore similarity index 100% rename from source/pip/tests/.gitignore rename 
to source/qdk_package/tests/.gitignore diff --git a/source/pip/tests/CliffordCalls.qs b/source/qdk_package/tests/CliffordCalls.qs similarity index 100% rename from source/pip/tests/CliffordCalls.qs rename to source/qdk_package/tests/CliffordCalls.qs diff --git a/source/pip/tests/CliffordIsing.qs b/source/qdk_package/tests/CliffordIsing.qs similarity index 100% rename from source/pip/tests/CliffordIsing.qs rename to source/qdk_package/tests/CliffordIsing.qs diff --git a/source/pip/tests/applications/__init__.py b/source/qdk_package/tests/applications/__init__.py similarity index 100% rename from source/pip/tests/applications/__init__.py rename to source/qdk_package/tests/applications/__init__.py diff --git a/source/pip/tests/applications/magnets/__init__.py b/source/qdk_package/tests/applications/magnets/__init__.py similarity index 100% rename from source/pip/tests/applications/magnets/__init__.py rename to source/qdk_package/tests/applications/magnets/__init__.py diff --git a/source/pip/tests/applications/magnets/test_complete.py b/source/qdk_package/tests/applications/magnets/test_complete.py similarity index 99% rename from source/pip/tests/applications/magnets/test_complete.py rename to source/qdk_package/tests/applications/magnets/test_complete.py index 50a50ac1ef..aafd6f3d3a 100644 --- a/source/pip/tests/applications/magnets/test_complete.py +++ b/source/qdk_package/tests/applications/magnets/test_complete.py @@ -7,7 +7,7 @@ cirq = pytest.importorskip("cirq") -from qsharp.applications.magnets import ( +from qdk.applications.magnets import ( CompleteBipartiteGraph, CompleteGraph, Hypergraph, diff --git a/source/pip/tests/applications/magnets/test_hypergraph.py b/source/qdk_package/tests/applications/magnets/test_hypergraph.py old mode 100755 new mode 100644 similarity index 99% rename from source/pip/tests/applications/magnets/test_hypergraph.py rename to source/qdk_package/tests/applications/magnets/test_hypergraph.py index 0c633a1d84..297f471be7 --- 
a/source/pip/tests/applications/magnets/test_hypergraph.py +++ b/source/qdk_package/tests/applications/magnets/test_hypergraph.py @@ -8,7 +8,7 @@ cirq = pytest.importorskip("cirq") -from qsharp.applications.magnets import ( +from qdk.applications.magnets import ( Hyperedge, Hypergraph, HypergraphEdgeColoring, diff --git a/source/pip/tests/applications/magnets/test_lattice1d.py b/source/qdk_package/tests/applications/magnets/test_lattice1d.py similarity index 99% rename from source/pip/tests/applications/magnets/test_lattice1d.py rename to source/qdk_package/tests/applications/magnets/test_lattice1d.py index f9b06086be..b122ef0f59 100644 --- a/source/pip/tests/applications/magnets/test_lattice1d.py +++ b/source/qdk_package/tests/applications/magnets/test_lattice1d.py @@ -7,7 +7,7 @@ cirq = pytest.importorskip("cirq") -from qsharp.applications.magnets import ( +from qdk.applications.magnets import ( Chain1D, Hypergraph, HypergraphEdgeColoring, diff --git a/source/pip/tests/applications/magnets/test_lattice2d.py b/source/qdk_package/tests/applications/magnets/test_lattice2d.py similarity index 99% rename from source/pip/tests/applications/magnets/test_lattice2d.py rename to source/qdk_package/tests/applications/magnets/test_lattice2d.py index bedb7874b6..8c53b3c94e 100644 --- a/source/pip/tests/applications/magnets/test_lattice2d.py +++ b/source/qdk_package/tests/applications/magnets/test_lattice2d.py @@ -7,7 +7,7 @@ cirq = pytest.importorskip("cirq") -from qsharp.applications.magnets import ( +from qdk.applications.magnets import ( Hypergraph, HypergraphEdgeColoring, Patch2D, diff --git a/source/pip/tests/applications/magnets/test_model.py b/source/qdk_package/tests/applications/magnets/test_model.py old mode 100755 new mode 100644 similarity index 99% rename from source/pip/tests/applications/magnets/test_model.py rename to source/qdk_package/tests/applications/magnets/test_model.py index c525a80719..52a684f3ec --- 
a/source/pip/tests/applications/magnets/test_model.py +++ b/source/qdk_package/tests/applications/magnets/test_model.py @@ -11,7 +11,7 @@ cirq = pytest.importorskip("cirq") -from qsharp.applications.magnets import ( +from qdk.applications.magnets import ( HeisenbergModel, Hyperedge, Hypergraph, diff --git a/source/pip/tests/applications/magnets/test_pauli.py b/source/qdk_package/tests/applications/magnets/test_pauli.py similarity index 98% rename from source/pip/tests/applications/magnets/test_pauli.py rename to source/qdk_package/tests/applications/magnets/test_pauli.py index bb4f7ab8dd..306e6b4597 100644 --- a/source/pip/tests/applications/magnets/test_pauli.py +++ b/source/qdk_package/tests/applications/magnets/test_pauli.py @@ -7,7 +7,7 @@ cirq = pytest.importorskip("cirq") -from qsharp.applications.magnets import Pauli, PauliString, PauliX, PauliY, PauliZ +from qdk.applications.magnets import Pauli, PauliString, PauliX, PauliY, PauliZ def test_pauli_init_from_int_and_string(): diff --git a/source/pip/tests/applications/magnets/test_trotter.py b/source/qdk_package/tests/applications/magnets/test_trotter.py similarity index 99% rename from source/pip/tests/applications/magnets/test_trotter.py rename to source/qdk_package/tests/applications/magnets/test_trotter.py index 77d508ab7e..f0e2f435db 100644 --- a/source/pip/tests/applications/magnets/test_trotter.py +++ b/source/qdk_package/tests/applications/magnets/test_trotter.py @@ -7,7 +7,7 @@ cirq = pytest.importorskip("cirq") -from qsharp.applications.magnets import ( +from qdk.applications.magnets import ( Hyperedge, Hypergraph, Model, diff --git a/source/pip/tests/circuit.qsc b/source/qdk_package/tests/circuit.qsc similarity index 100% rename from source/pip/tests/circuit.qsc rename to source/qdk_package/tests/circuit.qsc diff --git a/source/qdk_package/tests/conftest.py b/source/qdk_package/tests/conftest.py index b06646dcab..9814a62367 100644 --- a/source/qdk_package/tests/conftest.py +++ 
b/source/qdk_package/tests/conftest.py @@ -1,14 +1,40 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -import sys -from pathlib import Path +# Tests rely on the qdk wheel being installed in the venv (with the compiled +# _native extension). Do NOT add the source tree to sys.path here – that +# would shadow the installed package with the local source directory, which +# does not contain the compiled extension module. -# Add local package root so 'qdk' can be imported without installation (useful -# after ``maturin develop``) and add tests dir so 'mocks' is importable. -# In CI the wheel is installed before tests run, so this is a convenience fallback. -_root = Path(__file__).resolve().parent -_pkg_root = _root.parent -for p in (_pkg_root, _root): - if str(p) not in sys.path: - sys.path.insert(0, str(p)) +# --------------------------------------------------------------------------- +# Many test files use ``import qdk as qsharp`` and then access symbols like +# ``qsharp.eval()``, ``qsharp.run()``, etc. Those symbols were part of the +# old *qsharp* public API but are intentionally NOT exported from ``qdk`` +# (whose public API must stay unchanged). +# +# Rather than rewriting hundreds of call-sites, we monkey-patch the extra +# symbols onto the ``qdk`` module here so the test alias keeps working. +# This is test infrastructure only – it does not affect the public package. 
+# --------------------------------------------------------------------------- +import qdk +from qdk._qsharp import ( # noqa: E402 + eval, + run, + compile, + circuit, + estimate, + logical_counts, + QSharpError, + CircuitGenerationMethod, +) +from qdk._native import estimate_custom # type: ignore # noqa: E402 + +qdk.eval = eval +qdk.run = run +qdk.compile = compile +qdk.circuit = circuit +qdk.estimate = estimate +qdk.logical_counts = logical_counts +qdk.QSharpError = QSharpError +qdk.CircuitGenerationMethod = CircuitGenerationMethod +qdk.estimate_custom = estimate_custom diff --git a/source/pip/tests/csv_dir_test/test_noise_intrinsic.csv b/source/qdk_package/tests/csv_dir_test/test_noise_intrinsic.csv similarity index 100% rename from source/pip/tests/csv_dir_test/test_noise_intrinsic.csv rename to source/qdk_package/tests/csv_dir_test/test_noise_intrinsic.csv diff --git a/source/pip/tests/qre/__init__.py b/source/qdk_package/tests/qre/__init__.py similarity index 100% rename from source/pip/tests/qre/__init__.py rename to source/qdk_package/tests/qre/__init__.py diff --git a/source/pip/tests/qre/conftest.py b/source/qdk_package/tests/qre/conftest.py similarity index 91% rename from source/pip/tests/qre/conftest.py rename to source/qdk_package/tests/qre/conftest.py index c779e6ff31..8e0b3394ae 100644 --- a/source/pip/tests/qre/conftest.py +++ b/source/qdk_package/tests/qre/conftest.py @@ -4,15 +4,15 @@ from dataclasses import KW_ONLY, dataclass, field from typing import Generator -from qsharp.qre import ( +from qdk.qre import ( ISA, LOGICAL, ISARequirements, ISATransform, constraint, ) -from qsharp.qre._architecture import ISAContext -from qsharp.qre.instruction_ids import LATTICE_SURGERY, T +from qdk.qre._architecture import ISAContext +from qdk.qre.instruction_ids import LATTICE_SURGERY, T # NOTE These classes will be generalized as part of the QRE API in the following diff --git a/source/pip/tests/qre/test_application.py 
b/source/qdk_package/tests/qre/test_application.py similarity index 95% rename from source/pip/tests/qre/test_application.py rename to source/qdk_package/tests/qre/test_application.py index f49db0921b..f3045aad37 100644 --- a/source/pip/tests/qre/test_application.py +++ b/source/qdk_package/tests/qre/test_application.py @@ -7,9 +7,9 @@ from dataclasses import dataclass, field -import qsharp +import qdk as qsharp -from qsharp.qre import ( +from qdk.qre import ( Application, ISA, LOGICAL, @@ -19,11 +19,11 @@ Trace, linear_function, ) -from qsharp.qre._qre import _ProvenanceGraph -from qsharp.qre._enumeration import _enumerate_instances -from qsharp.qre.application import QSharpApplication -from qsharp.qre.instruction_ids import CCX, LATTICE_SURGERY, T, RZ -from qsharp.qre.property_keys import ( +from qdk.qre._qre import _ProvenanceGraph +from qdk.qre._enumeration import _enumerate_instances +from qdk.qre.application import QSharpApplication +from qdk.qre.instruction_ids import CCX, LATTICE_SURGERY, T, RZ +from qdk.qre.property_keys import ( ALGORITHM_COMPUTE_QUBITS, ALGORITHM_MEMORY_QUBITS, LOGICAL_COMPUTE_QUBITS, diff --git a/source/pip/tests/qre/test_cirq_interop.py b/source/qdk_package/tests/qre/test_cirq_interop.py similarity index 97% rename from source/pip/tests/qre/test_cirq_interop.py rename to source/qdk_package/tests/qre/test_cirq_interop.py index 3e01eb8b12..95bcf8fe04 100644 --- a/source/pip/tests/qre/test_cirq_interop.py +++ b/source/qdk_package/tests/qre/test_cirq_interop.py @@ -5,10 +5,10 @@ cirq = pytest.importorskip("cirq") -from qsharp.qre import PSSPC -from qsharp.qre.application import CirqApplication -from qsharp.qre.interop import trace_from_cirq -from qsharp.qre.interop._cirq import ( +from qdk.qre import PSSPC +from qdk.qre.application import CirqApplication +from qdk.qre.interop import trace_from_cirq +from qdk.qre.interop._cirq import ( TypedQubit, QubitType, read_from_memory, diff --git a/source/pip/tests/qre/test_enumeration.py 
b/source/qdk_package/tests/qre/test_enumeration.py similarity index 94% rename from source/pip/tests/qre/test_enumeration.py rename to source/qdk_package/tests/qre/test_enumeration.py index 371ca5126f..d8c987f90c 100644 --- a/source/pip/tests/qre/test_enumeration.py +++ b/source/qdk_package/tests/qre/test_enumeration.py @@ -7,10 +7,10 @@ import pytest -from qsharp.qre import LOGICAL -from qsharp.qre.models import SurfaceCode, GateBased, RoundBasedFactory -from qsharp.qre.instruction_ids import LATTICE_SURGERY, T -from qsharp.qre._isa_enumeration import ( +from qdk.qre import LOGICAL +from qdk.qre.models import SurfaceCode, GateBased, RoundBasedFactory +from qdk.qre.instruction_ids import LATTICE_SURGERY, T +from qdk.qre._isa_enumeration import ( ISARefNode, _ComponentQuery, _ProductNode, @@ -22,7 +22,7 @@ def test_enumerate_instances(): """Test enumeration of SurfaceCode instances with default and custom domains.""" - from qsharp.qre._enumeration import _enumerate_instances + from qdk.qre._enumeration import _enumerate_instances instances = list(_enumerate_instances(SurfaceCode)) @@ -47,7 +47,7 @@ def test_enumerate_instances(): def test_enumerate_instances_bool(): """Test that boolean dataclass fields enumerate both True and False.""" - from qsharp.qre._enumeration import _enumerate_instances + from qdk.qre._enumeration import _enumerate_instances @dataclass class BoolConfig: @@ -62,7 +62,7 @@ class BoolConfig: def test_enumerate_instances_enum(): """Test that Enum dataclass fields enumerate all members.""" - from qsharp.qre._enumeration import _enumerate_instances + from qdk.qre._enumeration import _enumerate_instances class Color(Enum): RED = 1 @@ -83,7 +83,7 @@ class EnumConfig: def test_enumerate_instances_failure(): """Test that a field with no domain and no default raises ValueError.""" - from qsharp.qre._enumeration import _enumerate_instances + from qdk.qre._enumeration import _enumerate_instances @dataclass class InvalidConfig: @@ -97,7 +97,7 @@ class 
InvalidConfig: def test_enumerate_instances_single(): """Test enumeration of a dataclass with a single non-kw-only field.""" - from qsharp.qre._enumeration import _enumerate_instances + from qdk.qre._enumeration import _enumerate_instances @dataclass class SingleConfig: @@ -110,7 +110,7 @@ class SingleConfig: def test_enumerate_instances_literal(): """Test that Literal-typed fields enumerate their allowed values.""" - from qsharp.qre._enumeration import _enumerate_instances + from qdk.qre._enumeration import _enumerate_instances from typing import Literal @@ -127,7 +127,7 @@ class LiteralConfig: def test_enumerate_instances_nested(): """Test enumeration of nested dataclass fields.""" - from qsharp.qre._enumeration import _enumerate_instances + from qdk.qre._enumeration import _enumerate_instances @dataclass class InnerConfig: @@ -147,7 +147,7 @@ class OuterConfig: def test_enumerate_instances_union(): """Test enumeration of union-typed dataclass fields.""" - from qsharp.qre._enumeration import _enumerate_instances + from qdk.qre._enumeration import _enumerate_instances @dataclass class OptionA: @@ -174,7 +174,7 @@ class UnionConfig: def test_enumerate_instances_nested_with_constraints(): """Test constraining nested dataclass fields via a dict.""" - from qsharp.qre._enumeration import _enumerate_instances + from qdk.qre._enumeration import _enumerate_instances @dataclass class InnerConfig: @@ -194,7 +194,7 @@ class OuterConfig: def test_enumerate_instances_union_single_type(): """Test restricting a union field to a single member type.""" - from qsharp.qre._enumeration import _enumerate_instances + from qdk.qre._enumeration import _enumerate_instances @dataclass class OptionA: @@ -227,7 +227,7 @@ class UnionConfig: def test_enumerate_instances_union_list_of_types(): """Test restricting a union field to a subset of member types.""" - from qsharp.qre._enumeration import _enumerate_instances + from qdk.qre._enumeration import _enumerate_instances @dataclass class 
OptionA: @@ -257,7 +257,7 @@ class UnionConfig: def test_enumerate_instances_union_constraint_dict(): """Test constraining union field members via a type-to-kwargs dict.""" - from qsharp.qre._enumeration import _enumerate_instances + from qdk.qre._enumeration import _enumerate_instances @dataclass class OptionA: diff --git a/source/pip/tests/qre/test_estimation.py b/source/qdk_package/tests/qre/test_estimation.py similarity index 94% rename from source/pip/tests/qre/test_estimation.py rename to source/qdk_package/tests/qre/test_estimation.py index ab61887e5f..7026d90f9b 100644 --- a/source/pip/tests/qre/test_estimation.py +++ b/source/qdk_package/tests/qre/test_estimation.py @@ -7,14 +7,14 @@ cirq = pytest.importorskip("cirq") -from qsharp.estimator import LogicalCounts -from qsharp.qre import ( +from qdk.estimator import LogicalCounts +from qdk.qre import ( PSSPC, LatticeSurgery, estimate, ) -from qsharp.qre.application import QSharpApplication -from qsharp.qre.models import ( +from qdk.qre.application import QSharpApplication +from qdk.qre.models import ( SurfaceCode, GateBased, RoundBasedFactory, diff --git a/source/pip/tests/qre/test_estimation_table.py b/source/qdk_package/tests/qre/test_estimation_table.py similarity index 96% rename from source/pip/tests/qre/test_estimation_table.py rename to source/qdk_package/tests/qre/test_estimation_table.py index 3cb09451ed..abce2a79ac 100644 --- a/source/pip/tests/qre/test_estimation_table.py +++ b/source/qdk_package/tests/qre/test_estimation_table.py @@ -7,20 +7,20 @@ import pandas as pd -from qsharp.qre import ( +from qdk.qre import ( PSSPC, LatticeSurgery, estimate, ) -from qsharp.qre.application import QSharpApplication -from qsharp.qre.models import SurfaceCode, GateBased -from qsharp.qre._estimation import ( +from qdk.qre.application import QSharpApplication +from qdk.qre.models import SurfaceCode, GateBased +from qdk.qre._estimation import ( EstimationTable, EstimationTableEntry, ) -from qsharp.qre._instruction 
import InstructionSource -from qsharp.qre.instruction_ids import LATTICE_SURGERY -from qsharp.qre.property_keys import DISTANCE, NUM_TS_PER_ROTATION +from qdk.qre._instruction import InstructionSource +from qdk.qre.instruction_ids import LATTICE_SURGERY +from qdk.qre.property_keys import DISTANCE, NUM_TS_PER_ROTATION from .conftest import ExampleFactory diff --git a/source/pip/tests/qre/test_interop.py b/source/qdk_package/tests/qre/test_interop.py similarity index 96% rename from source/pip/tests/qre/test_interop.py rename to source/qdk_package/tests/qre/test_interop.py index 6f3cf37713..de29730f59 100644 --- a/source/pip/tests/qre/test_interop.py +++ b/source/qdk_package/tests/qre/test_interop.py @@ -7,9 +7,9 @@ cirq = pytest.importorskip("cirq") -import qsharp -from qsharp.qre.application import QSharpApplication, QIRApplication -from qsharp.qre.interop import trace_from_qir +import qdk as qsharp +from qdk.qre.application import QSharpApplication, QIRApplication +from qdk.qre.interop import trace_from_qir def _ll_files(): @@ -50,8 +50,8 @@ def test_trace_from_qir_handles_all_instruction_ids(): """ import pyqir import pyqir.qis as qis - from qsharp._native import QirInstructionId - from qsharp.qre.interop._qir import _GATE_MAP, _MEAS_MAP, _SKIP + from qdk._native import QirInstructionId + from qdk.qre.interop._qir import _GATE_MAP, _MEAS_MAP, _SKIP # -- Completeness check: every QirInstructionId must be covered -------- handled_ids = ( @@ -198,7 +198,7 @@ def declare(name, param_types): def test_rotation_buckets(): """Test that rotation bucketization preserves total count and depth.""" - from qsharp.qre.interop._qsharp import _bucketize_rotation_counts + from qdk.qre.interop._qsharp import _bucketize_rotation_counts r_count = 15066 r_depth = 14756 diff --git a/source/pip/tests/qre/test_isa.py b/source/qdk_package/tests/qre/test_isa.py similarity index 94% rename from source/pip/tests/qre/test_isa.py rename to source/qdk_package/tests/qre/test_isa.py index 
e8fda19f29..7edff0dbe2 100644 --- a/source/pip/tests/qre/test_isa.py +++ b/source/qdk_package/tests/qre/test_isa.py @@ -3,7 +3,7 @@ import pytest -from qsharp.qre import ( +from qdk.qre import ( LOGICAL, ISARequirements, constraint, @@ -11,11 +11,11 @@ property_name, property_name_to_key, ) -from qsharp.qre._qre import _ProvenanceGraph -from qsharp.qre.models import SurfaceCode, GateBased -from qsharp.qre._architecture import _make_instruction -from qsharp.qre.instruction_ids import CCX, CCZ, LATTICE_SURGERY, T -from qsharp.qre.property_keys import DISTANCE +from qdk.qre._qre import _ProvenanceGraph +from qdk.qre.models import SurfaceCode, GateBased +from qdk.qre._architecture import _make_instruction +from qdk.qre.instruction_ids import CCX, CCZ, LATTICE_SURGERY, T +from qdk.qre.property_keys import DISTANCE def test_isa(): @@ -146,7 +146,7 @@ def test_property_names(): def test_block_linear_function(): """Test block_linear_function creation and behavior.""" - from qsharp.qre._qre import block_linear_function + from qdk.qre._qre import block_linear_function # Test int version with offset int_fn = block_linear_function(block_size=4, slope=2, offset=1) @@ -183,7 +183,7 @@ def test_block_linear_function(): def test_generic_function(): """Test generic_function wrapping for int and float return types.""" - from qsharp.qre._qre import _IntFunction, _FloatFunction + from qdk.qre._qre import _IntFunction, _FloatFunction def time(x: int) -> int: return x * x diff --git a/source/pip/tests/qre/test_models.py b/source/qdk_package/tests/qre/test_models.py similarity index 99% rename from source/pip/tests/qre/test_models.py rename to source/qdk_package/tests/qre/test_models.py index 46b236afb0..46f23b88be 100644 --- a/source/pip/tests/qre/test_models.py +++ b/source/qdk_package/tests/qre/test_models.py @@ -1,8 +1,8 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
-from qsharp.qre import LOGICAL, PHYSICAL -from qsharp.qre.instruction_ids import ( +from qdk.qre import LOGICAL, PHYSICAL +from qdk.qre.instruction_ids import ( T, CCZ, CCX, @@ -26,7 +26,7 @@ SQRT_SQRT_Z, SQRT_SQRT_Z_DAG, ) -from qsharp.qre.models import ( +from qdk.qre.models import ( GateBased, Majorana, RoundBasedFactory, @@ -36,7 +36,7 @@ ThreeAux, TwoDimensionalYokedSurfaceCode, ) -from qsharp.qre.property_keys import DISTANCE +from qdk.qre.property_keys import DISTANCE # --------------------------------------------------------------------------- @@ -580,7 +580,7 @@ def test_table1_1e3_clifford_yields_6_isas(self): def test_table2_scenario_no_ccz(self): """Table 2 scenario: T error ~10x higher than Clifford, no CCZ.""" - from qsharp.qre._qre import _ProvenanceGraph + from qdk.qre._qre import _ProvenanceGraph arch = GateBased(gate_time=50, measurement_time=100) ctx = arch.context() @@ -608,7 +608,7 @@ def test_table2_scenario_no_ccz(self): def test_no_yield_when_error_too_high(self): """If T error > 10x Clifford, no entries match.""" - from qsharp.qre._qre import _ProvenanceGraph + from qdk.qre._qre import _ProvenanceGraph arch = GateBased(gate_time=50, measurement_time=100) ctx = arch.context() @@ -756,14 +756,14 @@ def test_modification_count_matches_factory_output(self): def test_no_family_present_passes_through(self): """If no family member is present, ISA passes through unchanged.""" - from qsharp.qre._qre import _ProvenanceGraph + from qdk.qre._qre import _ProvenanceGraph arch = GateBased(gate_time=50, measurement_time=100) ctx = arch.context() modifier = MagicUpToClifford() # ISA with only a LATTICE_SURGERY instruction (no T or CCZ family) - from qsharp.qre import linear_function + from qdk.qre import linear_function graph = _ProvenanceGraph() isa_input = graph.make_isa( diff --git a/source/qdk_package/tests/reexports/__init__.py b/source/qdk_package/tests/reexports/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git 
a/source/qdk_package/tests/reexports/conftest.py b/source/qdk_package/tests/reexports/conftest.py new file mode 100644 index 0000000000..16a794a909 --- /dev/null +++ b/source/qdk_package/tests/reexports/conftest.py @@ -0,0 +1,10 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +import sys +from pathlib import Path + +# Make the sibling ``mocks`` module importable without installation. +_dir = Path(__file__).resolve().parent +if str(_dir) not in sys.path: + sys.path.insert(0, str(_dir)) diff --git a/source/qdk_package/tests/mocks.py b/source/qdk_package/tests/reexports/mocks.py similarity index 83% rename from source/qdk_package/tests/mocks.py rename to source/qdk_package/tests/reexports/mocks.py index a2124a198e..3c4a2698d1 100644 --- a/source/qdk_package/tests/mocks.py +++ b/source/qdk_package/tests/reexports/mocks.py @@ -59,6 +59,15 @@ def mock_azure() -> List[str]: def cleanup_modules(created: List[str]) -> None: - """Remove synthetic modules created during a test if still present.""" + """Remove synthetic modules created during a test if still present. + + Also removes qdk shim modules that depend on them so that subsequent + tests re-trigger the import machinery instead of getting cached results. + """ for name in created: sys.modules.pop(name, None) + + # The qdk re-export shims cache themselves in sys.modules after import. + # Remove them so later tests that check missing-dep behavior work correctly. 
+ sys.modules.pop("qdk.widgets", None) + sys.modules.pop("qdk.azure", None) diff --git a/source/qdk_package/tests/test_extras.py b/source/qdk_package/tests/reexports/test_extras.py similarity index 100% rename from source/qdk_package/tests/test_extras.py rename to source/qdk_package/tests/reexports/test_extras.py diff --git a/source/qdk_package/tests/test_reexports.py b/source/qdk_package/tests/reexports/test_reexports.py similarity index 85% rename from source/qdk_package/tests/test_reexports.py rename to source/qdk_package/tests/reexports/test_reexports.py index b852723212..ac6e0049fe 100644 --- a/source/qdk_package/tests/test_reexports.py +++ b/source/qdk_package/tests/reexports/test_reexports.py @@ -15,8 +15,8 @@ # ---- Friendly error messages when optional deps are missing ---- _REEXPORT_SHIMS = { - "qdk.widgets": {"dep": "qsharp_widgets", "hint": "pip install qdk[jupyter]"}, - "qdk.azure": {"dep": "azure.quantum", "hint": "pip install qdk[azure]"}, + "qdk.widgets": {"dep": "qsharp_widgets", "hint": r"pip install qdk\[jupyter\]"}, + "qdk.azure": {"dep": "azure.quantum", "hint": r"pip install qdk\[azure\]"}, } diff --git a/source/pip/tests/test_adaptive_gpu_bytecode.py b/source/qdk_package/tests/test_adaptive_gpu_bytecode.py similarity index 99% rename from source/pip/tests/test_adaptive_gpu_bytecode.py rename to source/qdk_package/tests/test_adaptive_gpu_bytecode.py index 8cd2dcb93a..8112556fca 100644 --- a/source/pip/tests/test_adaptive_gpu_bytecode.py +++ b/source/qdk_package/tests/test_adaptive_gpu_bytecode.py @@ -17,7 +17,7 @@ import sys from collections import Counter import pytest -import qsharp.openqasm +import qdk.openqasm # Skip the whole module when GPU tests aren't requested. 
if not os.environ.get("QDK_GPU_TESTS"): @@ -27,7 +27,7 @@ GPU_AVAILABLE = False try: - from qsharp._native import try_create_gpu_adapter + from qdk._native import try_create_gpu_adapter gpu_info = try_create_gpu_adapter() print(f"*** USING GPU: {gpu_info}", file=sys.stderr) @@ -35,7 +35,7 @@ except OSError as e: SKIP_REASON = str(e) -from qsharp._simulation import GpuSimulator +from qdk._simulation import GpuSimulator # --------------------------------------------------------------------------- # Helpers @@ -1604,10 +1604,10 @@ def test_call_stack_overflow_guard(): def _run_openqasm(qasm_src: str, shots: int = SHOTS, seed: int = 42): """Compile OpenQASM source via the adaptive pass and run on the GPU.""" global sim - qir = qsharp.openqasm.compile( + qir = qdk.openqasm.compile( qasm_src, - output_semantics=qsharp.openqasm.OutputSemantics.OpenQasm, - target_profile=qsharp.TargetProfile.Adaptive_RIF, + output_semantics=qdk.openqasm.OutputSemantics.OpenQasm, + target_profile=qdk.TargetProfile.Adaptive_RIF, ) sim.set_program(qir) return sim.run_shots(shots, seed=seed) diff --git a/source/pip/tests/test_adaptive_gpu_noise.py b/source/qdk_package/tests/test_adaptive_gpu_noise.py similarity index 94% rename from source/pip/tests/test_adaptive_gpu_noise.py rename to source/qdk_package/tests/test_adaptive_gpu_noise.py index e44d55294c..26f5db733d 100644 --- a/source/pip/tests/test_adaptive_gpu_noise.py +++ b/source/qdk_package/tests/test_adaptive_gpu_noise.py @@ -18,7 +18,7 @@ from collections import Counter import pytest from typing import Optional, List -import qsharp.openqasm +import qdk.openqasm # Skip the whole module when GPU tests aren't requested. 
if not os.environ.get("QDK_GPU_TESTS"): @@ -28,7 +28,7 @@ GPU_AVAILABLE = False try: - from qsharp._native import try_create_gpu_adapter + from qdk._native import try_create_gpu_adapter gpu_info = try_create_gpu_adapter() print(f"*** USING GPU: {gpu_info}", file=sys.stderr) @@ -36,7 +36,7 @@ except OSError as e: SKIP_REASON = str(e) -from qsharp._simulation import run_qir, GpuSimulator, NoiseConfig, Result +from qdk._simulation import run_qir, GpuSimulator, NoiseConfig, Result # --------------------------------------------------------------------------- # Helpers @@ -247,10 +247,10 @@ def test_probabilistic_x_noise(): bit[3] res = measure qs; """ -QIR_WITH_CORRELATED_NOISE = qsharp.openqasm.compile( +QIR_WITH_CORRELATED_NOISE = qdk.openqasm.compile( QASM_WITH_CORRELATED_NOISE, - output_semantics=qsharp.openqasm.OutputSemantics.OpenQasm, - target_profile=qsharp.TargetProfile.Adaptive_RIF, + output_semantics=qdk.openqasm.OutputSemantics.OpenQasm, + target_profile=qdk.TargetProfile.Adaptive_RIF, ) @@ -360,10 +360,10 @@ def test_noise_intrinsics_with_registers_noisy(): bit res = measure q; """ -QIR_NOISE_1Q = qsharp.openqasm.compile( +QIR_NOISE_1Q = qdk.openqasm.compile( QASM_NOISE_1Q, - output_semantics=qsharp.openqasm.OutputSemantics.OpenQasm, - target_profile=qsharp.TargetProfile.Adaptive_RIF, + output_semantics=qdk.openqasm.OutputSemantics.OpenQasm, + target_profile=qdk.TargetProfile.Adaptive_RIF, ) @@ -389,10 +389,10 @@ def test_noise_intrinsic_1q_x_flip(): bit[2] res = measure qs; """ -QIR_NOISE_2Q = qsharp.openqasm.compile( +QIR_NOISE_2Q = qdk.openqasm.compile( QASM_NOISE_2Q, - output_semantics=qsharp.openqasm.OutputSemantics.OpenQasm, - target_profile=qsharp.TargetProfile.Adaptive_RIF, + output_semantics=qdk.openqasm.OutputSemantics.OpenQasm, + target_profile=qdk.TargetProfile.Adaptive_RIF, ) @@ -420,10 +420,10 @@ def test_noise_intrinsic_2q_xx_flip(): bit[5] res = measure qs; """ -QIR_NOISE_5Q = qsharp.openqasm.compile( +QIR_NOISE_5Q = qdk.openqasm.compile( 
QASM_NOISE_5Q, - output_semantics=qsharp.openqasm.OutputSemantics.OpenQasm, - target_profile=qsharp.TargetProfile.Adaptive_RIF, + output_semantics=qdk.openqasm.OutputSemantics.OpenQasm, + target_profile=qdk.TargetProfile.Adaptive_RIF, ) diff --git a/source/pip/tests/test_adaptive_gpu_quantum_ops.py b/source/qdk_package/tests/test_adaptive_gpu_quantum_ops.py similarity index 99% rename from source/pip/tests/test_adaptive_gpu_quantum_ops.py rename to source/qdk_package/tests/test_adaptive_gpu_quantum_ops.py index 13b0bc0a1e..a1f71b3433 100644 --- a/source/pip/tests/test_adaptive_gpu_quantum_ops.py +++ b/source/qdk_package/tests/test_adaptive_gpu_quantum_ops.py @@ -26,7 +26,7 @@ GPU_AVAILABLE = False try: - from qsharp._native import try_create_gpu_adapter + from qdk._native import try_create_gpu_adapter gpu_info = try_create_gpu_adapter() print(f"*** USING GPU: {gpu_info}", file=sys.stderr) @@ -34,7 +34,7 @@ except OSError as e: SKIP_REASON = str(e) -from qsharp._simulation import GpuSimulator +from qdk._simulation import GpuSimulator # --------------------------------------------------------------------------- diff --git a/source/pip/tests/test_adaptive_pass.py b/source/qdk_package/tests/test_adaptive_pass.py similarity index 99% rename from source/pip/tests/test_adaptive_pass.py rename to source/qdk_package/tests/test_adaptive_pass.py index 8e1d0b89fa..d88ecb8c2b 100644 --- a/source/pip/tests/test_adaptive_pass.py +++ b/source/qdk_package/tests/test_adaptive_pass.py @@ -12,8 +12,8 @@ import pyqir import pytest -from qsharp._adaptive_pass import AdaptiveProfilePass, AdaptiveProgram -from qsharp._adaptive_bytecode import * +from qdk._adaptive_pass import AdaptiveProfilePass, AdaptiveProgram +from qdk._adaptive_bytecode import * # --------------------------------------------------------------------------- diff --git a/source/pip/tests/test_callable_passing.py b/source/qdk_package/tests/test_callable_passing.py similarity index 94% rename from 
source/pip/tests/test_callable_passing.py rename to source/qdk_package/tests/test_callable_passing.py index c02c37028c..97a4d4891f 100644 --- a/source/pip/tests/test_callable_passing.py +++ b/source/qdk_package/tests/test_callable_passing.py @@ -1,7 +1,7 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -import qsharp +import qdk as qsharp from expecttest import assert_expected_inline from textwrap import dedent @@ -16,7 +16,7 @@ def test_python_callable_passed_to_python_callable() -> None: x + 1 } """) - from qsharp.code import InvokeWithFive, AddOne + from qdk.code import InvokeWithFive, AddOne assert InvokeWithFive(AddOne) == 6 @@ -30,7 +30,7 @@ def test_python_callable_passed_to_qsharp_callable() -> None: x + 1 } """) - from qsharp.code import InvokeWithFive + from qdk.code import InvokeWithFive f = qsharp.eval("AddOne") assert InvokeWithFive(f) == 6 @@ -61,7 +61,7 @@ def test_run_qsharp_callable_passed_to_python_callable() -> None: x + 1 } """) - from qsharp.code import InvokeWithFive + from qdk.code import InvokeWithFive add_one = qsharp.eval("AddOne") res = qsharp.run(InvokeWithFive, 1, add_one)[0] assert res == 6 @@ -81,7 +81,7 @@ def test_python_callable_with_unsupported_types_passed_to_python_callable() -> N sum } """) - from qsharp.code import MakeRange, SumRangeFromMaker + from qdk.code import MakeRange, SumRangeFromMaker assert SumRangeFromMaker(MakeRange) == 55 @@ -95,7 +95,7 @@ def test_qsharp_closure_from_python_callable_passed_to_python_callable() -> None x -> x + inc } """) - from qsharp.code import InvokeWithFive, MakeAdd + from qdk.code import InvokeWithFive, MakeAdd assert InvokeWithFive(MakeAdd(1)) == 6 @@ -110,7 +110,7 @@ def test_qir_from_python_callable_passed_to_python_callable() -> None: ApplyToEach(H, qs); } """) - from qsharp.code import InvokeWithQubits, AllH + from qdk.code import InvokeWithQubits, AllH qir = qsharp.compile(InvokeWithQubits, 3, AllH) assert_expected_inline(str(qir), """\ %Result = type opaque 
@@ -159,7 +159,7 @@ def test_qir_from_qsharp_callable_passed_to_python_callable() -> None: ApplyToEach(H, qs); } """) - from qsharp.code import InvokeWithQubits + from qdk.code import InvokeWithQubits all_h = qsharp.eval("AllH") qir = qsharp.compile(InvokeWithQubits, 3, all_h) assert_expected_inline(str(qir), """\ @@ -206,7 +206,7 @@ def test_qir_from_qsharp_closure_passed_to_python_callable() -> None: f(qs) } """) - from qsharp.code import InvokeWithQubits + from qdk.code import InvokeWithQubits apply_h = qsharp.eval("ApplyToEach(H, _)") qir = qsharp.compile(InvokeWithQubits, 3, apply_h) assert_expected_inline(str(qir), """\ @@ -256,7 +256,7 @@ def test_circuit_from_python_callable_passed_to_python_callable() -> None: ApplyToEach(H, qs); } """) - from qsharp.code import InvokeWithQubits, AllH + from qdk.code import InvokeWithQubits, AllH circuit = qsharp.circuit(InvokeWithQubits, 3, AllH) assert_expected_inline(str(circuit), """q_0 ── H ── q_1 ── H ── @@ -275,7 +275,7 @@ def test_circuit_from_qsharp_callable_passed_to_python_callable() -> None: ApplyToEach(H, qs); } """) - from qsharp.code import InvokeWithQubits + from qdk.code import InvokeWithQubits all_h = qsharp.eval("AllH") circuit = qsharp.circuit(InvokeWithQubits, 3, all_h) assert_expected_inline(str(circuit), """q_0 ── H ── @@ -292,7 +292,7 @@ def test_circuit_from_qsharp_closure_passed_to_python_callable() -> None: f(qs) } """) - from qsharp.code import InvokeWithQubits + from qdk.code import InvokeWithQubits apply_h = qsharp.eval("ApplyToEach(H, _)") circuit = qsharp.circuit(InvokeWithQubits, 3, apply_h) assert_expected_inline(str(circuit), """q_0 ── H ── diff --git a/source/pip/tests/test_clifford_simulator.py b/source/qdk_package/tests/test_clifford_simulator.py similarity index 96% rename from source/pip/tests/test_clifford_simulator.py rename to source/qdk_package/tests/test_clifford_simulator.py index 2c47fc0c8e..1c4227bb78 100644 --- a/source/pip/tests/test_clifford_simulator.py +++ 
b/source/qdk_package/tests/test_clifford_simulator.py @@ -4,12 +4,12 @@ from pathlib import Path import pyqir -import qsharp -from qsharp._simulation import run_qir_clifford, NoiseConfig -from qsharp._device._atom import NeutralAtomDevice -from qsharp._device._atom._decomp import DecomposeRzAnglesToCliffordGates -from qsharp._device._atom._validate import ValidateNoConditionalBranches -from qsharp import TargetProfile, Result +import qdk as qsharp +from qdk._simulation import run_qir_clifford, NoiseConfig +from qdk._device._atom import NeutralAtomDevice +from qdk._device._atom._decomp import DecomposeRzAnglesToCliffordGates +from qdk._device._atom._validate import ValidateNoConditionalBranches +from qdk import TargetProfile, Result current_file_path = Path(__file__) # Get the directory of the current file diff --git a/source/pip/tests/test_correlated_noise.py b/source/qdk_package/tests/test_correlated_noise.py similarity index 90% rename from source/pip/tests/test_correlated_noise.py rename to source/qdk_package/tests/test_correlated_noise.py index 123f49fca0..a5cf80843d 100644 --- a/source/pip/tests/test_correlated_noise.py +++ b/source/qdk_package/tests/test_correlated_noise.py @@ -3,16 +3,16 @@ import pytest import sys -from qsharp._simulation import NoiseConfig, run_qir -from qsharp import Result -import qsharp.openqasm +from qdk._simulation import NoiseConfig, run_qir +from qdk import Result +import qdk.openqasm SKIP_REASON = "GPU is not available" gpu_info = "Unknown" try: - from qsharp._native import try_create_gpu_adapter + from qdk._native import try_create_gpu_adapter gpu_info = try_create_gpu_adapter() # Printing to stderr so that it is visible if CI run fails @@ -37,10 +37,10 @@ bit[3] res = measure qs; """ -QIR_WITH_CORRELATED_NOISE = qsharp.openqasm.compile( +QIR_WITH_CORRELATED_NOISE = qdk.openqasm.compile( QASM_WITH_CORRELATED_NOISE, - output_semantics=qsharp.openqasm.OutputSemantics.OpenQasm, - target_profile=qsharp.TargetProfile.Base, + 
output_semantics=qdk.openqasm.OutputSemantics.OpenQasm, + target_profile=qdk.TargetProfile.Base, ) diff --git a/source/pip/tests/test_cpu_simulator.py b/source/qdk_package/tests/test_cpu_simulator.py similarity index 98% rename from source/pip/tests/test_cpu_simulator.py rename to source/qdk_package/tests/test_cpu_simulator.py index 93c5d474ce..6230681277 100644 --- a/source/pip/tests/test_cpu_simulator.py +++ b/source/qdk_package/tests/test_cpu_simulator.py @@ -9,13 +9,13 @@ import pytest -from qsharp._native import Result +from qdk._native import Result -import qsharp -from qsharp import TargetProfile -from qsharp import openqasm +import qdk as qsharp +from qdk import TargetProfile +from qdk import openqasm -from qsharp._simulation import run_qir_cpu, NoiseConfig +from qdk._simulation import run_qir_cpu, NoiseConfig current_file_path = Path(__file__) # Get the directory of the current file diff --git a/source/pip/tests/test_enums.py b/source/qdk_package/tests/test_enums.py similarity index 95% rename from source/pip/tests/test_enums.py rename to source/qdk_package/tests/test_enums.py index 0fb36f5e4f..d9ffb69e98 100644 --- a/source/pip/tests/test_enums.py +++ b/source/qdk_package/tests/test_enums.py @@ -3,17 +3,17 @@ from textwrap import dedent import pytest -import qsharp -import qsharp.code -import qsharp.utils +import qdk as qsharp +import qdk.code +import qdk.utils from contextlib import redirect_stdout import io -from qsharp import TargetProfile +from qdk import TargetProfile # pull in from native module for tests so that we don't have to install qiskit # using the interop module -from qsharp._native import OutputSemantics, ProgramType +from qdk._native import OutputSemantics, ProgramType def test_target_profile_int_values_match_enum_values() -> None: diff --git a/source/pip/tests/test_generic_estimator.py b/source/qdk_package/tests/test_generic_estimator.py similarity index 99% rename from source/pip/tests/test_generic_estimator.py rename to 
source/qdk_package/tests/test_generic_estimator.py index c8a50bab22..a0e64f5a3a 100644 --- a/source/pip/tests/test_generic_estimator.py +++ b/source/qdk_package/tests/test_generic_estimator.py @@ -2,7 +2,7 @@ # Licensed under the MIT License. import pytest -import qsharp +import qdk as qsharp class SampleAlgorithm: diff --git a/source/pip/tests/test_gpu_simulator.py b/source/qdk_package/tests/test_gpu_simulator.py similarity index 98% rename from source/pip/tests/test_gpu_simulator.py rename to source/qdk_package/tests/test_gpu_simulator.py index 013dd808e2..dc95ac5d0e 100644 --- a/source/pip/tests/test_gpu_simulator.py +++ b/source/qdk_package/tests/test_gpu_simulator.py @@ -11,7 +11,7 @@ import pytest import sys -from qsharp._native import Result +from qdk._native import Result # Skip all tests in this module if QDK_GPU_TESTS is not set if not os.environ.get("QDK_GPU_TESTS"): @@ -22,7 +22,7 @@ gpu_info = "Unknown" try: - from qsharp._native import try_create_gpu_adapter + from qdk._native import try_create_gpu_adapter gpu_info = try_create_gpu_adapter() # Printing to stderr so that it is visible if CI run fails @@ -34,11 +34,11 @@ SKIP_REASON = str(e) -import qsharp -from qsharp import TargetProfile -from qsharp import openqasm +import qdk as qsharp +from qdk import TargetProfile +from qdk import openqasm -from qsharp._simulation import run_qir_gpu, NoiseConfig +from qdk._simulation import run_qir_gpu, NoiseConfig current_file_path = Path(__file__) # Get the directory of the current file diff --git a/source/pip/tests/test_interpreter.py b/source/qdk_package/tests/test_interpreter.py similarity index 99% rename from source/pip/tests/test_interpreter.py rename to source/qdk_package/tests/test_interpreter.py index d9c4e365ab..34d03c095b 100644 --- a/source/pip/tests/test_interpreter.py +++ b/source/qdk_package/tests/test_interpreter.py @@ -2,7 +2,7 @@ # Licensed under the MIT License. 
from textwrap import dedent -from qsharp._native import ( +from qdk._native import ( Interpreter, Result, Pauli, @@ -10,7 +10,7 @@ TargetProfile, CircuitConfig, ) -from qsharp._qsharp import qsharp_value_to_python_value +from qdk._qsharp import qsharp_value_to_python_value import pytest from expecttest import assert_expected_inline diff --git a/source/pip/tests/test_noisy_config.py b/source/qdk_package/tests/test_noisy_config.py similarity index 98% rename from source/pip/tests/test_noisy_config.py rename to source/qdk_package/tests/test_noisy_config.py index 7bb46eadea..6eeb95656d 100644 --- a/source/pip/tests/test_noisy_config.py +++ b/source/qdk_package/tests/test_noisy_config.py @@ -1,7 +1,7 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -from qsharp._simulation import NoiseConfig +from qdk._simulation import NoiseConfig import pytest diff --git a/source/pip/tests/test_noisy_simulator.py b/source/qdk_package/tests/test_noisy_simulator.py similarity index 99% rename from source/pip/tests/test_noisy_simulator.py rename to source/qdk_package/tests/test_noisy_simulator.py index 5bc9f301d8..c5ddc17d49 100644 --- a/source/pip/tests/test_noisy_simulator.py +++ b/source/qdk_package/tests/test_noisy_simulator.py @@ -1,7 +1,7 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
-from qsharp.noisy_simulator import ( +from qdk.noisy_simulator import ( NoisySimulatorError, Operation, Instrument, diff --git a/source/pip/tests/test_project.py b/source/qdk_package/tests/test_project.py similarity index 99% rename from source/pip/tests/test_project.py rename to source/qdk_package/tests/test_project.py index 321acef265..872520862c 100644 --- a/source/pip/tests/test_project.py +++ b/source/qdk_package/tests/test_project.py @@ -7,9 +7,9 @@ @pytest.fixture def qsharp(): - import qsharp - import qsharp._fs - import qsharp._http + import qdk as qsharp + import qdk._fs + import qdk._http qsharp._fs.read_file = read_file_memfs qsharp._fs.list_directory = list_directory_memfs diff --git a/source/pip/tests/test_qasm.py b/source/qdk_package/tests/test_qasm.py similarity index 99% rename from source/pip/tests/test_qasm.py rename to source/qdk_package/tests/test_qasm.py index 7ddb4d0c54..f5b1f0c239 100644 --- a/source/pip/tests/test_qasm.py +++ b/source/qdk_package/tests/test_qasm.py @@ -4,7 +4,7 @@ from math import pi from textwrap import dedent import pytest -from qsharp import ( +from qdk import ( init, TargetProfile, BitFlipNoise, @@ -17,8 +17,8 @@ circuit as qsharp_circuit, estimate as qsharp_estimate, ) -from qsharp.estimator import EstimatorParams, QubitParams, QECScheme, LogicalCounts -from qsharp.openqasm import ( +from qdk.estimator import EstimatorParams, QubitParams, QECScheme, LogicalCounts +from qdk.openqasm import ( import_openqasm, run, compile, @@ -27,7 +27,7 @@ ProgramType, QasmError, ) -import qsharp.code as code +import qdk.code as code # Run diff --git a/source/pip/tests/test_qasm_io.py b/source/qdk_package/tests/test_qasm_io.py similarity index 99% rename from source/pip/tests/test_qasm_io.py rename to source/qdk_package/tests/test_qasm_io.py index 8630b9982b..c2ce5c4f2c 100644 --- a/source/pip/tests/test_qasm_io.py +++ b/source/qdk_package/tests/test_qasm_io.py @@ -3,17 +3,17 @@ from math import pi import pytest -from qsharp import ( 
+from qdk import ( QSharpError, init, TargetProfile, Result, ) -from qsharp.openqasm import ( +from qdk.openqasm import ( import_openqasm, ProgramType, ) -import qsharp.code as code +import qdk.code as code def test_import_unsupported_angle_input_type() -> None: diff --git a/source/pip/tests/test_qsharp.py b/source/qdk_package/tests/test_qsharp.py similarity index 99% rename from source/pip/tests/test_qsharp.py rename to source/qdk_package/tests/test_qsharp.py index 0d1fd3c55f..2b796987c9 100644 --- a/source/pip/tests/test_qsharp.py +++ b/source/qdk_package/tests/test_qsharp.py @@ -3,9 +3,9 @@ from textwrap import dedent import pytest -import qsharp -import qsharp.code -import qsharp.utils +import qdk as qsharp +import qdk.code +import qdk.utils from contextlib import redirect_stdout import io @@ -862,8 +862,8 @@ def test_callables_in_namespaces_exposed_into_env_submodules_and_removed_on_rein qsharp.eval("namespace Test { function Four() : Int { 4 } }") qsharp.eval("function Identity(a : Int) : Int { a }") # should be able to import callables from env and namespace submodule - from qsharp.code import Identity - from qsharp.code.Test import Four + from qdk.code import Identity + from qdk.code.Test import Four assert Identity(4) == 4 assert Four() == 4 @@ -1014,8 +1014,8 @@ def test_function_defined_before_namespace_keeps_both_accessible() -> None: qsharp.eval("namespace Four { function Two() : Int { 42 } }") assert qsharp.code.Four() == 4 assert qsharp.code.Four.Two() == 42 - from qsharp.code import Four - from qsharp.code.Four import Two + from qdk.code import Four + from qdk.code.Four import Two assert Four() == 4 assert Two() == 42 @@ -1027,8 +1027,8 @@ def test_namespace_defined_before_function_keeps_both_accessible() -> None: qsharp.eval("function Four() : Int { 4 }") assert qsharp.code.Four() == 4 assert qsharp.code.Four.Two() == 42 - from qsharp.code import Four - from qsharp.code.Four import Two + from qdk.code import Four + from qdk.code.Four import Two 
assert Four() == 4 assert Two() == 42 diff --git a/source/pip/tests/test_re.py b/source/qdk_package/tests/test_re.py similarity index 99% rename from source/pip/tests/test_re.py rename to source/qdk_package/tests/test_re.py index 565ef21881..291f2cdb35 100644 --- a/source/pip/tests/test_re.py +++ b/source/qdk_package/tests/test_re.py @@ -1,8 +1,8 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -import qsharp -from qsharp.estimator import EstimatorParams, QubitParams, QECScheme, LogicalCounts +import qdk as qsharp +from qdk.estimator import EstimatorParams, QubitParams, QECScheme, LogicalCounts def test_qsharp_estimation() -> None: diff --git a/source/pip/tests/test_sparse_simulator.py b/source/qdk_package/tests/test_sparse_simulator.py similarity index 98% rename from source/pip/tests/test_sparse_simulator.py rename to source/qdk_package/tests/test_sparse_simulator.py index 83326bf2b1..6cb2696147 100644 --- a/source/pip/tests/test_sparse_simulator.py +++ b/source/qdk_package/tests/test_sparse_simulator.py @@ -9,13 +9,13 @@ import pytest -from qsharp._native import Result +from qdk._native import Result -import qsharp -from qsharp import TargetProfile -from qsharp import openqasm, run +import qdk as qsharp +from qdk import TargetProfile +from qdk import openqasm, run -from qsharp._simulation import NoiseConfig +from qdk._simulation import NoiseConfig current_file_path = Path(__file__) # Get the directory of the current file From 3fbc8792bbd09851a46421d568f568fa1057ea6a Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Thu, 30 Apr 2026 10:28:46 -0700 Subject: [PATCH 06/25] migrate integration tests into qdk from qsharp --- build.py | 2 +- source/pip/tests-integration/conftest.py | 13 ------ .../tests-integration/.gitignore | 0 .../tests-integration/__init__.py | 2 +- .../qdk_package/tests-integration/conftest.py | 42 +++++++++++++++++++ .../tests-integration/devices/__init__.py | 0 .../devices/test_atom_decomp.py | 4 +- 
.../devices/test_atom_e2e.py | 4 +- .../devices/test_atom_optimize.py | 4 +- .../devices/test_atom_reorder.py | 6 +-- .../devices/test_atom_schedule.py | 6 +-- .../devices/validation/__init__.py | 0 .../interop_cirq/__init__.py | 0 .../interop_cirq/test_neutral_atom.py | 6 +-- .../interop_qiskit/__init__.py | 0 .../resources/custom_intrinsics.inc | 0 .../resources/custom_intrinsics.ll | 0 .../interop_qiskit/test_circuits/__init__.py | 0 .../test_circuits/test_circuits.py | 2 +- .../interop_qiskit/test_gate_correctness.py | 8 ++-- .../interop_qiskit/test_gateset_qasm.py | 2 +- .../interop_qiskit/test_neutral_atom.py | 8 ++-- .../interop_qiskit/test_qir.py | 5 ++- .../interop_qiskit/test_qsharp.py | 2 +- .../interop_qiskit/test_re.py | 6 +-- .../interop_qiskit/test_run_sim.py | 5 ++- .../adaptive_ri/input/ArithmeticOps.qs | 0 .../input/BernsteinVaziraniNISQ.qs | 0 .../adaptive_ri/input/ConstantFolding.qs | 0 .../input/CopyAndUpdateExpressions.qs | 0 .../adaptive_ri/input/DeutschJozsaNISQ.qs | 0 .../adaptive_ri/input/ExpandedTests.qs | 0 .../resources/adaptive_ri/input/Functors.qs | 0 .../adaptive_ri/input/HiddenShiftNISQ.qs | 0 .../adaptive_ri/input/IntegerComparison.qs | 0 .../adaptive_ri/input/IntrinsicCCNOT.qs | 0 .../adaptive_ri/input/IntrinsicCNOT.qs | 0 .../adaptive_ri/input/IntrinsicHIXYZ.qs | 0 .../resources/adaptive_ri/input/IntrinsicM.qs | 0 .../input/IntrinsicMeasureWithBitFlipCode.qs | 0 .../IntrinsicMeasureWithPhaseFlipCode.qs | 0 .../input/IntrinsicRotationsWithPeriod.qs | 0 .../adaptive_ri/input/IntrinsicSTSWAP.qs | 0 .../adaptive_ri/input/MeasureAndReuse.qs | 0 .../input/MeasurementComparison.qs | 0 .../adaptive_ri/input/NestedBranching.qs | 0 .../resources/adaptive_ri/input/RandomBit.qs | 0 .../adaptive_ri/input/SampleTeleport.qs | 0 .../input/ShortcuttingMeasurement.qs | 0 .../resources/adaptive_ri/input/Slicing.qs | 0 .../adaptive_ri/input/SuperdenseCoding.qs | 0 .../adaptive_ri/input/SwitchHandling.qs | 0 .../input/ThreeQubitRepetitionCode.qs | 0 
.../adaptive_ri/input/WithinApply.qs | 0 .../adaptive_ri/output/ArithmeticOps.ll | 0 .../adaptive_ri/output/ArithmeticOps.out | 0 .../output/BernsteinVaziraniNISQ.ll | 0 .../output/BernsteinVaziraniNISQ.out | 0 .../adaptive_ri/output/ConstantFolding.ll | 0 .../adaptive_ri/output/ConstantFolding.out | 0 .../output/CopyAndUpdateExpressions.ll | 0 .../output/CopyAndUpdateExpressions.out | 0 .../adaptive_ri/output/DeutschJozsaNISQ.ll | 0 .../adaptive_ri/output/DeutschJozsaNISQ.out | 0 .../adaptive_ri/output/ExpandedTests.ll | 0 .../adaptive_ri/output/ExpandedTests.out | 0 .../resources/adaptive_ri/output/Functors.ll | 0 .../resources/adaptive_ri/output/Functors.out | 0 .../adaptive_ri/output/HiddenShiftNISQ.ll | 0 .../adaptive_ri/output/HiddenShiftNISQ.out | 0 .../adaptive_ri/output/IntegerComparison.ll | 0 .../adaptive_ri/output/IntegerComparison.out | 0 .../adaptive_ri/output/IntrinsicCCNOT.ll | 0 .../adaptive_ri/output/IntrinsicCCNOT.out | 0 .../adaptive_ri/output/IntrinsicCNOT.ll | 0 .../adaptive_ri/output/IntrinsicCNOT.out | 0 .../adaptive_ri/output/IntrinsicHIXYZ.ll | 0 .../adaptive_ri/output/IntrinsicHIXYZ.out | 0 .../adaptive_ri/output/IntrinsicM.ll | 0 .../adaptive_ri/output/IntrinsicM.out | 0 .../output/IntrinsicMeasureWithBitFlipCode.ll | 0 .../IntrinsicMeasureWithBitFlipCode.out | 0 .../IntrinsicMeasureWithPhaseFlipCode.ll | 0 .../IntrinsicMeasureWithPhaseFlipCode.out | 0 .../output/IntrinsicRotationsWithPeriod.ll | 0 .../output/IntrinsicRotationsWithPeriod.out | 0 .../adaptive_ri/output/IntrinsicSTSWAP.ll | 0 .../adaptive_ri/output/IntrinsicSTSWAP.out | 0 .../adaptive_ri/output/MeasureAndReuse.ll | 0 .../adaptive_ri/output/MeasureAndReuse.out | 0 .../output/MeasurementComparison.ll | 0 .../output/MeasurementComparison.out | 0 .../adaptive_ri/output/NestedBranching.ll | 0 .../adaptive_ri/output/NestedBranching.out | 0 .../resources/adaptive_ri/output/RandomBit.ll | 0 .../adaptive_ri/output/RandomBit.out | 0 .../adaptive_ri/output/SampleTeleport.ll | 0 
.../adaptive_ri/output/SampleTeleport.out | 0 .../output/ShortcuttingMeasurement.ll | 0 .../output/ShortcuttingMeasurement.out | 0 .../resources/adaptive_ri/output/Slicing.ll | 0 .../resources/adaptive_ri/output/Slicing.out | 0 .../adaptive_ri/output/SuperdenseCoding.ll | 0 .../adaptive_ri/output/SuperdenseCoding.out | 0 .../adaptive_ri/output/SwitchHandling.ll | 0 .../adaptive_ri/output/SwitchHandling.out | 0 .../output/ThreeQubitRepetitionCode.ll | 0 .../output/ThreeQubitRepetitionCode.out | 0 .../adaptive_ri/output/WithinApply.ll | 0 .../adaptive_ri/output/WithinApply.out | 0 .../resources/adaptive_rif/input/Doubles.qs | 0 .../resources/adaptive_rif/output/Doubles.ll | 0 .../resources/adaptive_rif/output/Doubles.out | 0 .../adaptive_rifla/input/ArithmeticOps.qs | 0 .../input/BernsteinVaziraniNISQ.qs | 0 .../adaptive_rifla/input/ConstantFolding.qs | 0 .../input/CopyAndUpdateExpressions.qs | 0 .../adaptive_rifla/input/DeutschJozsaNISQ.qs | 0 .../resources/adaptive_rifla/input/Doubles.qs | 0 .../adaptive_rifla/input/ExpandedTests.qs | 0 .../adaptive_rifla/input/Functors.qs | 0 .../adaptive_rifla/input/HiddenShiftNISQ.qs | 0 .../adaptive_rifla/input/IntegerComparison.qs | 0 .../adaptive_rifla/input/IntrinsicCCNOT.qs | 0 .../adaptive_rifla/input/IntrinsicCNOT.qs | 0 .../adaptive_rifla/input/IntrinsicHIXYZ.qs | 0 .../adaptive_rifla/input/IntrinsicM.qs | 0 .../input/IntrinsicMeasureWithBitFlipCode.qs | 0 .../IntrinsicMeasureWithPhaseFlipCode.qs | 0 .../input/IntrinsicRotationsWithPeriod.qs | 0 .../adaptive_rifla/input/IntrinsicSTSWAP.qs | 0 .../adaptive_rifla/input/LoopOverArrays.qs | 0 .../adaptive_rifla/input/MeasureAndReuse.qs | 0 .../input/MeasurementComparison.qs | 0 .../adaptive_rifla/input/NestedBranching.qs | 0 .../resources/adaptive_rifla/input/RUS.qs | 0 .../adaptive_rifla/input/RandomBit.qs | 0 .../adaptive_rifla/input/SampleTeleport.qs | 0 .../input/ShortcuttingMeasurement.qs | 0 .../resources/adaptive_rifla/input/Slicing.qs | 0 
.../adaptive_rifla/input/SuperdenseCoding.qs | 0 .../adaptive_rifla/input/SwitchHandling.qs | 0 .../input/ThreeQubitRepetitionCode.qs | 0 .../adaptive_rifla/input/WithinApply.qs | 0 .../adaptive_rifla/output/ArithmeticOps.ll | 0 .../adaptive_rifla/output/ArithmeticOps.out | 0 .../output/BernsteinVaziraniNISQ.ll | 0 .../output/BernsteinVaziraniNISQ.out | 0 .../adaptive_rifla/output/ConstantFolding.ll | 0 .../adaptive_rifla/output/ConstantFolding.out | 0 .../output/CopyAndUpdateExpressions.ll | 0 .../output/CopyAndUpdateExpressions.out | 0 .../adaptive_rifla/output/DeutschJozsaNISQ.ll | 0 .../output/DeutschJozsaNISQ.out | 0 .../adaptive_rifla/output/Doubles.ll | 0 .../adaptive_rifla/output/Doubles.out | 0 .../adaptive_rifla/output/ExpandedTests.ll | 0 .../adaptive_rifla/output/ExpandedTests.out | 0 .../adaptive_rifla/output/Functors.ll | 0 .../adaptive_rifla/output/Functors.out | 0 .../adaptive_rifla/output/HiddenShiftNISQ.ll | 0 .../adaptive_rifla/output/HiddenShiftNISQ.out | 0 .../output/IntegerComparison.ll | 0 .../output/IntegerComparison.out | 0 .../adaptive_rifla/output/IntrinsicCCNOT.ll | 0 .../adaptive_rifla/output/IntrinsicCCNOT.out | 0 .../adaptive_rifla/output/IntrinsicCNOT.ll | 0 .../adaptive_rifla/output/IntrinsicCNOT.out | 0 .../adaptive_rifla/output/IntrinsicHIXYZ.ll | 0 .../adaptive_rifla/output/IntrinsicHIXYZ.out | 0 .../adaptive_rifla/output/IntrinsicM.ll | 0 .../adaptive_rifla/output/IntrinsicM.out | 0 .../output/IntrinsicMeasureWithBitFlipCode.ll | 0 .../IntrinsicMeasureWithBitFlipCode.out | 0 .../IntrinsicMeasureWithPhaseFlipCode.ll | 0 .../IntrinsicMeasureWithPhaseFlipCode.out | 0 .../output/IntrinsicRotationsWithPeriod.ll | 0 .../output/IntrinsicRotationsWithPeriod.out | 0 .../adaptive_rifla/output/IntrinsicSTSWAP.ll | 0 .../adaptive_rifla/output/IntrinsicSTSWAP.out | 0 .../adaptive_rifla/output/LoopOverArrays.ll | 0 .../adaptive_rifla/output/LoopOverArrays.out | 0 .../adaptive_rifla/output/MeasureAndReuse.ll | 0 
.../adaptive_rifla/output/MeasureAndReuse.out | 0 .../output/MeasurementComparison.ll | 0 .../output/MeasurementComparison.out | 0 .../adaptive_rifla/output/NestedBranching.ll | 0 .../adaptive_rifla/output/NestedBranching.out | 0 .../resources/adaptive_rifla/output/RUS.ll | 0 .../resources/adaptive_rifla/output/RUS.out | 0 .../adaptive_rifla/output/RandomBit.ll | 0 .../adaptive_rifla/output/RandomBit.out | 0 .../adaptive_rifla/output/SampleTeleport.ll | 0 .../adaptive_rifla/output/SampleTeleport.out | 0 .../output/ShortcuttingMeasurement.ll | 0 .../output/ShortcuttingMeasurement.out | 0 .../adaptive_rifla/output/Slicing.ll | 0 .../adaptive_rifla/output/Slicing.out | 0 .../adaptive_rifla/output/SuperdenseCoding.ll | 0 .../output/SuperdenseCoding.out | 0 .../adaptive_rifla/output/SwitchHandling.ll | 0 .../adaptive_rifla/output/SwitchHandling.out | 0 .../output/ThreeQubitRepetitionCode.ll | 0 .../output/ThreeQubitRepetitionCode.out | 0 .../adaptive_rifla/output/WithinApply.ll | 0 .../adaptive_rifla/output/WithinApply.out | 0 .../tests-integration/test_adaptive_ri_qir.py | 2 +- .../test_adaptive_rif_qir.py | 2 +- .../test_adaptive_rifla_qir.py | 2 +- .../tests-integration/test_base_qir.py | 3 +- .../tests-integration/test_requirements.txt | 0 .../tests-integration/utils.py | 4 +- 212 files changed, 85 insertions(+), 55 deletions(-) delete mode 100644 source/pip/tests-integration/conftest.py rename source/{pip => qdk_package}/tests-integration/.gitignore (100%) rename source/{pip => qdk_package}/tests-integration/__init__.py (70%) create mode 100644 source/qdk_package/tests-integration/conftest.py rename source/{pip => qdk_package}/tests-integration/devices/__init__.py (100%) rename source/{pip => qdk_package}/tests-integration/devices/test_atom_decomp.py (99%) rename source/{pip => qdk_package}/tests-integration/devices/test_atom_e2e.py (98%) rename source/{pip => qdk_package}/tests-integration/devices/test_atom_optimize.py (99%) rename source/{pip => 
qdk_package}/tests-integration/devices/test_atom_reorder.py (99%) rename source/{pip => qdk_package}/tests-integration/devices/test_atom_schedule.py (99%) rename source/{pip => qdk_package}/tests-integration/devices/validation/__init__.py (100%) rename source/{pip => qdk_package}/tests-integration/interop_cirq/__init__.py (100%) rename source/{pip => qdk_package}/tests-integration/interop_cirq/test_neutral_atom.py (98%) rename source/{pip => qdk_package}/tests-integration/interop_qiskit/__init__.py (100%) rename source/{pip => qdk_package}/tests-integration/interop_qiskit/resources/custom_intrinsics.inc (100%) rename source/{pip => qdk_package}/tests-integration/interop_qiskit/resources/custom_intrinsics.ll (100%) rename source/{pip => qdk_package}/tests-integration/interop_qiskit/test_circuits/__init__.py (100%) rename source/{pip => qdk_package}/tests-integration/interop_qiskit/test_circuits/test_circuits.py (99%) rename source/{pip => qdk_package}/tests-integration/interop_qiskit/test_gate_correctness.py (97%) rename source/{pip => qdk_package}/tests-integration/interop_qiskit/test_gateset_qasm.py (99%) rename source/{pip => qdk_package}/tests-integration/interop_qiskit/test_neutral_atom.py (98%) rename source/{pip => qdk_package}/tests-integration/interop_qiskit/test_qir.py (98%) rename source/{pip => qdk_package}/tests-integration/interop_qiskit/test_qsharp.py (97%) rename source/{pip => qdk_package}/tests-integration/interop_qiskit/test_re.py (97%) rename source/{pip => qdk_package}/tests-integration/interop_qiskit/test_run_sim.py (98%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/ArithmeticOps.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/BernsteinVaziraniNISQ.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/ConstantFolding.qs (100%) rename source/{pip => 
qdk_package}/tests-integration/resources/adaptive_ri/input/CopyAndUpdateExpressions.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/DeutschJozsaNISQ.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/ExpandedTests.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/Functors.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/HiddenShiftNISQ.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/IntegerComparison.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/IntrinsicCCNOT.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/IntrinsicCNOT.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/IntrinsicHIXYZ.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/IntrinsicM.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/IntrinsicMeasureWithBitFlipCode.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/IntrinsicMeasureWithPhaseFlipCode.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/IntrinsicRotationsWithPeriod.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/IntrinsicSTSWAP.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/MeasureAndReuse.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/MeasurementComparison.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/NestedBranching.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/RandomBit.qs (100%) rename source/{pip => 
qdk_package}/tests-integration/resources/adaptive_ri/input/SampleTeleport.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/ShortcuttingMeasurement.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/Slicing.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/SuperdenseCoding.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/SwitchHandling.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/ThreeQubitRepetitionCode.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/input/WithinApply.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/ArithmeticOps.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/ArithmeticOps.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/BernsteinVaziraniNISQ.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/BernsteinVaziraniNISQ.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/ConstantFolding.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/ConstantFolding.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/CopyAndUpdateExpressions.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/CopyAndUpdateExpressions.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/DeutschJozsaNISQ.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/DeutschJozsaNISQ.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/ExpandedTests.ll (100%) rename source/{pip => 
qdk_package}/tests-integration/resources/adaptive_ri/output/ExpandedTests.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/Functors.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/Functors.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/HiddenShiftNISQ.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/HiddenShiftNISQ.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/IntegerComparison.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/IntegerComparison.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/IntrinsicCCNOT.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/IntrinsicCCNOT.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/IntrinsicCNOT.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/IntrinsicCNOT.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/IntrinsicHIXYZ.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/IntrinsicHIXYZ.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/IntrinsicM.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/IntrinsicM.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithBitFlipCode.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithBitFlipCode.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithPhaseFlipCode.ll (100%) rename source/{pip => 
qdk_package}/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithPhaseFlipCode.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/IntrinsicRotationsWithPeriod.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/IntrinsicRotationsWithPeriod.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/IntrinsicSTSWAP.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/IntrinsicSTSWAP.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/MeasureAndReuse.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/MeasureAndReuse.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/MeasurementComparison.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/MeasurementComparison.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/NestedBranching.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/NestedBranching.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/RandomBit.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/RandomBit.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/SampleTeleport.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/SampleTeleport.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/ShortcuttingMeasurement.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/ShortcuttingMeasurement.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/Slicing.ll (100%) rename source/{pip 
=> qdk_package}/tests-integration/resources/adaptive_ri/output/Slicing.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/SuperdenseCoding.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/SuperdenseCoding.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/SwitchHandling.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/SwitchHandling.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/ThreeQubitRepetitionCode.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/ThreeQubitRepetitionCode.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/WithinApply.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_ri/output/WithinApply.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rif/input/Doubles.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rif/output/Doubles.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rif/output/Doubles.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/ArithmeticOps.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/BernsteinVaziraniNISQ.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/ConstantFolding.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/CopyAndUpdateExpressions.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/DeutschJozsaNISQ.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/Doubles.qs (100%) rename source/{pip => 
qdk_package}/tests-integration/resources/adaptive_rifla/input/ExpandedTests.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/Functors.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/HiddenShiftNISQ.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/IntegerComparison.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/IntrinsicCCNOT.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/IntrinsicCNOT.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/IntrinsicHIXYZ.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/IntrinsicM.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/IntrinsicMeasureWithBitFlipCode.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/IntrinsicMeasureWithPhaseFlipCode.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/IntrinsicRotationsWithPeriod.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/IntrinsicSTSWAP.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/LoopOverArrays.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/MeasureAndReuse.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/MeasurementComparison.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/NestedBranching.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/RUS.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/RandomBit.qs (100%) rename source/{pip => 
qdk_package}/tests-integration/resources/adaptive_rifla/input/SampleTeleport.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/ShortcuttingMeasurement.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/Slicing.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/SuperdenseCoding.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/SwitchHandling.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/ThreeQubitRepetitionCode.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/input/WithinApply.qs (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/BernsteinVaziraniNISQ.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/BernsteinVaziraniNISQ.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/ConstantFolding.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/ConstantFolding.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/CopyAndUpdateExpressions.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/CopyAndUpdateExpressions.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/DeutschJozsaNISQ.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/DeutschJozsaNISQ.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/Doubles.ll (100%) 
rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/Doubles.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/ExpandedTests.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/ExpandedTests.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/Functors.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/Functors.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/HiddenShiftNISQ.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/HiddenShiftNISQ.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/IntegerComparison.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/IntegerComparison.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/IntrinsicCCNOT.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/IntrinsicCCNOT.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/IntrinsicCNOT.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/IntrinsicCNOT.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/IntrinsicHIXYZ.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/IntrinsicHIXYZ.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/IntrinsicM.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/IntrinsicM.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithBitFlipCode.ll (100%) rename 
source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithBitFlipCode.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithPhaseFlipCode.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithPhaseFlipCode.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/IntrinsicSTSWAP.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/IntrinsicSTSWAP.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/LoopOverArrays.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/LoopOverArrays.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/MeasureAndReuse.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/MeasureAndReuse.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/MeasurementComparison.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/MeasurementComparison.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/NestedBranching.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/NestedBranching.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/RUS.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/RUS.out (100%) rename source/{pip => 
qdk_package}/tests-integration/resources/adaptive_rifla/output/RandomBit.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/RandomBit.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/SampleTeleport.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/SampleTeleport.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/ShortcuttingMeasurement.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/ShortcuttingMeasurement.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/Slicing.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/Slicing.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/SuperdenseCoding.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/SuperdenseCoding.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/SwitchHandling.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/SwitchHandling.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.out (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/WithinApply.ll (100%) rename source/{pip => qdk_package}/tests-integration/resources/adaptive_rifla/output/WithinApply.out (100%) rename source/{pip => qdk_package}/tests-integration/test_adaptive_ri_qir.py (98%) rename source/{pip => qdk_package}/tests-integration/test_adaptive_rif_qir.py (98%) rename source/{pip => 
qdk_package}/tests-integration/test_adaptive_rifla_qir.py (98%) rename source/{pip => qdk_package}/tests-integration/test_base_qir.py (99%) rename source/{pip => qdk_package}/tests-integration/test_requirements.txt (100%) rename source/{pip => qdk_package}/tests-integration/utils.py (98%) diff --git a/build.py b/build.py index 2f54b0c506..a4b564ae2c 100755 --- a/build.py +++ b/build.py @@ -495,7 +495,7 @@ def run_ci_historic_benchmark(): if args.integration_tests: step_start("Setting up for integration tests for the pip package") - test_dir = os.path.join(pip_src, "tests-integration") + test_dir = os.path.join(qdk_python_src, "tests-integration") install_python_test_requirements(test_dir, python_bin, check=False) # Install qdk first (qsharp depends on it) diff --git a/source/pip/tests-integration/conftest.py b/source/pip/tests-integration/conftest.py deleted file mode 100644 index 3635045928..0000000000 --- a/source/pip/tests-integration/conftest.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -""" -This file is used to configure pytest for the test suite. - -- It attempts to import necessary modules from test_circuits. - -Fixtures and other configurations for pytest can be added to this file to -be shared across multiple test files. 
-""" - -from interop_qiskit.test_circuits import * diff --git a/source/pip/tests-integration/.gitignore b/source/qdk_package/tests-integration/.gitignore similarity index 100% rename from source/pip/tests-integration/.gitignore rename to source/qdk_package/tests-integration/.gitignore diff --git a/source/pip/tests-integration/__init__.py b/source/qdk_package/tests-integration/__init__.py similarity index 70% rename from source/pip/tests-integration/__init__.py rename to source/qdk_package/tests-integration/__init__.py index d84a4b96d5..4af6a30469 100644 --- a/source/pip/tests-integration/__init__.py +++ b/source/qdk_package/tests-integration/__init__.py @@ -1,4 +1,4 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -"""qsharp integration tests""" +"""qdk integration tests""" diff --git a/source/qdk_package/tests-integration/conftest.py b/source/qdk_package/tests-integration/conftest.py new file mode 100644 index 0000000000..e1ca9f1c71 --- /dev/null +++ b/source/qdk_package/tests-integration/conftest.py @@ -0,0 +1,42 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +""" +This file is used to configure pytest for the test suite. + +- It attempts to import necessary modules from test_circuits. +- It monkey-patches extra symbols onto the ``qdk`` module so that + ``import qdk as qsharp`` followed by ``qsharp.compile(...)`` etc. works. + +Fixtures and other configurations for pytest can be added to this file to +be shared across multiple test files. +""" + +# --------------------------------------------------------------------------- +# Monkey-patch symbols onto qdk that are NOT part of the public API but are +# used throughout tests via the ``import qdk as qsharp`` alias. 
+# --------------------------------------------------------------------------- +import qdk +from qdk._qsharp import ( # noqa: E402 + eval, + run, + compile, + circuit, + estimate, + logical_counts, + QSharpError, + CircuitGenerationMethod, +) +from qdk._native import estimate_custom # type: ignore # noqa: E402 + +qdk.eval = eval +qdk.run = run +qdk.compile = compile +qdk.circuit = circuit +qdk.estimate = estimate +qdk.logical_counts = logical_counts +qdk.QSharpError = QSharpError +qdk.CircuitGenerationMethod = CircuitGenerationMethod +qdk.estimate_custom = estimate_custom + +from interop_qiskit.test_circuits import * diff --git a/source/pip/tests-integration/devices/__init__.py b/source/qdk_package/tests-integration/devices/__init__.py similarity index 100% rename from source/pip/tests-integration/devices/__init__.py rename to source/qdk_package/tests-integration/devices/__init__.py diff --git a/source/pip/tests-integration/devices/test_atom_decomp.py b/source/qdk_package/tests-integration/devices/test_atom_decomp.py similarity index 99% rename from source/pip/tests-integration/devices/test_atom_decomp.py rename to source/qdk_package/tests-integration/devices/test_atom_decomp.py index 6b71011bb5..10db7d0f95 100644 --- a/source/pip/tests-integration/devices/test_atom_decomp.py +++ b/source/qdk_package/tests-integration/devices/test_atom_decomp.py @@ -4,8 +4,8 @@ import pytest from expecttest import assert_expected_inline -import qsharp -from qsharp._device._atom._decomp import ( +import qdk as qsharp +from qdk._device._atom._decomp import ( DecomposeMultiQubitToCZ, DecomposeSingleRotationToRz, DecomposeSingleQubitToRzSX, diff --git a/source/pip/tests-integration/devices/test_atom_e2e.py b/source/qdk_package/tests-integration/devices/test_atom_e2e.py similarity index 98% rename from source/pip/tests-integration/devices/test_atom_e2e.py rename to source/qdk_package/tests-integration/devices/test_atom_e2e.py index 938751ba23..60f6ec898c 100644 --- 
a/source/pip/tests-integration/devices/test_atom_e2e.py +++ b/source/qdk_package/tests-integration/devices/test_atom_e2e.py @@ -4,8 +4,8 @@ import pytest from expecttest import assert_expected_inline -import qsharp -from qsharp._device._atom import NeutralAtomDevice, NoiseConfig +import qdk as qsharp +from qdk._device._atom import NeutralAtomDevice, NoiseConfig try: import pyqir diff --git a/source/pip/tests-integration/devices/test_atom_optimize.py b/source/qdk_package/tests-integration/devices/test_atom_optimize.py similarity index 99% rename from source/pip/tests-integration/devices/test_atom_optimize.py rename to source/qdk_package/tests-integration/devices/test_atom_optimize.py index cc3a26f5cb..39b0992a26 100644 --- a/source/pip/tests-integration/devices/test_atom_optimize.py +++ b/source/qdk_package/tests-integration/devices/test_atom_optimize.py @@ -4,8 +4,8 @@ import pytest from expecttest import assert_expected_inline -import qsharp -from qsharp._device._atom._optimize import ( +import qdk as qsharp +from qdk._device._atom._optimize import ( PruneUnusedFunctions, OptimizeSingleQubitGates, ) diff --git a/source/pip/tests-integration/devices/test_atom_reorder.py b/source/qdk_package/tests-integration/devices/test_atom_reorder.py similarity index 99% rename from source/pip/tests-integration/devices/test_atom_reorder.py rename to source/qdk_package/tests-integration/devices/test_atom_reorder.py index ee8bacfc93..b1fc7d0198 100644 --- a/source/pip/tests-integration/devices/test_atom_reorder.py +++ b/source/qdk_package/tests-integration/devices/test_atom_reorder.py @@ -4,9 +4,9 @@ import pytest from expecttest import assert_expected_inline -import qsharp -from qsharp._device._atom._reorder import Reorder -from qsharp._device._atom import NeutralAtomDevice +import qdk as qsharp +from qdk._device._atom._reorder import Reorder +from qdk._device._atom import NeutralAtomDevice from .validation import PerQubitOrdering, check_qubit_ordering_unchanged try: diff --git 
a/source/pip/tests-integration/devices/test_atom_schedule.py b/source/qdk_package/tests-integration/devices/test_atom_schedule.py similarity index 99% rename from source/pip/tests-integration/devices/test_atom_schedule.py rename to source/qdk_package/tests-integration/devices/test_atom_schedule.py index c90ec91daa..ce98268d01 100644 --- a/source/pip/tests-integration/devices/test_atom_schedule.py +++ b/source/qdk_package/tests-integration/devices/test_atom_schedule.py @@ -4,9 +4,9 @@ import pytest from expecttest import assert_expected_inline -import qsharp -from qsharp._device._atom import NeutralAtomDevice -from qsharp._device._atom._scheduler import Schedule +import qdk as qsharp +from qdk._device._atom import NeutralAtomDevice +from qdk._device._atom._scheduler import Schedule from .validation import ( ValidateBeginEndParallel, PerQubitOrdering, diff --git a/source/pip/tests-integration/devices/validation/__init__.py b/source/qdk_package/tests-integration/devices/validation/__init__.py similarity index 100% rename from source/pip/tests-integration/devices/validation/__init__.py rename to source/qdk_package/tests-integration/devices/validation/__init__.py diff --git a/source/pip/tests-integration/interop_cirq/__init__.py b/source/qdk_package/tests-integration/interop_cirq/__init__.py similarity index 100% rename from source/pip/tests-integration/interop_cirq/__init__.py rename to source/qdk_package/tests-integration/interop_cirq/__init__.py diff --git a/source/pip/tests-integration/interop_cirq/test_neutral_atom.py b/source/qdk_package/tests-integration/interop_cirq/test_neutral_atom.py similarity index 98% rename from source/pip/tests-integration/interop_cirq/test_neutral_atom.py rename to source/qdk_package/tests-integration/interop_cirq/test_neutral_atom.py index ab09bfe5ed..13d48e3321 100644 --- a/source/pip/tests-integration/interop_cirq/test_neutral_atom.py +++ b/source/qdk_package/tests-integration/interop_cirq/test_neutral_atom.py @@ -6,9 +6,9 @@ cirq = 
pytest.importorskip("cirq") import numpy as np -from qsharp.interop.cirq import NeutralAtomCirqResult, NeutralAtomSampler -from qsharp._simulation import NoiseConfig -from qsharp._device._atom import NeutralAtomDevice +from qdk.cirq import NeutralAtomCirqResult, NeutralAtomSampler +from qdk._simulation import NoiseConfig +from qdk._device._atom import NeutralAtomDevice # --------------------------------------------------------------------------- diff --git a/source/pip/tests-integration/interop_qiskit/__init__.py b/source/qdk_package/tests-integration/interop_qiskit/__init__.py similarity index 100% rename from source/pip/tests-integration/interop_qiskit/__init__.py rename to source/qdk_package/tests-integration/interop_qiskit/__init__.py diff --git a/source/pip/tests-integration/interop_qiskit/resources/custom_intrinsics.inc b/source/qdk_package/tests-integration/interop_qiskit/resources/custom_intrinsics.inc similarity index 100% rename from source/pip/tests-integration/interop_qiskit/resources/custom_intrinsics.inc rename to source/qdk_package/tests-integration/interop_qiskit/resources/custom_intrinsics.inc diff --git a/source/pip/tests-integration/interop_qiskit/resources/custom_intrinsics.ll b/source/qdk_package/tests-integration/interop_qiskit/resources/custom_intrinsics.ll similarity index 100% rename from source/pip/tests-integration/interop_qiskit/resources/custom_intrinsics.ll rename to source/qdk_package/tests-integration/interop_qiskit/resources/custom_intrinsics.ll diff --git a/source/pip/tests-integration/interop_qiskit/test_circuits/__init__.py b/source/qdk_package/tests-integration/interop_qiskit/test_circuits/__init__.py similarity index 100% rename from source/pip/tests-integration/interop_qiskit/test_circuits/__init__.py rename to source/qdk_package/tests-integration/interop_qiskit/test_circuits/__init__.py diff --git a/source/pip/tests-integration/interop_qiskit/test_circuits/test_circuits.py 
b/source/qdk_package/tests-integration/interop_qiskit/test_circuits/test_circuits.py similarity index 99% rename from source/pip/tests-integration/interop_qiskit/test_circuits/test_circuits.py rename to source/qdk_package/tests-integration/interop_qiskit/test_circuits/test_circuits.py index 00fea5ac06..8745ef18ee 100644 --- a/source/pip/tests-integration/interop_qiskit/test_circuits/test_circuits.py +++ b/source/qdk_package/tests-integration/interop_qiskit/test_circuits/test_circuits.py @@ -10,7 +10,7 @@ if QISKIT_AVAILABLE: from qiskit.circuit import QuantumCircuit - from qsharp.interop.qiskit import QSharpBackend + from qdk.qiskit import QSharpBackend def random_bit() -> Tuple["QuantumCircuit", List[str]]: diff --git a/source/pip/tests-integration/interop_qiskit/test_gate_correctness.py b/source/qdk_package/tests-integration/interop_qiskit/test_gate_correctness.py similarity index 97% rename from source/pip/tests-integration/interop_qiskit/test_gate_correctness.py rename to source/qdk_package/tests-integration/interop_qiskit/test_gate_correctness.py index 6cca737bd8..cfdfe77997 100644 --- a/source/pip/tests-integration/interop_qiskit/test_gate_correctness.py +++ b/source/qdk_package/tests-integration/interop_qiskit/test_gate_correctness.py @@ -2,16 +2,16 @@ # Licensed under the MIT License. 
import pytest -from qsharp import TargetProfile +from qdk import TargetProfile from interop_qiskit import QISKIT_AVAILABLE, SKIP_REASON if QISKIT_AVAILABLE: from qiskit import QuantumCircuit from qiskit.qasm3 import dumps - from qsharp.interop.qiskit import QSharpBackend - from qsharp.openqasm import run as run_qasm, OutputSemantics - from qsharp import set_quantum_seed, init + from qdk.qiskit import QSharpBackend + from qdk.openqasm import run as run_qasm, OutputSemantics + from qdk import set_quantum_seed, init from .test_circuits import ( generate_repro_information, diff --git a/source/pip/tests-integration/interop_qiskit/test_gateset_qasm.py b/source/qdk_package/tests-integration/interop_qiskit/test_gateset_qasm.py similarity index 99% rename from source/pip/tests-integration/interop_qiskit/test_gateset_qasm.py rename to source/qdk_package/tests-integration/interop_qiskit/test_gateset_qasm.py index 458a5e29c5..861040989b 100644 --- a/source/pip/tests-integration/interop_qiskit/test_gateset_qasm.py +++ b/source/qdk_package/tests-integration/interop_qiskit/test_gateset_qasm.py @@ -8,7 +8,7 @@ if QISKIT_AVAILABLE: from qiskit import QuantumCircuit - from qsharp.interop.qiskit import QSharpBackend + from qdk.qiskit import QSharpBackend def run_transpile_test( diff --git a/source/pip/tests-integration/interop_qiskit/test_neutral_atom.py b/source/qdk_package/tests-integration/interop_qiskit/test_neutral_atom.py similarity index 98% rename from source/pip/tests-integration/interop_qiskit/test_neutral_atom.py rename to source/qdk_package/tests-integration/interop_qiskit/test_neutral_atom.py index b384cb33d5..7f1f140fd3 100644 --- a/source/pip/tests-integration/interop_qiskit/test_neutral_atom.py +++ b/source/qdk_package/tests-integration/interop_qiskit/test_neutral_atom.py @@ -11,9 +11,9 @@ from qiskit import ClassicalRegister from qiskit.circuit import QuantumCircuit from qiskit.providers import JobStatus - from qsharp.interop.qiskit import NeutralAtomBackend - from 
qsharp._simulation import NoiseConfig - from qsharp._device._atom import NeutralAtomDevice + from qdk.qiskit import NeutralAtomBackend + from qdk._simulation import NoiseConfig + from qdk._device._atom import NeutralAtomDevice from .test_circuits import generate_repro_information @@ -454,7 +454,7 @@ def test_pretranspiled_matches_backend_transpiled(backend) -> None: @pytest.mark.skipif(not QISKIT_AVAILABLE, reason=SKIP_REASON) def test_non_base_target_profile_raises(backend) -> None: """Passing a non-Base target_profile must raise ValueError immediately.""" - from qsharp import TargetProfile + from qdk import TargetProfile circuit = create_bell_circuit() with pytest.raises(ValueError, match="TargetProfile.Base"): diff --git a/source/pip/tests-integration/interop_qiskit/test_qir.py b/source/qdk_package/tests-integration/interop_qiskit/test_qir.py similarity index 98% rename from source/pip/tests-integration/interop_qiskit/test_qir.py rename to source/qdk_package/tests-integration/interop_qiskit/test_qir.py index c359b7220d..f7194ac58a 100644 --- a/source/pip/tests-integration/interop_qiskit/test_qir.py +++ b/source/qdk_package/tests-integration/interop_qiskit/test_qir.py @@ -5,7 +5,8 @@ from typing import Optional import pytest -from qsharp import TargetProfile, QSharpError +from qdk import TargetProfile +from qdk._native import QSharpError from . 
import QISKIT_AVAILABLE, SKIP_REASON, ignore_on_failure @@ -14,7 +15,7 @@ from .test_circuits import ( generate_repro_information, ) - from qsharp.interop.qiskit import ( + from qdk.qiskit import ( OutputSemantics, QSharpBackend, QasmError, diff --git a/source/pip/tests-integration/interop_qiskit/test_qsharp.py b/source/qdk_package/tests-integration/interop_qiskit/test_qsharp.py similarity index 97% rename from source/pip/tests-integration/interop_qiskit/test_qsharp.py rename to source/qdk_package/tests-integration/interop_qiskit/test_qsharp.py index 62d0511e59..6f86598935 100644 --- a/source/pip/tests-integration/interop_qiskit/test_qsharp.py +++ b/source/qdk_package/tests-integration/interop_qiskit/test_qsharp.py @@ -7,7 +7,7 @@ if QISKIT_AVAILABLE: - from qsharp.interop.qiskit import ( + from qdk.qiskit import ( OutputSemantics, ProgramType, QSharpBackend, diff --git a/source/pip/tests-integration/interop_qiskit/test_re.py b/source/qdk_package/tests-integration/interop_qiskit/test_re.py similarity index 97% rename from source/pip/tests-integration/interop_qiskit/test_re.py rename to source/qdk_package/tests-integration/interop_qiskit/test_re.py index f87df50bfa..30501fbfab 100644 --- a/source/pip/tests-integration/interop_qiskit/test_re.py +++ b/source/qdk_package/tests-integration/interop_qiskit/test_re.py @@ -4,8 +4,8 @@ from concurrent.futures import ThreadPoolExecutor import pytest -from qsharp import QSharpError -from qsharp.estimator import ( +from qdk._native import QSharpError +from qdk.estimator import ( EstimatorParams, QubitParams, LogicalCounts, @@ -20,7 +20,7 @@ ) from qiskit.circuit import QuantumCircuit, Parameter from qiskit.circuit.library import RGQFTMultiplier - from qsharp.interop.qiskit import ResourceEstimatorBackend + from qdk.qiskit import ResourceEstimatorBackend from qiskit.version import __version__ as QISKIT_VERSION diff --git a/source/pip/tests-integration/interop_qiskit/test_run_sim.py 
b/source/qdk_package/tests-integration/interop_qiskit/test_run_sim.py similarity index 98% rename from source/pip/tests-integration/interop_qiskit/test_run_sim.py rename to source/qdk_package/tests-integration/interop_qiskit/test_run_sim.py index 627a7d9399..4b2a20da88 100644 --- a/source/pip/tests-integration/interop_qiskit/test_run_sim.py +++ b/source/qdk_package/tests-integration/interop_qiskit/test_run_sim.py @@ -3,7 +3,8 @@ from concurrent.futures import ThreadPoolExecutor import pytest -from qsharp import QSharpError, TargetProfile +from qdk import TargetProfile +from qdk._native import QSharpError from interop_qiskit import QISKIT_AVAILABLE, SKIP_REASON @@ -16,7 +17,7 @@ from qiskit.qasm3 import loads as from_qasm3 from qiskit.providers import JobStatus from qiskit import ClassicalRegister - from qsharp.interop.qiskit import QSharpBackend + from qdk.qiskit import QSharpBackend from .test_circuits import ( generate_repro_information, ) diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/ArithmeticOps.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/ArithmeticOps.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/ArithmeticOps.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/ArithmeticOps.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/BernsteinVaziraniNISQ.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/BernsteinVaziraniNISQ.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/BernsteinVaziraniNISQ.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/BernsteinVaziraniNISQ.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/ConstantFolding.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/ConstantFolding.qs similarity index 100% rename from 
source/pip/tests-integration/resources/adaptive_ri/input/ConstantFolding.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/ConstantFolding.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/CopyAndUpdateExpressions.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/CopyAndUpdateExpressions.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/CopyAndUpdateExpressions.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/CopyAndUpdateExpressions.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/DeutschJozsaNISQ.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/DeutschJozsaNISQ.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/DeutschJozsaNISQ.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/DeutschJozsaNISQ.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/ExpandedTests.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/ExpandedTests.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/ExpandedTests.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/ExpandedTests.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/Functors.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/Functors.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/Functors.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/Functors.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/HiddenShiftNISQ.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/HiddenShiftNISQ.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/HiddenShiftNISQ.qs rename to 
source/qdk_package/tests-integration/resources/adaptive_ri/input/HiddenShiftNISQ.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/IntegerComparison.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/IntegerComparison.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/IntegerComparison.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/IntegerComparison.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/IntrinsicCCNOT.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/IntrinsicCCNOT.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/IntrinsicCCNOT.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/IntrinsicCCNOT.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/IntrinsicCNOT.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/IntrinsicCNOT.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/IntrinsicCNOT.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/IntrinsicCNOT.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/IntrinsicHIXYZ.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/IntrinsicHIXYZ.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/IntrinsicHIXYZ.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/IntrinsicHIXYZ.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/IntrinsicM.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/IntrinsicM.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/IntrinsicM.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/IntrinsicM.qs diff --git 
a/source/pip/tests-integration/resources/adaptive_ri/input/IntrinsicMeasureWithBitFlipCode.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/IntrinsicMeasureWithBitFlipCode.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/IntrinsicMeasureWithBitFlipCode.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/IntrinsicMeasureWithBitFlipCode.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/IntrinsicMeasureWithPhaseFlipCode.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/IntrinsicMeasureWithPhaseFlipCode.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/IntrinsicMeasureWithPhaseFlipCode.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/IntrinsicMeasureWithPhaseFlipCode.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/IntrinsicRotationsWithPeriod.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/IntrinsicRotationsWithPeriod.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/IntrinsicRotationsWithPeriod.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/IntrinsicRotationsWithPeriod.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/IntrinsicSTSWAP.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/IntrinsicSTSWAP.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/IntrinsicSTSWAP.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/IntrinsicSTSWAP.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/MeasureAndReuse.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/MeasureAndReuse.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/MeasureAndReuse.qs rename to 
source/qdk_package/tests-integration/resources/adaptive_ri/input/MeasureAndReuse.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/MeasurementComparison.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/MeasurementComparison.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/MeasurementComparison.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/MeasurementComparison.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/NestedBranching.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/NestedBranching.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/NestedBranching.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/NestedBranching.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/RandomBit.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/RandomBit.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/RandomBit.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/RandomBit.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/SampleTeleport.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/SampleTeleport.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/SampleTeleport.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/SampleTeleport.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/ShortcuttingMeasurement.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/ShortcuttingMeasurement.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/ShortcuttingMeasurement.qs rename to 
source/qdk_package/tests-integration/resources/adaptive_ri/input/ShortcuttingMeasurement.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/Slicing.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/Slicing.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/Slicing.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/Slicing.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/SuperdenseCoding.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/SuperdenseCoding.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/SuperdenseCoding.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/SuperdenseCoding.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/SwitchHandling.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/SwitchHandling.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/SwitchHandling.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/SwitchHandling.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/ThreeQubitRepetitionCode.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/ThreeQubitRepetitionCode.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/ThreeQubitRepetitionCode.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/ThreeQubitRepetitionCode.qs diff --git a/source/pip/tests-integration/resources/adaptive_ri/input/WithinApply.qs b/source/qdk_package/tests-integration/resources/adaptive_ri/input/WithinApply.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/input/WithinApply.qs rename to source/qdk_package/tests-integration/resources/adaptive_ri/input/WithinApply.qs diff --git 
a/source/pip/tests-integration/resources/adaptive_ri/output/ArithmeticOps.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/ArithmeticOps.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/ArithmeticOps.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/ArithmeticOps.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/ArithmeticOps.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/ArithmeticOps.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/ArithmeticOps.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/ArithmeticOps.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/BernsteinVaziraniNISQ.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/BernsteinVaziraniNISQ.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/BernsteinVaziraniNISQ.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/BernsteinVaziraniNISQ.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/BernsteinVaziraniNISQ.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/BernsteinVaziraniNISQ.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/BernsteinVaziraniNISQ.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/BernsteinVaziraniNISQ.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/ConstantFolding.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/ConstantFolding.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/ConstantFolding.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/ConstantFolding.ll diff --git 
a/source/pip/tests-integration/resources/adaptive_ri/output/ConstantFolding.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/ConstantFolding.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/ConstantFolding.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/ConstantFolding.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/CopyAndUpdateExpressions.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/CopyAndUpdateExpressions.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/CopyAndUpdateExpressions.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/CopyAndUpdateExpressions.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/CopyAndUpdateExpressions.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/CopyAndUpdateExpressions.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/CopyAndUpdateExpressions.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/CopyAndUpdateExpressions.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/DeutschJozsaNISQ.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/DeutschJozsaNISQ.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/DeutschJozsaNISQ.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/DeutschJozsaNISQ.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/DeutschJozsaNISQ.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/DeutschJozsaNISQ.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/DeutschJozsaNISQ.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/DeutschJozsaNISQ.out 
diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/ExpandedTests.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/ExpandedTests.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/ExpandedTests.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/ExpandedTests.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/ExpandedTests.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/ExpandedTests.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/ExpandedTests.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/ExpandedTests.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/Functors.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/Functors.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/Functors.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/Functors.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/Functors.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/Functors.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/Functors.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/Functors.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/HiddenShiftNISQ.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/HiddenShiftNISQ.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/HiddenShiftNISQ.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/HiddenShiftNISQ.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/HiddenShiftNISQ.out 
b/source/qdk_package/tests-integration/resources/adaptive_ri/output/HiddenShiftNISQ.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/HiddenShiftNISQ.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/HiddenShiftNISQ.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/IntegerComparison.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/IntegerComparison.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/IntegerComparison.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/IntegerComparison.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/IntegerComparison.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/IntegerComparison.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/IntegerComparison.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/IntegerComparison.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicCCNOT.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicCCNOT.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicCCNOT.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicCCNOT.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicCCNOT.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicCCNOT.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicCCNOT.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicCCNOT.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicCNOT.ll 
b/source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicCNOT.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicCNOT.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicCNOT.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicCNOT.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicCNOT.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicCNOT.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicCNOT.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicHIXYZ.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicHIXYZ.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicHIXYZ.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicHIXYZ.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicHIXYZ.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicHIXYZ.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicHIXYZ.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicHIXYZ.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicM.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicM.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicM.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicM.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicM.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicM.out similarity index 100% rename from 
source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicM.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicM.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithBitFlipCode.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithBitFlipCode.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithBitFlipCode.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithBitFlipCode.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithBitFlipCode.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithBitFlipCode.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithBitFlipCode.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithBitFlipCode.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithPhaseFlipCode.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithPhaseFlipCode.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithPhaseFlipCode.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithPhaseFlipCode.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithPhaseFlipCode.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithPhaseFlipCode.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithPhaseFlipCode.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicMeasureWithPhaseFlipCode.out diff --git 
a/source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicRotationsWithPeriod.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicRotationsWithPeriod.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicRotationsWithPeriod.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicRotationsWithPeriod.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicRotationsWithPeriod.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicRotationsWithPeriod.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicRotationsWithPeriod.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicRotationsWithPeriod.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicSTSWAP.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicSTSWAP.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicSTSWAP.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicSTSWAP.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicSTSWAP.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicSTSWAP.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/IntrinsicSTSWAP.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/IntrinsicSTSWAP.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/MeasureAndReuse.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/MeasureAndReuse.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/MeasureAndReuse.ll rename to 
source/qdk_package/tests-integration/resources/adaptive_ri/output/MeasureAndReuse.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/MeasureAndReuse.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/MeasureAndReuse.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/MeasureAndReuse.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/MeasureAndReuse.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/MeasurementComparison.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/MeasurementComparison.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/MeasurementComparison.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/MeasurementComparison.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/MeasurementComparison.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/MeasurementComparison.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/MeasurementComparison.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/MeasurementComparison.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/NestedBranching.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/NestedBranching.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/NestedBranching.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/NestedBranching.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/NestedBranching.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/NestedBranching.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/NestedBranching.out rename to 
source/qdk_package/tests-integration/resources/adaptive_ri/output/NestedBranching.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/RandomBit.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/RandomBit.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/RandomBit.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/RandomBit.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/RandomBit.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/RandomBit.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/RandomBit.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/RandomBit.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/SampleTeleport.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/SampleTeleport.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/SampleTeleport.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/SampleTeleport.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/SampleTeleport.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/SampleTeleport.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/SampleTeleport.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/SampleTeleport.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/ShortcuttingMeasurement.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/ShortcuttingMeasurement.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/ShortcuttingMeasurement.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/ShortcuttingMeasurement.ll diff --git 
a/source/pip/tests-integration/resources/adaptive_ri/output/ShortcuttingMeasurement.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/ShortcuttingMeasurement.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/ShortcuttingMeasurement.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/ShortcuttingMeasurement.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/Slicing.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/Slicing.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/Slicing.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/Slicing.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/Slicing.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/Slicing.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/Slicing.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/Slicing.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/SuperdenseCoding.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/SuperdenseCoding.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/SuperdenseCoding.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/SuperdenseCoding.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/SuperdenseCoding.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/SuperdenseCoding.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/SuperdenseCoding.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/SuperdenseCoding.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/SwitchHandling.ll 
b/source/qdk_package/tests-integration/resources/adaptive_ri/output/SwitchHandling.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/SwitchHandling.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/SwitchHandling.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/SwitchHandling.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/SwitchHandling.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/SwitchHandling.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/SwitchHandling.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/ThreeQubitRepetitionCode.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/ThreeQubitRepetitionCode.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/ThreeQubitRepetitionCode.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/ThreeQubitRepetitionCode.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/ThreeQubitRepetitionCode.out b/source/qdk_package/tests-integration/resources/adaptive_ri/output/ThreeQubitRepetitionCode.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/ThreeQubitRepetitionCode.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/ThreeQubitRepetitionCode.out diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/WithinApply.ll b/source/qdk_package/tests-integration/resources/adaptive_ri/output/WithinApply.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/WithinApply.ll rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/WithinApply.ll diff --git a/source/pip/tests-integration/resources/adaptive_ri/output/WithinApply.out 
b/source/qdk_package/tests-integration/resources/adaptive_ri/output/WithinApply.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_ri/output/WithinApply.out rename to source/qdk_package/tests-integration/resources/adaptive_ri/output/WithinApply.out diff --git a/source/pip/tests-integration/resources/adaptive_rif/input/Doubles.qs b/source/qdk_package/tests-integration/resources/adaptive_rif/input/Doubles.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rif/input/Doubles.qs rename to source/qdk_package/tests-integration/resources/adaptive_rif/input/Doubles.qs diff --git a/source/pip/tests-integration/resources/adaptive_rif/output/Doubles.ll b/source/qdk_package/tests-integration/resources/adaptive_rif/output/Doubles.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rif/output/Doubles.ll rename to source/qdk_package/tests-integration/resources/adaptive_rif/output/Doubles.ll diff --git a/source/pip/tests-integration/resources/adaptive_rif/output/Doubles.out b/source/qdk_package/tests-integration/resources/adaptive_rif/output/Doubles.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rif/output/Doubles.out rename to source/qdk_package/tests-integration/resources/adaptive_rif/output/Doubles.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/ArithmeticOps.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/ArithmeticOps.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/ArithmeticOps.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/ArithmeticOps.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/BernsteinVaziraniNISQ.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/BernsteinVaziraniNISQ.qs similarity index 100% rename from 
source/pip/tests-integration/resources/adaptive_rifla/input/BernsteinVaziraniNISQ.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/BernsteinVaziraniNISQ.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/ConstantFolding.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/ConstantFolding.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/ConstantFolding.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/ConstantFolding.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/CopyAndUpdateExpressions.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/CopyAndUpdateExpressions.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/CopyAndUpdateExpressions.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/CopyAndUpdateExpressions.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/DeutschJozsaNISQ.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/DeutschJozsaNISQ.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/DeutschJozsaNISQ.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/DeutschJozsaNISQ.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/Doubles.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/Doubles.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/Doubles.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/Doubles.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/ExpandedTests.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/ExpandedTests.qs similarity index 100% rename from 
source/pip/tests-integration/resources/adaptive_rifla/input/ExpandedTests.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/ExpandedTests.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/Functors.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/Functors.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/Functors.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/Functors.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/HiddenShiftNISQ.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/HiddenShiftNISQ.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/HiddenShiftNISQ.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/HiddenShiftNISQ.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/IntegerComparison.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/IntegerComparison.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/IntegerComparison.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/IntegerComparison.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicCCNOT.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/IntrinsicCCNOT.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicCCNOT.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/IntrinsicCCNOT.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicCNOT.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/IntrinsicCNOT.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicCNOT.qs 
rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/IntrinsicCNOT.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicHIXYZ.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/IntrinsicHIXYZ.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicHIXYZ.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/IntrinsicHIXYZ.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicM.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/IntrinsicM.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicM.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/IntrinsicM.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicMeasureWithBitFlipCode.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/IntrinsicMeasureWithBitFlipCode.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicMeasureWithBitFlipCode.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/IntrinsicMeasureWithBitFlipCode.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicMeasureWithPhaseFlipCode.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/IntrinsicMeasureWithPhaseFlipCode.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicMeasureWithPhaseFlipCode.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/IntrinsicMeasureWithPhaseFlipCode.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicRotationsWithPeriod.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/IntrinsicRotationsWithPeriod.qs similarity index 100% 
rename from source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicRotationsWithPeriod.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/IntrinsicRotationsWithPeriod.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicSTSWAP.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/IntrinsicSTSWAP.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/IntrinsicSTSWAP.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/IntrinsicSTSWAP.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/LoopOverArrays.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/LoopOverArrays.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/LoopOverArrays.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/LoopOverArrays.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/MeasureAndReuse.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/MeasureAndReuse.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/MeasureAndReuse.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/MeasureAndReuse.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/MeasurementComparison.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/MeasurementComparison.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/MeasurementComparison.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/MeasurementComparison.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/NestedBranching.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/NestedBranching.qs similarity index 
100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/NestedBranching.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/NestedBranching.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/RUS.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/RUS.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/RUS.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/RUS.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/RandomBit.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/RandomBit.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/RandomBit.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/RandomBit.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/SampleTeleport.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/SampleTeleport.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/SampleTeleport.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/SampleTeleport.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/ShortcuttingMeasurement.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/ShortcuttingMeasurement.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/ShortcuttingMeasurement.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/ShortcuttingMeasurement.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/Slicing.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/Slicing.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/Slicing.qs rename to 
source/qdk_package/tests-integration/resources/adaptive_rifla/input/Slicing.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/SuperdenseCoding.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/SuperdenseCoding.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/SuperdenseCoding.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/SuperdenseCoding.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/SwitchHandling.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/SwitchHandling.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/SwitchHandling.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/SwitchHandling.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/ThreeQubitRepetitionCode.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/ThreeQubitRepetitionCode.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/ThreeQubitRepetitionCode.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/ThreeQubitRepetitionCode.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/input/WithinApply.qs b/source/qdk_package/tests-integration/resources/adaptive_rifla/input/WithinApply.qs similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/input/WithinApply.qs rename to source/qdk_package/tests-integration/resources/adaptive_rifla/input/WithinApply.qs diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.ll rename to 
source/qdk_package/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/ArithmeticOps.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/BernsteinVaziraniNISQ.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/BernsteinVaziraniNISQ.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/BernsteinVaziraniNISQ.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/BernsteinVaziraniNISQ.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/BernsteinVaziraniNISQ.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/BernsteinVaziraniNISQ.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/BernsteinVaziraniNISQ.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/BernsteinVaziraniNISQ.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ConstantFolding.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/ConstantFolding.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/ConstantFolding.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/ConstantFolding.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ConstantFolding.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/ConstantFolding.out similarity index 100% rename from 
source/pip/tests-integration/resources/adaptive_rifla/output/ConstantFolding.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/ConstantFolding.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/CopyAndUpdateExpressions.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/CopyAndUpdateExpressions.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/CopyAndUpdateExpressions.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/CopyAndUpdateExpressions.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/CopyAndUpdateExpressions.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/CopyAndUpdateExpressions.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/CopyAndUpdateExpressions.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/CopyAndUpdateExpressions.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/DeutschJozsaNISQ.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/DeutschJozsaNISQ.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/DeutschJozsaNISQ.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/DeutschJozsaNISQ.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/DeutschJozsaNISQ.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/DeutschJozsaNISQ.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/DeutschJozsaNISQ.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/DeutschJozsaNISQ.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.ll 
b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/Doubles.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/Doubles.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/Doubles.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/Doubles.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/Doubles.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ExpandedTests.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/ExpandedTests.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/ExpandedTests.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/ExpandedTests.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ExpandedTests.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/ExpandedTests.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/ExpandedTests.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/ExpandedTests.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/Functors.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/Functors.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/Functors.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/Functors.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/Functors.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/Functors.out similarity index 100% rename from 
source/pip/tests-integration/resources/adaptive_rifla/output/Functors.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/Functors.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/HiddenShiftNISQ.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/HiddenShiftNISQ.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/HiddenShiftNISQ.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/HiddenShiftNISQ.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/HiddenShiftNISQ.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/HiddenShiftNISQ.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/HiddenShiftNISQ.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/HiddenShiftNISQ.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntegerComparison.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntegerComparison.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntegerComparison.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/IntegerComparison.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntegerComparison.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCCNOT.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicCCNOT.ll similarity index 100% rename from 
source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCCNOT.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicCCNOT.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCCNOT.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicCCNOT.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCCNOT.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicCCNOT.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCNOT.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicCNOT.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCNOT.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicCNOT.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCNOT.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicCNOT.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicCNOT.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicCNOT.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicHIXYZ.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicHIXYZ.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicHIXYZ.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicHIXYZ.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicHIXYZ.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicHIXYZ.out similarity index 100% rename from 
source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicHIXYZ.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicHIXYZ.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicM.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicM.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicM.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicM.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicM.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicM.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicM.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicM.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithBitFlipCode.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithBitFlipCode.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithBitFlipCode.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithBitFlipCode.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithBitFlipCode.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithBitFlipCode.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithBitFlipCode.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithBitFlipCode.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithPhaseFlipCode.ll 
b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithPhaseFlipCode.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithPhaseFlipCode.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithPhaseFlipCode.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithPhaseFlipCode.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithPhaseFlipCode.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithPhaseFlipCode.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicMeasureWithPhaseFlipCode.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicRotationsWithPeriod.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicSTSWAP.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicSTSWAP.ll similarity index 100% rename from 
source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicSTSWAP.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicSTSWAP.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicSTSWAP.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicSTSWAP.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/IntrinsicSTSWAP.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/IntrinsicSTSWAP.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/LoopOverArrays.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/LoopOverArrays.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/LoopOverArrays.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/LoopOverArrays.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/LoopOverArrays.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/LoopOverArrays.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/LoopOverArrays.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/LoopOverArrays.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/MeasureAndReuse.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/MeasureAndReuse.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/MeasureAndReuse.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/MeasureAndReuse.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/MeasureAndReuse.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/MeasureAndReuse.out similarity index 100% rename from 
source/pip/tests-integration/resources/adaptive_rifla/output/MeasureAndReuse.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/MeasureAndReuse.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/MeasurementComparison.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/MeasurementComparison.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/MeasurementComparison.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/MeasurementComparison.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/MeasurementComparison.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/MeasurementComparison.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/MeasurementComparison.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/MeasurementComparison.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/NestedBranching.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/NestedBranching.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/NestedBranching.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/NestedBranching.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/NestedBranching.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/NestedBranching.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/NestedBranching.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/NestedBranching.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/RUS.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/RUS.ll similarity index 100% 
rename from source/pip/tests-integration/resources/adaptive_rifla/output/RUS.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/RUS.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/RUS.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/RUS.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/RUS.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/RUS.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/RandomBit.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/RandomBit.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/RandomBit.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/RandomBit.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/RandomBit.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/RandomBit.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/RandomBit.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/RandomBit.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/SampleTeleport.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/SampleTeleport.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/SampleTeleport.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/SampleTeleport.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/SampleTeleport.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/SampleTeleport.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/SampleTeleport.out rename to 
source/qdk_package/tests-integration/resources/adaptive_rifla/output/SampleTeleport.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ShortcuttingMeasurement.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/ShortcuttingMeasurement.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/ShortcuttingMeasurement.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/ShortcuttingMeasurement.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ShortcuttingMeasurement.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/ShortcuttingMeasurement.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/ShortcuttingMeasurement.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/ShortcuttingMeasurement.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/Slicing.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/Slicing.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/Slicing.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/Slicing.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/Slicing.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/Slicing.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/Slicing.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/Slicing.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/SuperdenseCoding.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/SuperdenseCoding.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/SuperdenseCoding.ll rename to 
source/qdk_package/tests-integration/resources/adaptive_rifla/output/SuperdenseCoding.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/SuperdenseCoding.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/SuperdenseCoding.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/SuperdenseCoding.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/SuperdenseCoding.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/SwitchHandling.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/SwitchHandling.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/SwitchHandling.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/SwitchHandling.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/SwitchHandling.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/SwitchHandling.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/SwitchHandling.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/SwitchHandling.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.out similarity index 100% rename from 
source/pip/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/ThreeQubitRepetitionCode.out diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/WithinApply.ll b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/WithinApply.ll similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/WithinApply.ll rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/WithinApply.ll diff --git a/source/pip/tests-integration/resources/adaptive_rifla/output/WithinApply.out b/source/qdk_package/tests-integration/resources/adaptive_rifla/output/WithinApply.out similarity index 100% rename from source/pip/tests-integration/resources/adaptive_rifla/output/WithinApply.out rename to source/qdk_package/tests-integration/resources/adaptive_rifla/output/WithinApply.out diff --git a/source/pip/tests-integration/test_adaptive_ri_qir.py b/source/qdk_package/tests-integration/test_adaptive_ri_qir.py similarity index 98% rename from source/pip/tests-integration/test_adaptive_ri_qir.py rename to source/qdk_package/tests-integration/test_adaptive_ri_qir.py index 3fc2c9f117..2970bb4dda 100644 --- a/source/pip/tests-integration/test_adaptive_ri_qir.py +++ b/source/qdk_package/tests-integration/test_adaptive_ri_qir.py @@ -4,7 +4,7 @@ import pytest -from qsharp import TargetProfile +from qdk import TargetProfile from utils import ( assert_strings_equal_ignore_line_endings, compile_qsharp, diff --git a/source/pip/tests-integration/test_adaptive_rif_qir.py b/source/qdk_package/tests-integration/test_adaptive_rif_qir.py similarity index 98% rename from source/pip/tests-integration/test_adaptive_rif_qir.py rename to source/qdk_package/tests-integration/test_adaptive_rif_qir.py index 87891f549a..61a0ce0996 100644 --- a/source/pip/tests-integration/test_adaptive_rif_qir.py +++ 
b/source/qdk_package/tests-integration/test_adaptive_rif_qir.py @@ -4,7 +4,7 @@ import pytest -from qsharp import TargetProfile +from qdk import TargetProfile from utils import ( assert_strings_equal_ignore_line_endings, compile_qsharp, diff --git a/source/pip/tests-integration/test_adaptive_rifla_qir.py b/source/qdk_package/tests-integration/test_adaptive_rifla_qir.py similarity index 98% rename from source/pip/tests-integration/test_adaptive_rifla_qir.py rename to source/qdk_package/tests-integration/test_adaptive_rifla_qir.py index ae07e4af44..f9eabc9cf7 100644 --- a/source/pip/tests-integration/test_adaptive_rifla_qir.py +++ b/source/qdk_package/tests-integration/test_adaptive_rifla_qir.py @@ -4,7 +4,7 @@ import pytest -from qsharp import TargetProfile +from qdk import TargetProfile from utils import ( assert_strings_equal_ignore_line_endings, compile_qsharp, diff --git a/source/pip/tests-integration/test_base_qir.py b/source/qdk_package/tests-integration/test_base_qir.py similarity index 99% rename from source/pip/tests-integration/test_base_qir.py rename to source/qdk_package/tests-integration/test_base_qir.py index 05646cc568..9d773a33db 100644 --- a/source/pip/tests-integration/test_base_qir.py +++ b/source/qdk_package/tests-integration/test_base_qir.py @@ -3,8 +3,7 @@ import pytest -import qsharp - +import qdk as qsharp try: from pyqir import ( Call, diff --git a/source/pip/tests-integration/test_requirements.txt b/source/qdk_package/tests-integration/test_requirements.txt similarity index 100% rename from source/pip/tests-integration/test_requirements.txt rename to source/qdk_package/tests-integration/test_requirements.txt diff --git a/source/pip/tests-integration/utils.py b/source/qdk_package/tests-integration/utils.py similarity index 98% rename from source/pip/tests-integration/utils.py rename to source/qdk_package/tests-integration/utils.py index 796e8db57b..9206aebdbd 100644 --- a/source/pip/tests-integration/utils.py +++ 
b/source/qdk_package/tests-integration/utils.py @@ -12,7 +12,7 @@ import os -from qsharp._native import ( +from qdk._native import ( Interpreter, TargetProfile, QSharpError, @@ -121,7 +121,7 @@ def get_interpreter( manifest_descriptor = None language_features = None - from qsharp._fs import read_file, list_directory + from qdk._fs import read_file, list_directory interpreter = Interpreter( target_profile, From c95fe044610924e3ee6d2dada0472cb564ce53fa Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Thu, 30 Apr 2026 13:53:09 -0700 Subject: [PATCH 07/25] move benchmark to qdk --- source/{pip => qdk_package}/benchmarks/bench_qre.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) rename source/{pip => qdk_package}/benchmarks/bench_qre.py (92%) diff --git a/source/pip/benchmarks/bench_qre.py b/source/qdk_package/benchmarks/bench_qre.py similarity index 92% rename from source/pip/benchmarks/bench_qre.py rename to source/qdk_package/benchmarks/bench_qre.py index 6697aba2ad..1ded85ffa9 100644 --- a/source/pip/benchmarks/bench_qre.py +++ b/source/qdk_package/benchmarks/bench_qre.py @@ -3,15 +3,15 @@ import timeit from dataclasses import dataclass, KW_ONLY, field -from qsharp.qre import linear_function, generic_function -from qsharp.qre._architecture import _make_instruction -from qsharp.qre.models import ( +from qdk.qre import linear_function, generic_function +from qdk.qre._architecture import _make_instruction +from qdk.qre.models import ( GateBased, SurfaceCode, TwoDimensionalYokedSurfaceCode, Litinski19Factory, ) -from qsharp.qre._enumeration import _enumerate_instances +from qdk.qre._enumeration import _enumerate_instances def bench_enumerate_instances(): From dd997bef9d894cf50181924fdfb2ef1efd6069cb Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Thu, 30 Apr 2026 14:54:39 -0700 Subject: [PATCH 08/25] fix test option in build.py --- .gitignore | 2 ++ build.py | 13 +++++++------ .../pip => samples/testing}/test_requirements.txt | 0 3 files changed, 9 
insertions(+), 6 deletions(-) rename {source/pip => samples/testing}/test_requirements.txt (100%) diff --git a/.gitignore b/.gitignore index 25e5e9fa19..1aa761c233 100644 --- a/.gitignore +++ b/.gitignore @@ -16,6 +16,8 @@ __pycache__/ /source/fuzz/coverage /source/fuzz/Cargo.lock /source/pip/doc +/source/pip/build/ +/source/pip/*.egg-info/ /source/samples_test/src/tests/*_generated.rs .mypy_cache/ .pytest_cache/ diff --git a/build.py b/build.py index a4b564ae2c..e5e9a1be22 100755 --- a/build.py +++ b/build.py @@ -442,7 +442,7 @@ def run_ci_historic_benchmark(): step_start("Building the qdk python package") # Reuse (or create) the pip environment so qdk wheel can be built/installed consistently. - (python_bin, pip_env) = use_python_env(qdk_python_src) + python_bin, pip_env = use_python_env(qdk_python_src) # Build the qdk wheel with maturin (it now owns the native extension). build_maturin_wheel(qdk_python_src, python_bin, pip_env) @@ -476,7 +476,7 @@ def run_ci_historic_benchmark(): if build_pip: step_start("Building the pip package") - (python_bin, pip_env) = use_python_env(pip_src) + python_bin, pip_env = use_python_env(pip_src) # qsharp is now a pure-Python shim depending on qdk. # Build with setuptools (no maturin needed). 
@@ -505,6 +505,7 @@ def run_ci_historic_benchmark(): "pip", "install", "--force-reinstall", + "--no-deps", "--no-index", "--find-links=" + wheels_dir, "qdk", @@ -538,7 +539,7 @@ def run_ci_historic_benchmark(): if build_widgets: step_start("Building the Python widgets") - (python_bin, _) = use_python_env(qdk_python_src) + python_bin, _ = use_python_env(qdk_python_src) widgets_build_args = [ python_bin, @@ -664,7 +665,7 @@ def run_ci_historic_benchmark(): if build_jupyterlab: step_start("Building the JupyterLab extension") - (python_bin, _) = use_python_env(jupyterlab_src) + python_bin, _ = use_python_env(jupyterlab_src) pip_build_args = [ python_bin, @@ -704,7 +705,7 @@ def run_ci_historic_benchmark(): or f.startswith("carbon.") ) ] - (python_bin, pip_env) = use_python_env(samples_src) + python_bin, pip_env = use_python_env(samples_src) # Install the qsharp package pip_install_args = [ @@ -818,7 +819,7 @@ def _run_notebooks(files): dir for dir, _, _ in project_directories if dir.find("testing") != -1 ] - install_python_test_requirements(pip_src, python_bin) + install_python_test_requirements(os.path.join(samples_src, "testing"), python_bin) for test_project_dir in test_projects_directories: run_python_tests(test_project_dir, python_bin, pip_env) step_end() diff --git a/source/pip/test_requirements.txt b/samples/testing/test_requirements.txt similarity index 100% rename from source/pip/test_requirements.txt rename to samples/testing/test_requirements.txt From 8b4eb34569e49ad3dd76d0f69d1dc056a7d2a6c2 Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Fri, 1 May 2026 10:22:50 -0700 Subject: [PATCH 09/25] minor change to pyproject.toml --- source/pip/pyproject.toml | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/source/pip/pyproject.toml b/source/pip/pyproject.toml index 3331449980..ba1b419af5 100644 --- a/source/pip/pyproject.toml +++ b/source/pip/pyproject.toml @@ -4,8 +4,8 @@ version = "0.0.0" readme = "README.md" requires-python = 
">= 3.10" dependencies = ["qdk==0.0.0"] +license = "MIT" classifiers = [ - "License :: OSI Approved :: MIT License", "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Programming Language :: Python :: 3", @@ -20,11 +20,14 @@ classifiers = [ ] [project.optional-dependencies] -jupyterlab = ["qsharp-jupyterlab"] -widgets = ["qsharp-widgets"] -qiskit = ["qiskit>=1.2.2,<3.0.0"] -cirq = ["cirq-core>=1.6.1,<1.7"] -qre = ["cirq-core==1.6.1,<1.7", "pandas>=2.1", "ply>=3.11", "pyqir>=0.12.3,<0.13"] +jupyterlab = ["qdk[jupyter]"] +widgets = ["qdk[jupyter]"] +jupyter = ["qdk[jupyter]"] +qiskit = ["qdk[qiskit]"] +cirq = ["qdk[cirq]"] +qre = ["qdk[qre]"] +azure = ["qdk[azure]"] +applications = ["qdk[applications]"] [build-system] requires = ["setuptools>=64", "wheel"] @@ -32,3 +35,4 @@ build-backend = "setuptools.build_meta" [tool.setuptools.packages.find] where = ["."] +exclude = ["build*"] From 494c6c1da37b954a61247a31b9fff82ce396dbba Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Fri, 1 May 2026 12:18:42 -0700 Subject: [PATCH 10/25] merge conflicts resolve --- source/qdk_package/qdk/_adaptive_bytecode.py | 4 +- source/qdk_package/qdk/_adaptive_pass.py | 131 ++++++++++---- .../qdk_package/qdk/_device/_atom/__init__.py | 28 +++ source/qdk_package/qdk/_ipython.py | 27 +-- source/qdk_package/qdk/_qsharp.py | 8 + source/qdk_package/qdk/_simulation.py | 170 ++++++++++-------- .../qdk/noisy_simulator/__init__.py | 4 + source/qdk_package/qdk/qiskit/__init__.py | 2 + .../tests/test_adaptive_cpu_bytecode.py | 8 +- .../tests/test_adaptive_cpu_noise.py | 26 +-- .../qdk_package/tests/test_adaptive_pass.py | 1 - 11 files changed, 252 insertions(+), 157 deletions(-) diff --git a/source/qdk_package/qdk/_adaptive_bytecode.py b/source/qdk_package/qdk/_adaptive_bytecode.py index 876a0a196f..aa244fc59c 100644 --- a/source/qdk_package/qdk/_adaptive_bytecode.py +++ b/source/qdk_package/qdk/_adaptive_bytecode.py @@ -44,6 +44,7 @@ OP_RESET = 0x12 OP_READ_RESULT = 
0x13 OP_RECORD_OUTPUT = 0x14 +OP_READ_LOSS = 0x15 # ── Integer Arithmetic ─────────────────────────────────────────────────────── OP_ADD = 0x20 @@ -127,6 +128,3 @@ REG_TYPE_F32 = 3 REG_TYPE_F64 = 4 REG_TYPE_PTR = 5 - -# ── Sentinel values ────────────────────────────────────────────────────────── -VOID_RETURN = 0xFFFFFFFF # Function does not have a return value. diff --git a/source/qdk_package/qdk/_adaptive_pass.py b/source/qdk_package/qdk/_adaptive_pass.py index a3cd815de6..11d89fcf0e 100644 --- a/source/qdk_package/qdk/_adaptive_pass.py +++ b/source/qdk_package/qdk/_adaptive_pass.py @@ -12,11 +12,18 @@ from __future__ import annotations from dataclasses import dataclass, astuple +from enum import Enum import pyqir import struct from typing import Any, Dict, List, Optional, Tuple, TypeAlias, cast from ._adaptive_bytecode import * + +class Bytecode(Enum): + Bit32 = 32 + Bit64 = 64 + + # --------------------------------------------------------------------------- # Gate name → OpID mapping (must match shader_types.rs OpID enum) # --------------------------------------------------------------------------- @@ -48,6 +55,7 @@ "mz": 21, "mresetz": 22, "swap": 24, + "move": 28, } # Gates that take a result ID as a second argument @@ -59,6 +67,12 @@ # Rotation gates that take an angle parameter as first argument ROTATION_GATES = {"rx", "ry", "rz", "rxx", "ryy", "rzz"} +# Single-qubit gates whose QIR signature carries device-specific extra +# arguments after the qubit pointer (e.g. ``move(qubit, i64, i64)``). The +# extra args are scheduling metadata for hardware backends and are not +# qubit IDs, so we resolve only ``args[0]`` and ignore the rest. 
+MOVE_GATES = {"move"} + # --------------------------------------------------------------------------- # ICmp / FCmp predicate mappings # --------------------------------------------------------------------------- @@ -160,7 +174,14 @@ class QuantumOp: q1: int q2: int q3: int - angle: float + # ``angle`` is stored as the raw bit pattern of an IEEE-754 float + # (encoded via ``encode_float_as_bits``) so it can be packed into the + # same integer-typed FFI table as the qubit indices. The Rust side + # reinterprets these bits as f32/f64 depending on the bytecode width. + # + # This also follows the same pattern in which floats are encoded as ints + # in the ``Instruction`` class. + angle: int @dataclass @@ -192,17 +213,26 @@ class SwitchCase: @dataclass class IntOperand: - val: int = 0 + val: int + bits: int def __post_init__(self): - # Mask to u32 range so negative Python ints become their - # two's-complement u32 representation (e.g. -7 → 0xFFFFFFF9). - self.val = self.val & 0xFFFFFFFF + # Mask to the appropriate word-width so negative Python ints and + # wider-than-target constants become their two's-complement + # representation at the target bit width + # (e.g. -7 → 0xFFFFFFF9 for 32-bit, 0xFFFFFFFFFFFFFFF9 for 64-bit). + # + # Note: we have no way to tell if a negative number, represented by + # pyqir as an u64 is an overflow or just a negative number. + # therefore we don't perform overflow checks here, and instead + # default to a wrapping behavior. 
+ mask = (1 << self.bits) - 1 + self.val = self.val & mask class FloatOperand: - def __init__(self, val: float = 0.0) -> None: - self.val: int = encode_float_as_bits(val) + def __init__(self, val: float, bytecode_kind: Bytecode) -> None: + self.val: int = encode_float_as_bits(val, bytecode_kind) @dataclass @@ -255,14 +285,24 @@ def unwrap_operands( return (dst, src0, src1, aux0, aux1, aux2, aux3) -def encode_float_as_bits(val: float) -> int: - return struct.unpack(" int: + if bytecode_kind == Bytecode.Bit32: + return struct.unpack(" int: idx = self._next_qop self._next_qop += 1 @@ -425,11 +467,11 @@ def _resolve_operand(self, value: pyqir.Value) -> IntOperand | FloatOperand | Re if isinstance(value, pyqir.IntConstant): val = value.value - return IntOperand(val) + return IntOperand(val, self._int_bits) if isinstance(value, pyqir.FloatConstant): val = value.value - return FloatOperand(val) + return FloatOperand(val, self._bytecode_kind) # Forward reference (e.g. phi incoming from a later block). # Pre-allocate a register; the defining instruction will reuse it @@ -442,7 +484,7 @@ def _resolve_operand(self, value: pyqir.Value) -> IntOperand | FloatOperand | Re # Try extracting as a qubit/result pointer constant. 
pid = pyqir.ptr_id(value) if pid is not None: - return IntOperand(pid) + return IntOperand(pid, self._int_bits) # Null pointer if value.is_null: reg = self._alloc_reg(value, REG_TYPE_PTR) @@ -626,8 +668,6 @@ def _emit_call(self, call: pyqir.Call) -> None: dst = self._alloc_reg(call, REG_TYPE_BOOL) result_reg = self._resolve_result_operand(call.args[0]) self._emit(OP_READ_RESULT, dst=dst, src0=result_reg) - case _ if callee.startswith("__quantum__qis__"): - self._emit_quantum_call(call) case "__quantum__rt__result_record_output": result_reg = self._resolve_result_operand(call.args[0]) label_str = self._extract_label(call.args[1]) @@ -681,9 +721,17 @@ def _emit_call(self, call: pyqir.Call) -> None: | "__quantum__rt__begin_parallel" | "__quantum__rt__end_parallel" | "__quantum__qis__barrier__body" - | "__quantum__rt__read_loss" ): pass # No-op + case "__quantum__rt__read_loss": + # Allocate a bool register and emit OP_READ_LOSS so the runtime + # can ask the simulator whether the given result was produced + # by measuring a lost qubit. Programs may branch on this value. 
+ dst = self._alloc_reg(call, REG_TYPE_BOOL) + result_reg = self._resolve_result_operand(call.args[0]) + self._emit(OP_READ_LOSS, dst=dst, src0=result_reg) + case _ if callee.startswith("__quantum__qis__"): + self._emit_quantum_call(call) case _ if callee in self._func_to_id: self._emit_ir_function_call(call) case _ if "qdk_noise" in call.callee.attributes.func: @@ -699,7 +747,11 @@ def _emit_call(self, call: pyqir.Call) -> None: def _resolve_qubit_operands( self, args: List[pyqir.Value] ) -> Tuple[IntOperand | Reg, IntOperand | Reg, IntOperand | Reg]: - qs: List[IntOperand | Reg] = [IntOperand(), IntOperand(), IntOperand()] + qs: List[IntOperand | Reg] = [ + IntOperand(0, self._int_bits), + IntOperand(0, self._int_bits), + IntOperand(0, self._int_bits), + ] for i, arg in enumerate(args): qs[i] = self._resolve_qubit_operand(arg) return (qs[0], qs[1], qs[2]) @@ -744,17 +796,36 @@ def _emit_quantum_call(self, call: pyqir.Call) -> None: aux1=q, ) return + if gate_name in MOVE_GATES: + # ``move(qubit, i64, i64)``: only the first arg is a qubit; the + # remaining args are device-specific scheduling metadata that + # the simulator ignores. Emit a single-qubit OP_QUANTUM_GATE so + # the runtime invokes ``Simulator::mov`` (which applies the + # configured ``noise.mov`` faults to that qubit). 
+ q1, q2, q3 = self._resolve_qubit_operands([call.args[0]]) + angle = FloatOperand(0.0, self._bytecode_kind) + qop_idx = self._emit_quantum_op(op_id, q1.val, q2.val, q3.val, angle.val) + self._emit( + OP_QUANTUM_GATE, + src0=angle, + aux0=qop_idx, + aux1=q1, + aux2=q2, + aux3=q3, + ) + return if gate_name in ROTATION_GATES: qubit_arg_offset = 1 angle = self._resolve_angle_operand(call.args[0]) else: qubit_arg_offset = 0 - angle = FloatOperand() + angle = FloatOperand(0.0, self._bytecode_kind) qubit_arg_offset = 1 if gate_name in ROTATION_GATES else 0 q1, q2, q3 = self._resolve_qubit_operands(call.args[qubit_arg_offset:]) qop_idx = self._emit_quantum_op(op_id, q1.val, q2.val, q3.val, angle.val) self._emit( OP_QUANTUM_GATE, + src0=angle, aux0=qop_idx, aux1=q1, aux2=q2, @@ -795,8 +866,8 @@ def _emit_noise_intrinsic_call(self, call: pyqir.Call) -> None: self._emit( OP_QUANTUM_GATE, aux0=qop_idx, - aux1=IntOperand(qubit_count), - aux2=IntOperand(arg_offset), + aux1=IntOperand(qubit_count, self._int_bits), + aux2=IntOperand(arg_offset, self._int_bits), ) elif self._noise_intrinsics is not None: raise ValueError(f"Missing noise intrinsic: {callee_name}") @@ -860,18 +931,14 @@ def _emit_switch(self, switch_instr: pyqir.Switch) -> None: compilation). ``operands`` is not affected by this behavior. """ # operands layout: [cond, default_block, case_val0, case_block0, ...] 
- ops = switch_instr.operands - cond_reg = self._resolve_operand(ops[0]) - default_block = self._block_to_id[ops[1]] + cond_reg = self._resolve_operand(switch_instr.operands[0]) + default_block = self._block_to_id[switch_instr.default] case_offset = len(self.switch_cases) - num_case_pairs = (len(ops) - 2) // 2 - for i in range(num_case_pairs): - case_val = ops[2 + 2 * i] - case_block = ops[2 + 2 * i + 1] - target_block = self._block_to_id[case_block] + for case_val, block in switch_instr.cases: + target_block = self._block_to_id[block] switch_case = SwitchCase(case_val.value, target_block) self.switch_cases.append(switch_case) - case_count = num_case_pairs + case_count = len(switch_instr.cases) self._emit( OP_SWITCH, src0=cond_reg, @@ -896,7 +963,7 @@ def _emit_ret(self, instr: Any) -> None: self._emit(OP_RET, dst=ret_reg) else: # Void return — use immediate 0 as exit code. - self._emit(OP_RET, dst=IntOperand(0)) + self._emit(OP_RET, dst=IntOperand(0, self._int_bits)) # ------------------------------------------------------------------ # Comparison emitters @@ -960,7 +1027,7 @@ def _emit_ir_function_call(self, call: Any) -> None: self.call_args.append(reg.val) # Allocate return register if function has non-void return type if call.type.is_void: - return_reg = VOID_RETURN # no return + return_reg = void_return(self._bytecode_kind) # no return else: return_reg = self._alloc_reg(call, REG_TYPE_I32) self._emit( diff --git a/source/qdk_package/qdk/_device/_atom/__init__.py b/source/qdk_package/qdk/_device/_atom/__init__.py index bea15a145c..f58a7ab77f 100644 --- a/source/qdk_package/qdk/_device/_atom/__init__.py +++ b/source/qdk_package/qdk/_device/_atom/__init__.py @@ -258,6 +258,34 @@ def simulate( if noise is None: noise = NoiseConfig() + # Override t, t_adj, s, s_adj, and z noise if they are unset and rz noise is set. 
+ if noise and not noise.rz.is_noiseless(): + if noise.t.is_noiseless(): + noise.t.x = noise.rz.x + noise.t.y = noise.rz.y + noise.t.z = noise.rz.z + noise.t.loss = noise.rz.loss + if noise.t_adj.is_noiseless(): + noise.t_adj.x = noise.rz.x + noise.t_adj.y = noise.rz.y + noise.t_adj.z = noise.rz.z + noise.t_adj.loss = noise.rz.loss + if noise.s.is_noiseless(): + noise.s.x = noise.rz.x + noise.s.y = noise.rz.y + noise.s.z = noise.rz.z + noise.s.loss = noise.rz.loss + if noise.s_adj.is_noiseless(): + noise.s_adj.x = noise.rz.x + noise.s_adj.y = noise.rz.y + noise.s_adj.z = noise.rz.z + noise.s_adj.loss = noise.rz.loss + if noise.z.is_noiseless(): + noise.z.x = noise.rz.x + noise.z.y = noise.rz.y + noise.z.z = noise.rz.z + noise.z.loss = noise.rz.loss + compiled = self.compile(qir) module = Module.from_ir(Context(), str(compiled)) ValidateNoConditionalBranches().run(module) diff --git a/source/qdk_package/qdk/_ipython.py b/source/qdk_package/qdk/_ipython.py index c010befe72..041148e4da 100644 --- a/source/qdk_package/qdk/_ipython.py +++ b/source/qdk_package/qdk/_ipython.py @@ -9,12 +9,11 @@ """ from time import monotonic -from IPython.display import display, Javascript, clear_output +from IPython.display import display, clear_output from IPython.core.magic import register_cell_magic from ._native import QSharpError from ._qsharp import get_interpreter, qsharp_value_to_python_value from . import telemetry_events -import pathlib def register_magic(): @@ -49,30 +48,6 @@ def callback(output): raise QSharpCellError(str(e)) -def enable_classic_notebook_codemirror_mode(): - """ - Registers %%qsharp cells with MIME type text/x-qsharp - and defines a CodeMirror mode to enable syntax highlighting. - This only works in "classic" Jupyter notebooks, not Notebook v7. 
- """ - js_to_inject = open( - pathlib.Path(__file__) - .parent.resolve() - .joinpath(".data", "qsharp_codemirror.js"), - mode="r", - encoding="utf-8", - ).read() - - # Extend the JavaScript display helper to print nothing when used - # in a non-browser context (i.e. IPython console) - class JavaScriptWithPlainTextFallback(Javascript): - def __repr__(self): - return "" - - # This will run the JavaScript in the context of the frontend. - display(JavaScriptWithPlainTextFallback(js_to_inject)) - - class QSharpCellError(BaseException): """ Error raised when a %%qsharp cell fails. diff --git a/source/qdk_package/qdk/_qsharp.py b/source/qdk_package/qdk/_qsharp.py index b9e055a06b..5c7ce595d0 100644 --- a/source/qdk_package/qdk/_qsharp.py +++ b/source/qdk_package/qdk/_qsharp.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. +import warnings from . import telemetry_events, code from ._native import ( # type: ignore Interpreter, @@ -1056,6 +1057,13 @@ def estimate( ipython_helper() + warnings.warn( + "This version of QRE is deprecated and will be removed in a future release. " + "Please use the new version of QRE in qdk.qre. 
Refer to aka.ms/qdk.QREv3 for more information.", + DeprecationWarning, + stacklevel=2, + ) + def _coerce_estimator_params( params: Optional[ Union[Dict[str, Any], List[Dict[str, Any]], EstimatorParams] diff --git a/source/qdk_package/qdk/_simulation.py b/source/qdk_package/qdk/_simulation.py index 3d2f8f7399..b20a2ef54b 100644 --- a/source/qdk_package/qdk/_simulation.py +++ b/source/qdk_package/qdk/_simulation.py @@ -9,8 +9,10 @@ QirInstructionId, QirInstruction, run_clifford, + run_clifford_adaptive, run_parallel_shots, run_adaptive_parallel_shots, + run_cpu_adaptive, run_cpu_full_state, NoiseConfig, GpuContext, @@ -25,7 +27,12 @@ ) from ._qsharp import QirInputData, Result from typing import TYPE_CHECKING -from ._adaptive_pass import AdaptiveProfilePass, OP_RECORD_OUTPUT +from ._adaptive_pass import ( + AdaptiveProfilePass, + AdaptiveProgram, + Bytecode, + OP_RECORD_OUTPUT, +) if TYPE_CHECKING: # This is in the pyi file only from ._native import GpuShotResults @@ -485,61 +492,86 @@ def is_adaptive(mod: pyqir.Module) -> bool: return func_attrs["qir_profiles"].string_value == "adaptive_profile" -def run_qir_clifford( - input: Union[QirInputData, str, bytes], - shots: Optional[int] = 1, - noise: Optional[NoiseConfig] = None, - seed: Optional[int] = None, -) -> List: - (mod, shots, noise, seed) = preprocess_simulation_input(input, shots, noise, seed) +def str_to_result(result: str): + match result: + case "0": + return Result.Zero + case "1": + return Result.One + case "L": + return Result.Loss + case _: + raise ValueError(f"Invalid result {result}") + + +def run_base( + rust_run_base_fn: Callable, + mod: pyqir.Module, + shots: int, + noise: Optional[NoiseConfig], + seed: int, +): + """ + Runs a base profile program given a rust simulator. Adds output recording logic. 
+ """ if noise is None: (gates, num_qubits, num_results) = AggregateGatesPass().run(mod) else: (gates, num_qubits, num_results) = CorrelatedNoisePass(noise).run(mod) recorder = OutputRecordingPass() recorder.run(mod) - return list( map( recorder.process_output, - run_clifford(gates, num_qubits, num_results, shots, noise, seed), + rust_run_base_fn(gates, num_qubits, num_results, shots, noise, seed), ) ) -def run_qir_cpu( +def run_adaptive( + rust_run_adaptive_fn: Callable, + mod: pyqir.Module, + program: AdaptiveProgram, + shots: int, + noise: Optional[NoiseConfig], + seed: int, +): + """ + Runs an adaptive profile program given a rust simulator. Adds output recording logic. + """ + results = rust_run_adaptive_fn(program.as_dict(), shots, noise, seed) + recorder = OutputRecordingPass() + recorder.run(mod) + return list(map(recorder.process_output, results)) + + +def run_qir_clifford( input: Union[QirInputData, str, bytes], shots: Optional[int] = 1, noise: Optional[NoiseConfig] = None, seed: Optional[int] = None, ) -> List: (mod, shots, noise, seed) = preprocess_simulation_input(input, shots, noise, seed) - DecomposeCcxPass().run(mod) - if noise is None: - (gates, num_qubits, num_results) = AggregateGatesPass().run(mod) + if is_adaptive(mod): + program = AdaptiveProfilePass(Bytecode.Bit64).run(mod, noise) + return run_adaptive(run_clifford_adaptive, mod, program, shots, noise, seed) else: - (gates, num_qubits, num_results) = CorrelatedNoisePass(noise).run(mod) - recorder = OutputRecordingPass() - recorder.run(mod) - - return list( - map( - recorder.process_output, - run_cpu_full_state(gates, num_qubits, num_results, shots, noise, seed), - ) - ) + return run_base(run_clifford, mod, shots, noise, seed) -def str_to_result(result: str): - match result: - case "0": - return Result.Zero - case "1": - return Result.One - case "L": - return Result.Loss - case _: - raise ValueError(f"Invalid result {result}") +def run_qir_cpu( + input: Union[QirInputData, str, bytes], + 
shots: Optional[int] = 1, + noise: Optional[NoiseConfig] = None, + seed: Optional[int] = None, +) -> List: + (mod, shots, noise, seed) = preprocess_simulation_input(input, shots, noise, seed) + DecomposeCcxPass().run(mod) + if is_adaptive(mod): + program = AdaptiveProfilePass(Bytecode.Bit64).run(mod, noise) + return run_adaptive(run_cpu_adaptive, mod, program, shots, noise, seed) + else: + return run_base(run_cpu_full_state, mod, shots, noise, seed) def run_qir_gpu( @@ -552,34 +584,12 @@ def run_qir_gpu( # Ccx is not support in the GPU simulator, decompose it DecomposeCcxPass().run(mod) if is_adaptive(mod): - program = AdaptiveProfilePass().run(mod, noise) - results = run_adaptive_parallel_shots(program.as_dict(), shots, noise, seed) - - # Extract recorded output result indices from the bytecode. - # OP_RECORD_OUTPUT with aux1=0 is result_record_output where - # src0 is the result index in the results buffer. - recorded_result_indices = [] - for ins in program.instructions: - if (ins.opcode & 0xFF) == OP_RECORD_OUTPUT and ins.aux1 == 0: - recorded_result_indices.append(ins.src0) - # Filter shot_results to only include recorded output indices - filtered = [] - for s in results: - filtered.append([str_to_result(s[i]) for i in recorded_result_indices]) - return filtered - else: - if noise is None: - (gates, num_qubits, num_results) = AggregateGatesPass().run(mod) - else: - (gates, num_qubits, num_results) = CorrelatedNoisePass(noise).run(mod) - recorder = OutputRecordingPass() - recorder.run(mod) - return list( - map( - recorder.process_output, - run_parallel_shots(gates, shots, num_qubits, num_results, noise, seed), - ) + program = AdaptiveProfilePass(Bytecode.Bit32).run(mod, noise) + return run_adaptive( + run_adaptive_parallel_shots, mod, program, shots, noise, seed ) + else: + return run_base(run_parallel_shots, mod, shots, noise, seed) def prepare_qir_with_correlated_noise( @@ -609,7 +619,7 @@ class GpuSimulator: def __init__(self): self.gpu_context = 
GpuContext() self._is_adaptive = False - self._recorded_result_indices = [] + self._recorder = None self.tables = None def load_noise_tables( @@ -647,17 +657,15 @@ def set_program(self, input: Union[QirInputData, str, bytes]): noise_intrinsics = None if self.tables is not None: noise_intrinsics = {name: table_id for table_id, name, _ in self.tables} - program = AdaptiveProfilePass().run(mod, noise_intrinsics=noise_intrinsics) + program = AdaptiveProfilePass(Bytecode.Bit32).run( + mod, noise_intrinsics=noise_intrinsics + ) self.gpu_context.set_adaptive_program(program.as_dict()) - - # Extract recorded output result indices from the bytecode. - # OP_RECORD_OUTPUT with aux1=0 is result_record_output where - # src0 is the result index in the results buffer. - self._recorded_result_indices = [] - for instr in program.instructions: - if instr.opcode & 0xFF == OP_RECORD_OUTPUT and instr.aux1 == 0: - self._recorded_result_indices.append(instr.src0) + # This is used later for output recording + self._recorder = OutputRecordingPass() + self._recorder.run(mod) else: + self._is_adaptive = False (self.gates, self.required_num_qubits, self.required_num_results) = ( prepare_qir_with_correlated_noise( input, self.tables if not self.tables is None else [] @@ -675,13 +683,19 @@ def run_shots(self, shots: int, seed: Optional[int] = None) -> "GpuShotResults": seed = seed if seed is not None else random.randint(0, 2**32 - 1) if self._is_adaptive: results = self.gpu_context.run_adaptive_shots(shots, seed=seed) - # Filter shot_results to only include recorded output indices - if self._recorded_result_indices: - indices = self._recorded_result_indices - filtered = [] - for s in results["shot_results"]: - filtered.append("".join(s[i] for i in indices)) - results["shot_results"] = filtered + for i, (shot_ret_code, shot_result) in enumerate( + zip(results["shot_result_codes"], results["shot_results"]) + ): + if shot_ret_code == 0: + # If the ret_code was zero, we do an output recording pass 
+ # on the output. + results["shot_results"][i] = self._recorder.process_output( + shot_result + ) + else: + # If the shot finished with a ret_code other than zero, + # we set the result to `None`. + results["shot_results"][i] = None return results return self.gpu_context.run_shots(shots, seed=seed) diff --git a/source/qdk_package/qdk/noisy_simulator/__init__.py b/source/qdk_package/qdk/noisy_simulator/__init__.py index c150ec9a45..af73d8f073 100644 --- a/source/qdk_package/qdk/noisy_simulator/__init__.py +++ b/source/qdk_package/qdk/noisy_simulator/__init__.py @@ -7,6 +7,8 @@ Instrument, DensityMatrixSimulator, StateVectorSimulator, + DensityMatrix, + StateVector, ) __all__ = [ @@ -15,4 +17,6 @@ "Instrument", "DensityMatrixSimulator", "StateVectorSimulator", + "DensityMatrix", + "StateVector", ] diff --git a/source/qdk_package/qdk/qiskit/__init__.py b/source/qdk_package/qdk/qiskit/__init__.py index 8a422db319..b32db63a72 100644 --- a/source/qdk_package/qdk/qiskit/__init__.py +++ b/source/qdk_package/qdk/qiskit/__init__.py @@ -48,6 +48,7 @@ print(result.results[0].data.counts) # accepted shots only print(result.results[0].data.raw_counts) # includes loss shots """ + from typing import Any, Dict, List, Optional, Union from ..estimator import EstimatorParams, EstimatorResult @@ -105,4 +106,5 @@ def estimate( "estimate", "EstimatorParams", "EstimatorResult", + "QasmError", ] diff --git a/source/qdk_package/tests/test_adaptive_cpu_bytecode.py b/source/qdk_package/tests/test_adaptive_cpu_bytecode.py index ca7995ec9e..eb40059f27 100644 --- a/source/qdk_package/tests/test_adaptive_cpu_bytecode.py +++ b/source/qdk_package/tests/test_adaptive_cpu_bytecode.py @@ -16,10 +16,10 @@ from collections import Counter import pytest from qdk._simulation import run_qir, NoiseConfig, Result +import qdk import qdk.openqasm from typing import Literal - # --------------------------------------------------------------------------- # Helpers # 
--------------------------------------------------------------------------- @@ -1704,10 +1704,10 @@ def _run_openqasm( sim_type: Literal["clifford", "cpu"] = "cpu", ): """Compile OpenQASM source via the adaptive pass and run on the given simulator.""" - qir = qsharp.openqasm.compile( + qir = qdk.openqasm.compile( qasm_src, - output_semantics=qsharp.openqasm.OutputSemantics.OpenQasm, - target_profile=qsharp.TargetProfile.Adaptive_RIF, + output_semantics=qdk.openqasm.OutputSemantics.OpenQasm, + target_profile=qdk.TargetProfile.Adaptive_RIF, ) results = run_qir(qir, shots, seed=seed, type=sim_type) return [map_result_list_to_str(r) for r in results] diff --git a/source/qdk_package/tests/test_adaptive_cpu_noise.py b/source/qdk_package/tests/test_adaptive_cpu_noise.py index b61a39d661..327942b6b4 100644 --- a/source/qdk_package/tests/test_adaptive_cpu_noise.py +++ b/source/qdk_package/tests/test_adaptive_cpu_noise.py @@ -14,10 +14,10 @@ from typing import Optional, List import pytest from qdk._simulation import run_qir, NoiseConfig, Result +import qdk import qdk.openqasm from typing import Literal - # --------------------------------------------------------------------------- # Helpers # --------------------------------------------------------------------------- @@ -231,10 +231,10 @@ def test_probabilistic_x_noise(sim_type): bit[3] res = measure qs; """ -QIR_WITH_CORRELATED_NOISE = qsharp.openqasm.compile( +QIR_WITH_CORRELATED_NOISE = qdk.openqasm.compile( QASM_WITH_CORRELATED_NOISE, - output_semantics=qsharp.openqasm.OutputSemantics.OpenQasm, - target_profile=qsharp.TargetProfile.Adaptive_RIF, + output_semantics=qdk.openqasm.OutputSemantics.OpenQasm, + target_profile=qdk.TargetProfile.Adaptive_RIF, ) @@ -335,10 +335,10 @@ def test_noise_intrinsics_with_registers_noisy(sim_type): bit res = measure q; """ -QIR_NOISE_1Q = qsharp.openqasm.compile( +QIR_NOISE_1Q = qdk.openqasm.compile( QASM_NOISE_1Q, - output_semantics=qsharp.openqasm.OutputSemantics.OpenQasm, - 
target_profile=qsharp.TargetProfile.Adaptive_RIF, + output_semantics=qdk.openqasm.OutputSemantics.OpenQasm, + target_profile=qdk.TargetProfile.Adaptive_RIF, ) @@ -364,10 +364,10 @@ def test_noise_intrinsic_1q_x_flip(sim_type): bit[2] res = measure qs; """ -QIR_NOISE_2Q = qsharp.openqasm.compile( +QIR_NOISE_2Q = qdk.openqasm.compile( QASM_NOISE_2Q, - output_semantics=qsharp.openqasm.OutputSemantics.OpenQasm, - target_profile=qsharp.TargetProfile.Adaptive_RIF, + output_semantics=qdk.openqasm.OutputSemantics.OpenQasm, + target_profile=qdk.TargetProfile.Adaptive_RIF, ) @@ -395,10 +395,10 @@ def test_noise_intrinsic_2q_xx_flip(sim_type): bit[5] res = measure qs; """ -QIR_NOISE_5Q = qsharp.openqasm.compile( +QIR_NOISE_5Q = qdk.openqasm.compile( QASM_NOISE_5Q, - output_semantics=qsharp.openqasm.OutputSemantics.OpenQasm, - target_profile=qsharp.TargetProfile.Adaptive_RIF, + output_semantics=qdk.openqasm.OutputSemantics.OpenQasm, + target_profile=qdk.TargetProfile.Adaptive_RIF, ) diff --git a/source/qdk_package/tests/test_adaptive_pass.py b/source/qdk_package/tests/test_adaptive_pass.py index 70ecdc152b..f4d744657e 100644 --- a/source/qdk_package/tests/test_adaptive_pass.py +++ b/source/qdk_package/tests/test_adaptive_pass.py @@ -15,7 +15,6 @@ from qdk._adaptive_pass import AdaptiveProfilePass, AdaptiveProgram, Bytecode from qdk._adaptive_bytecode import * - # --------------------------------------------------------------------------- # Helpers # --------------------------------------------------------------------------- From 9a730922fa8a270370a525807bac6beb3f84840d Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Fri, 1 May 2026 12:40:29 -0700 Subject: [PATCH 11/25] update notebook --- samples/qre/2_analysing_results.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/qre/2_analysing_results.ipynb b/samples/qre/2_analysing_results.ipynb index ca9cdf72be..da2c2a2d47 100644 --- a/samples/qre/2_analysing_results.ipynb +++ 
b/samples/qre/2_analysing_results.ipynb @@ -192,7 +192,7 @@ } ], "source": [ - "from qsharp.code import EstimateAdder\n", + "from qdk.code import EstimateAdder\n", "\n", "app = QSharpApplication(EstimateAdder)\n", "arch = GateBased(error_rate=1e-4, gate_time=100, measurement_time=500)\n", From c1af9b05b4509cb6cc2cb0fccd1d515365f8d9e7 Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Fri, 1 May 2026 13:30:43 -0700 Subject: [PATCH 12/25] update test notebooks --- .../test-no-lang-metadata.ipynb | 93 +++++----- .../test-notebook-profile.ipynb | 2 +- .../test-workspace/test.ipynb | 166 +++++++++--------- 3 files changed, 132 insertions(+), 129 deletions(-) diff --git a/source/vscode/test/suites/language-service/test-workspace/test-no-lang-metadata.ipynb b/source/vscode/test/suites/language-service/test-workspace/test-no-lang-metadata.ipynb index 440964f30a..ef387b0346 100644 --- a/source/vscode/test/suites/language-service/test-workspace/test-no-lang-metadata.ipynb +++ b/source/vscode/test/suites/language-service/test-workspace/test-no-lang-metadata.ipynb @@ -1,49 +1,52 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "1e8e4faa", - "metadata": {}, - "outputs": [], - "source": [ - "import qsharp\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1b55e53c", - "metadata": { - "scrolled": false - }, - "outputs": [], - "source": [ - "%%qsharp\n", - "\n", - "Test()\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.4" + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "1e8e4faa", + "metadata": {}, + "outputs": [], + "source": [ + "from qdk import qsharp\n" + ] + }, + 
{ + "cell_type": "code", + "execution_count": null, + "id": "1b55e53c", + "metadata": { + "scrolled": false, + "vscode": { + "languageId": "qsharp" } + }, + "outputs": [], + "source": [ + "%%qsharp\n", + "\n", + "Test()\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" }, - "nbformat": 4, - "nbformat_minor": 5 + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 } diff --git a/source/vscode/test/suites/language-service/test-workspace/test-notebook-profile.ipynb b/source/vscode/test/suites/language-service/test-workspace/test-notebook-profile.ipynb index 652b9b3e64..681a0c244f 100644 --- a/source/vscode/test/suites/language-service/test-workspace/test-notebook-profile.ipynb +++ b/source/vscode/test/suites/language-service/test-workspace/test-notebook-profile.ipynb @@ -7,7 +7,7 @@ "metadata": {}, "outputs": [], "source": [ - "import qsharp\n" + "from qdk import qsharp\n" ] }, { diff --git a/source/vscode/test/suites/language-service/test-workspace/test.ipynb b/source/vscode/test/suites/language-service/test-workspace/test.ipynb index 5a9ca84f31..16fc186f17 100644 --- a/source/vscode/test/suites/language-service/test-workspace/test.ipynb +++ b/source/vscode/test/suites/language-service/test-workspace/test.ipynb @@ -1,88 +1,88 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "1e8e4faa", - "metadata": {}, - "outputs": [], - "source": [ - "import qsharp\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9df62352", - "metadata": { - "vscode": { - "languageId": "qsharp" - } - }, - "outputs": [], - "source": [ - "%%qsharp\n", - "\n", - "operation Test() : Unit {\n", - " let foo = 
\"hello!\";\n", - " Message(foo);\n", - "}\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1b55e53c", - "metadata": { - "scrolled": false, - "vscode": { - "languageId": "qsharp" - } - }, - "outputs": [], - "source": [ - "%%qsharp\n", - "\n", - "Test()\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cc18584a", - "metadata": { - "vscode": { - "languageId": "qsharp" - } - }, - "outputs": [], - "source": [ - "%%qsharp\n", - "\n", - "operation BadSyntax() {\n", - "}\n" - ] + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "1e8e4faa", + "metadata": {}, + "outputs": [], + "source": [ + "from qdk import qsharp\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9df62352", + "metadata": { + "vscode": { + "languageId": "qsharp" + } + }, + "outputs": [], + "source": [ + "%%qsharp\n", + "\n", + "operation Test() : Unit {\n", + " let foo = \"hello!\";\n", + " Message(foo);\n", + "}\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1b55e53c", + "metadata": { + "scrolled": false, + "vscode": { + "languageId": "qsharp" } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.4" + }, + "outputs": [], + "source": [ + "%%qsharp\n", + "\n", + "Test()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cc18584a", + "metadata": { + "vscode": { + "languageId": "qsharp" } + }, + "outputs": [], + "source": [ + "%%qsharp\n", + "\n", + "operation BadSyntax() {\n", + "}\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" }, - "nbformat": 4, - 
"nbformat_minor": 5 + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 } From dd745c16afad4283acdcac07f857793c5340421b Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Fri, 1 May 2026 13:33:21 -0700 Subject: [PATCH 13/25] updated codeowners to use the new paths --- .github/CODEOWNERS | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5edd1862eb..c6e627a2de 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -19,7 +19,8 @@ /library @swernli @orpuente-MS /source/npm @billti @minestarks @ScottCarda-MS /source/pip @billti @idavis @minestarks -/source/pip/qsharp/qre @msoeken @brad-lackey @jwhogabo +/source/qdk_package @billti @idavis @minestarks +/source/qdk_package/qdk/qre @msoeken @brad-lackey @jwhogabo /source/playground @billti @minestarks /source/qre @msoeken @brad-lackey @jwhogabo /source/resource_estimator @billti @swernli From 052c29fcfe8305d29077eaf50bab553e9b0fbbe3 Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Fri, 1 May 2026 13:38:22 -0700 Subject: [PATCH 14/25] update copilot instructions --- .github/copilot-instructions.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 7d0098bc05..ae42a474bc 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -10,7 +10,7 @@ All internal source code for the compiler and related tooling has been moved und Most of the core components are implemented in Rust. These components are packaged in two ways: -1. Compiled as a native Python module and packaged into the `qsharp` Python package +1. Compiled as a native Python module and packaged into the `qdk` Python package 2. 
Compiled into WebAssembly and packaged into the `qsharp-lang` npm package ## Repo Contents @@ -52,7 +52,8 @@ Most of the core components are implemented in Rust. These components are packag **Python** -- **pip/**: The `qsharp` Python package +- **qdk_package/**: The `qdk` Python package (core package with native Rust extension) +- **pip/**: The `qsharp` Python package (thin deprecation shim that re-exports from `qdk`) - **jupyterlab/**: JupyterLab extension for Q# - **widgets/**: Q# Jupyter widgets @@ -82,7 +83,8 @@ Most of the core components are implemented in Rust. These components are packag - `./build.py` runs full CI checks, including lints and unit tests. - `./build.py --wasm --npm --vscode` only builds the VS Code extension, including its dependencies the WASM module and the `qsharp-lang` npm package. -- `./build.py --pip` only builds the `qsharp` Python package, including its native dependencies. +- `./build.py --qdk` only builds the `qdk` Python package, including its native dependencies. +- `./build.py --pip` only builds the `qsharp` shim package (requires `qdk` to be built first). - Pass `--no-check` to `./build.py`, in combination with any other command line options, to skip the lints and formatting checks. - When working in Rust parts of the codebase, using `cargo` commands is usually more efficient than building via `./build.py`. - Many lints can be auto-fixed via `cargo clippy --fix`. 
From a298cb8a6a3b4c2a75274dc8d8d8c7a3a37d89a1 Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Mon, 4 May 2026 11:28:05 -0700 Subject: [PATCH 15/25] reorganize internals --- source/pip/qsharp/__init__.py | 26 +- source/pip/qsharp/_qsharp.py | 5 +- source/pip/qsharp/_simulation.py | 4 +- source/pip/qsharp/noisy_simulator/__init__.py | 12 +- source/pip/qsharp/utils/__init__.py | 8 +- source/qdk_package/README.md | 21 +- source/qdk_package/qdk/__init__.py | 16 +- .../qdk_package/qdk/_device/_atom/__init__.py | 4 +- source/qdk_package/qdk/_device/_device.py | 2 +- .../qdk/{_qsharp.py => _interpreter.py} | 695 +++++++----------- source/qdk_package/qdk/_ipython.py | 2 +- source/qdk_package/qdk/_types.py | 337 +++++++++ source/qdk_package/qdk/cirq/_neutral_atom.py | 10 +- source/qdk_package/qdk/cirq/_result.py | 3 +- .../qdk/noisy_simulator/__init__.py | 22 - source/qdk_package/qdk/openqasm/_circuit.py | 5 +- source/qdk_package/qdk/openqasm/_compile.py | 6 +- source/qdk_package/qdk/openqasm/_estimate.py | 2 +- source/qdk_package/qdk/openqasm/_import.py | 2 +- source/qdk_package/qdk/openqasm/_run.py | 7 +- source/qdk_package/qdk/qiskit/__init__.py | 4 +- .../qiskit/backends/neutral_atom_backend.py | 2 +- source/qdk_package/qdk/qre/interop/_qir.py | 2 +- source/qdk_package/qdk/qre/interop/_qsharp.py | 2 +- source/qdk_package/qdk/qsharp.py | 6 +- .../{simulation.py => simulation/__init__.py} | 28 +- .../_noisy_simulator.py | 0 .../_noisy_simulator.pyi | 0 .../qdk/{ => simulation}/_simulation.py | 27 +- source/qdk_package/qdk/utils/__init__.py | 8 - source/qdk_package/qdk/utils/_utils.py | 50 -- .../qdk_package/tests-integration/conftest.py | 6 +- .../interop_cirq/test_neutral_atom.py | 2 +- .../interop_qiskit/test_neutral_atom.py | 2 +- source/qdk_package/tests/conftest.py | 8 +- .../tests/test_adaptive_cpu_bytecode.py | 3 +- .../tests/test_adaptive_cpu_noise.py | 6 +- .../tests/test_adaptive_cpu_quantum_ops.py | 3 +- .../tests/test_adaptive_gpu_bytecode.py | 2 +- 
.../tests/test_adaptive_gpu_noise.py | 3 +- .../tests/test_adaptive_gpu_quantum_ops.py | 2 +- .../tests/test_clifford_simulator.py | 9 +- .../tests/test_correlated_noise.py | 7 +- .../qdk_package/tests/test_cpu_simulator.py | 9 +- source/qdk_package/tests/test_enums.py | 1 - .../qdk_package/tests/test_gpu_simulator.py | 21 +- source/qdk_package/tests/test_interpreter.py | 52 +- source/qdk_package/tests/test_noisy_config.py | 2 +- .../qdk_package/tests/test_noisy_simulator.py | 3 +- source/qdk_package/tests/test_qsharp.py | 145 ++-- .../tests/test_simulators_gates_noiseless.py | 7 +- .../tests/test_simulators_gates_noisy.py | 5 +- .../tests/test_sparse_simulator.py | 2 +- 53 files changed, 863 insertions(+), 755 deletions(-) rename source/qdk_package/qdk/{_qsharp.py => _interpreter.py} (77%) create mode 100644 source/qdk_package/qdk/_types.py delete mode 100644 source/qdk_package/qdk/noisy_simulator/__init__.py rename source/qdk_package/qdk/{simulation.py => simulation/__init__.py} (54%) rename source/qdk_package/qdk/{noisy_simulator => simulation}/_noisy_simulator.py (100%) rename source/qdk_package/qdk/{noisy_simulator => simulation}/_noisy_simulator.pyi (100%) rename source/qdk_package/qdk/{ => simulation}/_simulation.py (96%) delete mode 100644 source/qdk_package/qdk/utils/__init__.py delete mode 100644 source/qdk_package/qdk/utils/_utils.py diff --git a/source/pip/qsharp/__init__.py b/source/pip/qsharp/__init__.py index 7fc2ecfb87..b07108b4f7 100644 --- a/source/pip/qsharp/__init__.py +++ b/source/pip/qsharp/__init__.py @@ -22,7 +22,15 @@ ) # Re-export the full public API from qdk so that existing code keeps working. 
-from qdk._qsharp import ( +from qdk._types import ( + StateDump, + ShotResult, + PauliNoise, + DepolarizingNoise, + BitFlipNoise, + PhaseFlipNoise, +) +from qdk._interpreter import ( init, eval, run, @@ -34,16 +42,16 @@ set_classical_seed, dump_machine, dump_circuit, - StateDump, - ShotResult, - PauliNoise, - DepolarizingNoise, - BitFlipNoise, - PhaseFlipNoise, - CircuitGenerationMethod, ) -from qdk._native import Result, Pauli, QSharpError, TargetProfile, estimate_custom +from qdk._native import ( + Result, + Pauli, + QSharpError, + TargetProfile, + estimate_custom, + CircuitGenerationMethod, +) from qdk import telemetry_events diff --git a/source/pip/qsharp/_qsharp.py b/source/pip/qsharp/_qsharp.py index 2210eb7486..70255ad810 100644 --- a/source/pip/qsharp/_qsharp.py +++ b/source/pip/qsharp/_qsharp.py @@ -1,5 +1,6 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -# Deprecated: use qdk._qsharp instead. -from qdk._qsharp import * # noqa: F401,F403 +# Deprecated: use qdk._types and qdk._interpreter instead. +from qdk._types import * # noqa: F401,F403 +from qdk._interpreter import * # noqa: F401,F403 diff --git a/source/pip/qsharp/_simulation.py b/source/pip/qsharp/_simulation.py index f024496ccd..48ce6e9a41 100644 --- a/source/pip/qsharp/_simulation.py +++ b/source/pip/qsharp/_simulation.py @@ -1,5 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -# Deprecated: use qdk._simulation instead. -from qdk._simulation import * # noqa: F401,F403 +# Deprecated: use qdk.simulation instead. +from qdk.simulation._simulation import * # noqa: F401,F403 diff --git a/source/pip/qsharp/noisy_simulator/__init__.py b/source/pip/qsharp/noisy_simulator/__init__.py index ab8c6901e7..61805a2899 100644 --- a/source/pip/qsharp/noisy_simulator/__init__.py +++ b/source/pip/qsharp/noisy_simulator/__init__.py @@ -1,5 +1,13 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
-# Deprecated: use qdk.noisy_simulator instead. -from qdk.noisy_simulator import * # noqa: F401,F403 +# Deprecated: use qdk.simulation instead. +from qdk.simulation import ( # noqa: F401 + NoisySimulatorError, + Operation, + Instrument, + DensityMatrixSimulator, + StateVectorSimulator, + DensityMatrix, + StateVector, +) diff --git a/source/pip/qsharp/utils/__init__.py b/source/pip/qsharp/utils/__init__.py index 2eb60148d5..618a84d247 100644 --- a/source/pip/qsharp/utils/__init__.py +++ b/source/pip/qsharp/utils/__init__.py @@ -1,5 +1,9 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -# Deprecated: use qdk.utils instead. -from qdk.utils import * # noqa: F401,F403 +# Deprecated: use qdk.qsharp.dump_operation instead. +from qdk.qsharp import dump_operation # noqa: F401 + +__all__ = [ + "dump_operation", +] diff --git a/source/qdk_package/README.md b/source/qdk_package/README.md index 66535a03f2..59f13ad9b1 100644 --- a/source/qdk_package/README.md +++ b/source/qdk_package/README.md @@ -73,6 +73,7 @@ Submodules: - `qdk.qsharp` – exports the same APIs as the `qsharp` Python package - `qdk.openqasm` – exports the same APIs as the `openqasm` submodule of the `qsharp` Python package. - `qdk.estimator` – exports the same APIs as the `estimator` submodule of the `qsharp` Python package. +- `qdk.simulation` – noise-aware simulation utilities: `NeutralAtomDevice`, `NoiseConfig`, `run_qir`, `DensityMatrixSimulator`, `StateVectorSimulator`, and related types. - `qdk.widgets` – exports the Jupyter widgets available from the `qsharp-widgets` Python package (requires the `qdk[jupyter]` extra to be installed). - `qdk.azure` – exports the Python APIs available from the `azure-quantum` Python package (requires the `qdk[azure]` extra to be installed). - `qdk.qiskit` – exports the same APIs as the `interop.qiskit` submodule of the `qsharp` Python package (requires the `qdk[qiskit]` extra to be installed). 
@@ -122,8 +123,8 @@ qdk_package/ │ │ │ │── # ——— Moved from pip/qsharp/ (implementation modules) ——— │ ├── _native.pyd/.so # Built by maturin (module-name = "qdk._native") -│ ├── _qsharp.py # Core interpreter -│ ├── _simulation.py # QIR simulation +│ ├── _types.py # Pure Python types (PauliNoise, StateDump, etc.) +│ ├── _interpreter.py # Interpreter lifecycle & operations │ ├── _ipython.py # %%qsharp cell magic │ ├── _http.py # fetch_github() │ ├── _fs.py # File system callbacks @@ -141,11 +142,6 @@ qdk_package/ │ ├── openqasm/ # Direct module — no re-export shim needed │ │ └── __init__.py │ │ -│ ├── utils/ -│ │ └── __init__.py # dump_operation -│ │ -│ ├── noisy_simulator/ -│ │ └── __init__.py │ │ │ ├── qiskit/ # Lifted out of interop/ │ │ ├── __init__.py # QSharpBackend, NeutralAtomBackend, etc. @@ -182,9 +178,14 @@ qdk_package/ │ │ ├── models/__init__.py │ │ └── geometry/__init__.py │ │ -│ │── # ——— Remaining re-export modules (to revisit later) ——— -│ ├── qsharp.py # Re-exports full qsharp-like API from qdk._qsharp -│ ├── simulation.py # Re-exports NeutralAtomDevice, NoiseConfig +│ │── # ——— Re-export / facade modules ——— +│ ├── qsharp.py # Re-exports full qsharp-like API from _types + _interpreter +│ │ +│ ├── simulation/ # Simulation facade package +│ │ ├── __init__.py # Public API: NeutralAtomDevice, NoiseConfig, run_qir, etc. +│ │ ├── _simulation.py # QIR simulation implementation (internal) +│ │ ├── _noisy_simulator.py # Private wrapper for noisy simulator types +│ │ └── _noisy_simulator.pyi # Type stubs │ │ │ │── # ——— Unchanged ——— │ ├── widgets.py # from qsharp_widgets import * (external) diff --git a/source/qdk_package/qdk/__init__.py b/source/qdk_package/qdk/__init__.py index 67674a3649..895af6b56a 100644 --- a/source/qdk_package/qdk/__init__.py +++ b/source/qdk_package/qdk/__init__.py @@ -15,20 +15,14 @@ """ - from .telemetry_events import on_qdk_import on_qdk_import() # Some common utilities are lifted to the qdk root. from . 
import code -from ._qsharp import ( - set_quantum_seed, - set_classical_seed, - dump_machine, - init, - Result, - TargetProfile, +from ._native import Result, TargetProfile +from ._types import ( StateDump, ShotResult, PauliNoise, @@ -36,6 +30,12 @@ BitFlipNoise, PhaseFlipNoise, ) +from ._interpreter import ( + set_quantum_seed, + set_classical_seed, + dump_machine, + init, +) # utilities lifted from qsharp __all__ = [ diff --git a/source/qdk_package/qdk/_device/_atom/__init__.py b/source/qdk_package/qdk/_device/_atom/__init__.py index f58a7ab77f..7951fa27ff 100644 --- a/source/qdk_package/qdk/_device/_atom/__init__.py +++ b/source/qdk_package/qdk/_device/_atom/__init__.py @@ -2,9 +2,9 @@ # Licensed under the MIT License. from .._device import Device, Zone, ZoneType -from ..._simulation import NoiseConfig, run_qir_clifford, run_qir_cpu, run_qir_gpu +from ...simulation._simulation import NoiseConfig, run_qir_clifford, run_qir_cpu, run_qir_gpu from ..._native import try_create_gpu_adapter -from ..._qsharp import QirInputData +from ..._types import QirInputData from ... import telemetry_events from typing import List, Literal, Optional diff --git a/source/qdk_package/qdk/_device/_device.py b/source/qdk_package/qdk/_device/_device.py index 991dc46b24..e0030fa18a 100644 --- a/source/qdk_package/qdk/_device/_device.py +++ b/source/qdk_package/qdk/_device/_device.py @@ -2,7 +2,7 @@ # Licensed under the MIT License. from enum import Enum -from .._qsharp import QirInputData +from .._types import QirInputData class ZoneType(Enum): diff --git a/source/qdk_package/qdk/_qsharp.py b/source/qdk_package/qdk/_interpreter.py similarity index 77% rename from source/qdk_package/qdk/_qsharp.py rename to source/qdk_package/qdk/_interpreter.py index 5c7ce595d0..4f3f19ecca 100644 --- a/source/qdk_package/qdk/_qsharp.py +++ b/source/qdk_package/qdk/_interpreter.py @@ -1,6 +1,20 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
+"""Q# interpreter lifecycle and core operations. + +This module manages the singleton Q# interpreter instance and exposes the +public functions that drive it: :func:`init`, :func:`eval`, :func:`run`, +:func:`compile`, :func:`circuit`, :func:`estimate`, :func:`logical_counts`, +:func:`set_quantum_seed`, :func:`set_classical_seed`, :func:`dump_machine`, +and :func:`dump_circuit`. + +Internal helpers such as :func:`get_interpreter`, :func:`ipython_helper`, +:func:`python_args_to_interpreter_args`, and +:func:`qsharp_value_to_python_value` are also defined here for use by other +submodules. +""" + import warnings from . import telemetry_events, code from ._native import ( # type: ignore @@ -26,12 +40,11 @@ Any, Callable, Dict, + List, Optional, + Set, Tuple, - TypedDict, Union, - List, - Set, Iterable, cast, ) @@ -40,14 +53,26 @@ EstimatorParams, LogicalCounts, ) +from ._types import ( + PauliNoise, + DepolarizingNoise, + BitFlipNoise, + PhaseFlipNoise, + StateDump, + ShotResult, + Config, + QirInputData, +) import json -import os import sys import types -from pathlib import Path from time import monotonic from dataclasses import make_dataclass +# --------------------------------------------------------------------------- +# Value conversion helpers +# --------------------------------------------------------------------------- + def lower_python_obj(obj: object, visited: Optional[Set[object]] = None) -> Any: if visited is None: @@ -117,6 +142,89 @@ def python_args_to_interpreter_args(args): return lower_python_obj(args) +def qsharp_value_to_python_value(obj): + # Base case: Primitive types + if isinstance(obj, (bool, int, float, complex, str, Pauli, Result)): + return obj + + # Recursive case: Tuple + if isinstance(obj, tuple): + # Special case Value::UNIT maps to None. 
+ if not obj: + return None + return tuple(qsharp_value_to_python_value(elt) for elt in obj) + + # Recursive case: Array + if isinstance(obj, list): + return [qsharp_value_to_python_value(elt) for elt in obj] + + # Recursive case: Callable or Closure + if isinstance(obj, (GlobalCallable, Closure)): + return obj + + # Recursive case: Udt + if isinstance(obj, UdtValue): + class_name = obj.name + fields = [] + args = [] + for name, value_ir in obj.fields: + val = qsharp_value_to_python_value(value_ir) + ty = type(val) + args.append(val) + fields.append((name, ty)) + return make_dataclass(class_name, fields)(*args) + + +def make_class_rec(qsharp_type: TypeIR) -> type: + class_name = qsharp_type.unwrap_udt().name + fields = {} + for field in qsharp_type.unwrap_udt().fields: + ty = None + kind = field[1].kind() + + if kind == TypeKind.Primitive: + prim_kind = field[1].unwrap_primitive() + if prim_kind == PrimitiveKind.Bool: + ty = bool + elif prim_kind == PrimitiveKind.Int: + ty = int + elif prim_kind == PrimitiveKind.Double: + ty = float + elif prim_kind == PrimitiveKind.Complex: + ty = complex + elif prim_kind == PrimitiveKind.String: + ty = str + elif prim_kind == PrimitiveKind.Pauli: + ty = Pauli + elif prim_kind == PrimitiveKind.Result: + ty = Result + else: + raise QSharpError(f"unknown primitive {prim_kind}") + elif kind == TypeKind.Tuple: + # Special case Value::UNIT maps to None. 
+ if not field[1].unwrap_tuple(): + ty = type(None) + else: + ty = tuple + elif kind == TypeKind.Array: + ty = list + elif kind == TypeKind.Udt: + ty = make_class_rec(field[1]) + else: + raise QSharpError(f"unknown type {kind}") + fields[field[0]] = ty + + return make_dataclass( + class_name, + fields, + ) + + +# --------------------------------------------------------------------------- +# Interpreter singleton +# --------------------------------------------------------------------------- + + _interpreter: Union["Interpreter", None] = None _config: Union["Config", None] = None @@ -143,151 +251,6 @@ def ipython_helper(): pass -class Config: - """ - Configuration hints for the language service. - """ - - _config: Dict[str, Any] - - def __init__( - self, - target_profile: TargetProfile, - language_features: Optional[List[str]], - manifest: Optional[str], - project_root: Optional[str], - ): - if target_profile == TargetProfile.Adaptive_RI: - self._config = {"targetProfile": "adaptive_ri"} - elif target_profile == TargetProfile.Adaptive_RIF: - self._config = {"targetProfile": "adaptive_rif"} - elif target_profile == TargetProfile.Adaptive_RIFLA: - self._config = {"targetProfile": "adaptive_rifla"} - elif target_profile == TargetProfile.Base: - self._config = {"targetProfile": "base"} - elif target_profile == TargetProfile.Unrestricted: - self._config = {"targetProfile": "unrestricted"} - - if language_features is not None: - self._config["languageFeatures"] = language_features - if manifest is not None: - self._config["manifest"] = manifest - if project_root: - # For now, we only support local project roots, so use a file schema in the URI. - # In the future, we may support other schemes, such as github, if/when - # we have VS Code Web + Jupyter support. 
- self._config["projectRoot"] = Path(os.getcwd(), project_root).as_uri() - - def __repr__(self) -> str: - return "Q# initialized with configuration: " + str(self._config) - - # See https://ipython.readthedocs.io/en/stable/config/integrating.html#rich-display - # See https://ipython.org/ipython-doc/3/notebook/nbformat.html#display-data - # This returns a custom MIME-type representation of the Q# configuration. - # This data will be available in the cell output, but will not be displayed - # to the user, as frontends would not know how to render the custom MIME type. - # Editor services that interact with the notebook frontend - # (i.e. the language service) can read and interpret the data. - def _repr_mimebundle_( - self, include: Union[Any, None] = None, exclude: Union[Any, None] = None - ) -> Dict[str, Dict[str, Any]]: - return {"application/x.qsharp-config": self._config} - - def get_target_profile(self) -> str: - """ - Returns the target profile as a string, or "unspecified" if not set. - """ - return self._config.get("targetProfile", "unspecified") - - -class PauliNoise(Tuple[float, float, float]): - """ - The Pauli noise to use in simulation represented - as probabilities of Pauli-X, Pauli-Y, and Pauli-Z errors - """ - - def __new__(cls, x: float, y: float, z: float): - """ - Creates a new :class:`PauliNoise` instance with the given error probabilities. - - :param x: Probability of a Pauli-X (bit flip) error. Must be non-negative. - :type x: float - :param y: Probability of a Pauli-Y error. Must be non-negative. - :type y: float - :param z: Probability of a Pauli-Z (phase flip) error. Must be non-negative. - :type z: float - :return: A new :class:`PauliNoise` tuple ``(x, y, z)``. - :rtype: PauliNoise - :raises ValueError: If any probability is negative or if ``x + y + z > 1``. 
- """ - if x < 0 or y < 0 or z < 0: - raise ValueError("Pauli noise probabilities must be non-negative.") - if x + y + z > 1: - raise ValueError("The sum of Pauli noise probabilities must be at most 1.") - return super().__new__(cls, (x, y, z)) - - -class DepolarizingNoise(PauliNoise): - """ - The depolarizing noise to use in simulation. - """ - - def __new__(cls, p: float): - """ - Creates a new :class:`DepolarizingNoise` instance. - - The depolarizing channel applies Pauli-X, Pauli-Y, or Pauli-Z errors each with - probability ``p / 3``. - - :param p: Total depolarizing error probability. Must satisfy ``0 ≤ p ≤ 1``. - :type p: float - :return: A new :class:`DepolarizingNoise` with equal X, Y, and Z error probabilities. - :rtype: DepolarizingNoise - :raises ValueError: If ``p`` is negative or ``p > 1``. - """ - return super().__new__(cls, p / 3, p / 3, p / 3) - - -class BitFlipNoise(PauliNoise): - """ - The bit flip noise to use in simulation. - """ - - def __new__(cls, p: float): - """ - Creates a new :class:`BitFlipNoise` instance. - - The bit flip channel applies a Pauli-X error with probability ``p``. - - :param p: Probability of a bit flip (Pauli-X) error. Must satisfy ``0 ≤ p ≤ 1``. - :type p: float - :return: A new :class:`BitFlipNoise` with X error probability ``p``. - :rtype: BitFlipNoise - :raises ValueError: If ``p`` is negative or ``p > 1``. - """ - return super().__new__(cls, p, 0, 0) - - -class PhaseFlipNoise(PauliNoise): - """ - The phase flip noise to use in simulation. - """ - - def __new__(cls, p: float): - """ - Creates a new :class:`PhaseFlipNoise` instance. - - The phase flip channel applies a Pauli-Z error with probability ``p``. - - :param p: Probability of a phase flip (Pauli-Z) error. Must satisfy ``0 ≤ p ≤ 1``. - :type p: float - :return: A new :class:`PhaseFlipNoise` with Z error probability ``p``. - :rtype: PhaseFlipNoise - :raises ValueError: If ``p`` is negative or ``p > 1``. 
- """ - return super().__new__(cls, 0, 0, p) - - def init( *, target_profile: TargetProfile = TargetProfile.Unrestricted, @@ -352,7 +315,7 @@ def init( ) try: - (_, manifest_contents) = read_file(qsharp_json) + _, manifest_contents = read_file(qsharp_json) except Exception as e: raise QSharpError( f"Error reading {qsharp_json}. qsharp.json should exist at the project root and be a valid JSON file." @@ -426,165 +389,9 @@ def get_config() -> Config: return _config -class StateDump: - """ - A state dump returned from the Q# interpreter. - """ - - """ - The number of allocated qubits at the time of the dump. - """ - qubit_count: int - - __inner: dict - __data: StateDumpData - - def __init__(self, data: StateDumpData): - self.__data = data - self.__inner = data.get_dict() - self.qubit_count = data.qubit_count - - def __getitem__(self, index: int) -> complex: - return self.__inner.__getitem__(index) - - def __iter__(self): - return self.__inner.__iter__() - - def __len__(self) -> int: - return len(self.__inner) - - def __repr__(self) -> str: - return self.__data.__repr__() - - def __str__(self) -> str: - return self.__data.__str__() - - def _repr_markdown_(self) -> str: - return self.__data._repr_markdown_() - - def check_eq( - self, state: Union[Dict[int, complex], List[complex]], tolerance: float = 1e-10 - ) -> bool: - """ - Checks if the state dump is equal to the given state. This is not mathematical equality, - as the check ignores global phase. - - :param state: The state to check against, provided either as a dictionary of state indices to complex amplitudes, - or as a list of real amplitudes. - :param tolerance: The tolerance for the check. Defaults to 1e-10. - :return: ``True`` if the state dump is equal to the given state within the given tolerance, ignoring global phase. 
- :rtype: bool - """ - phase = None - # Convert a dense list of real amplitudes to a dictionary of state indices to complex amplitudes - if isinstance(state, list): - state = {i: val for i, val in enumerate(state)} - # Filter out zero states from the state dump and the given state based on tolerance - state = {k: v for k, v in state.items() if abs(v) > tolerance} - inner_state = {k: v for k, v in self.__inner.items() if abs(v) > tolerance} - if len(state) != len(inner_state): - return False - for key in state: - if key not in inner_state: - return False - if phase is None: - # Calculate the phase based on the first state pair encountered. - # Every pair of states after this must have the same phase for the states to be equivalent. - phase = inner_state[key] / state[key] - elif abs(phase - inner_state[key] / state[key]) > tolerance: - # This pair of states does not have the same phase, - # within tolerance, so the equivalence check fails. - return False - return True - - def as_dense_state(self) -> List[complex]: - """ - Returns the state dump as a dense list of complex amplitudes. This will include zero amplitudes. - - :return: A dense list of complex amplitudes, one per computational basis state. - :rtype: List[complex] - """ - return [self.__inner.get(i, complex(0)) for i in range(2**self.qubit_count)] - - -class ShotResult(TypedDict): - """ - A single result of a shot. - """ - - events: List[Output | StateDump | str] - result: Any - messages: List[str] - matrices: List[Output] - dumps: List[StateDump] - - -def eval( - source: str, - *, - save_events: bool = False, -) -> Any: - """ - Evaluates Q# source code. - - Output is printed to console. - - :param source: The Q# source code to evaluate. - :keyword save_events: If true, all output will be saved and returned. If false, they will be printed. - :return: The value returned by the last statement in the source code, or the saved output if ``save_events`` is true. 
- :rtype: Any - :raises QSharpError: If there is an error evaluating the source code. - """ - ipython_helper() - - results: ShotResult = { - "events": [], - "result": None, - "messages": [], - "matrices": [], - "dumps": [], - } - - def on_save_events(output: Output) -> None: - # Append the output to the last shot's output list - if output.is_matrix(): - results["events"].append(output) - results["matrices"].append(output) - elif output.is_state_dump(): - dump_data = cast(StateDumpData, output.state_dump()) - state_dump = StateDump(dump_data) - results["events"].append(state_dump) - results["dumps"].append(state_dump) - elif output.is_message(): - stringified = str(output) - results["events"].append(stringified) - results["messages"].append(stringified) - - def callback(output: Output) -> None: - if _in_jupyter: - try: - display(output) - return - except: - # If IPython is not available, fall back to printing the output - pass - print(output, flush=True) - - telemetry_events.on_eval() - start_time = monotonic() - - output = get_interpreter().interpret( - source, on_save_events if save_events else callback - ) - results["result"] = qsharp_value_to_python_value(output) - - durationMs = (monotonic() - start_time) * 1000 - telemetry_events.on_eval_end(durationMs) - - if save_events: - return results - else: - return results["result"] +# --------------------------------------------------------------------------- +# Callable / class factory helpers (used by native code) +# --------------------------------------------------------------------------- # Helper function that knows how to create a function that invokes a callable. 
This will be @@ -648,84 +455,6 @@ def callback(output: Output) -> None: module.__setattr__(callable_name, _callable) -def qsharp_value_to_python_value(obj): - # Base case: Primitive types - if isinstance(obj, (bool, int, float, complex, str, Pauli, Result)): - return obj - - # Recursive case: Tuple - if isinstance(obj, tuple): - # Special case Value::UNIT maps to None. - if not obj: - return None - return tuple(qsharp_value_to_python_value(elt) for elt in obj) - - # Recursive case: Array - if isinstance(obj, list): - return [qsharp_value_to_python_value(elt) for elt in obj] - - # Recursive case: Callable or Closure - if isinstance(obj, (GlobalCallable, Closure)): - return obj - - # Recursive case: Udt - if isinstance(obj, UdtValue): - class_name = obj.name - fields = [] - args = [] - for name, value_ir in obj.fields: - val = qsharp_value_to_python_value(value_ir) - ty = type(val) - args.append(val) - fields.append((name, ty)) - return make_dataclass(class_name, fields)(*args) - - -def make_class_rec(qsharp_type: TypeIR) -> type: - class_name = qsharp_type.unwrap_udt().name - fields = {} - for field in qsharp_type.unwrap_udt().fields: - ty = None - kind = field[1].kind() - - if kind == TypeKind.Primitive: - prim_kind = field[1].unwrap_primitive() - if prim_kind == PrimitiveKind.Bool: - ty = bool - elif prim_kind == PrimitiveKind.Int: - ty = int - elif prim_kind == PrimitiveKind.Double: - ty = float - elif prim_kind == PrimitiveKind.Complex: - ty = complex - elif prim_kind == PrimitiveKind.String: - ty = str - elif prim_kind == PrimitiveKind.Pauli: - ty = Pauli - elif prim_kind == PrimitiveKind.Result: - ty = Result - else: - raise QSharpError(f"unknown primitive {prim_kind}") - elif kind == TypeKind.Tuple: - # Special case Value::UNIT maps to None. 
- if not field[1].unwrap_tuple(): - ty = type(None) - else: - ty = tuple - elif kind == TypeKind.Array: - ty = list - elif kind == TypeKind.Udt: - ty = make_class_rec(field[1]) - else: - raise QSharpError(f"unknown type {kind}") - fields[field[0]] = ty - - return make_dataclass( - class_name, - fields, - ) - - def _make_class(qsharp_type: TypeIR, namespace: List[str], class_name: str): """ Helper function to create a python class given a description of it. This will be @@ -761,6 +490,79 @@ def _make_class(qsharp_type: TypeIR, namespace: List[str], class_name: str): module.__setattr__(class_name, QSharpClass) +# --------------------------------------------------------------------------- +# Public API functions +# --------------------------------------------------------------------------- + + +def eval( + source: str, + *, + save_events: bool = False, +) -> Any: + """ + Evaluates Q# source code. + + Output is printed to console. + + :param source: The Q# source code to evaluate. + :keyword save_events: If true, all output will be saved and returned. If false, they will be printed. + :return: The value returned by the last statement in the source code, or the saved output if ``save_events`` is true. + :rtype: Any + :raises QSharpError: If there is an error evaluating the source code. 
+ """ + ipython_helper() + + results: ShotResult = { + "events": [], + "result": None, + "messages": [], + "matrices": [], + "dumps": [], + } + + def on_save_events(output: Output) -> None: + # Append the output to the last shot's output list + if output.is_matrix(): + results["events"].append(output) + results["matrices"].append(output) + elif output.is_state_dump(): + dump_data = cast(StateDumpData, output.state_dump()) + state_dump = StateDump(dump_data) + results["events"].append(state_dump) + results["dumps"].append(state_dump) + elif output.is_message(): + stringified = str(output) + results["events"].append(stringified) + results["messages"].append(stringified) + + def callback(output: Output) -> None: + if _in_jupyter: + try: + display(output) + return + except: + # If IPython is not available, fall back to printing the output + pass + print(output, flush=True) + + telemetry_events.on_eval() + start_time = monotonic() + + output = get_interpreter().interpret( + source, on_save_events if save_events else callback + ) + results["result"] = qsharp_value_to_python_value(output) + + durationMs = (monotonic() - start_time) * 1000 + telemetry_events.on_eval_end(durationMs) + + if save_events: + return results + else: + return results["result"] + + def run( entry_expr: Union[str, Callable, GlobalCallable, Closure], shots: int, @@ -890,30 +692,6 @@ def on_save_events(output: Output) -> None: return [shot["result"] for shot in results] -# Class that wraps generated QIR, which can be used by -# azure-quantum as input data. -# -# This class must implement the QirRepresentable protocol -# that is defined by the azure-quantum package. -# See: https://github.com/microsoft/qdk-python/blob/fcd63c04aa871e49206703bbaa792329ffed13c4/azure-quantum/azure/quantum/target/target.py#L21 -class QirInputData: - # The name of this variable is defined - # by the protocol and must remain unchanged. 
- _name: str - - def __init__(self, name: str, ll_str: str): - self._name = name - self._ll_str = ll_str - - # The name of this method is defined - # by the protocol and must remain unchanged. - def _repr_qir_(self, **kwargs) -> bytes: - return self._ll_str.encode("utf-8") - - def __str__(self) -> str: - return self._ll_str - - def compile( entry_expr: Union[str, Callable, GlobalCallable, Closure], *args ) -> QirInputData: @@ -1187,3 +965,96 @@ def dump_circuit() -> Circuit: """ ipython_helper() return get_interpreter().dump_circuit() + + +def dump_operation(operation: str, num_qubits: int) -> List[List[complex]]: + """ + Returns a square matrix of complex numbers representing the operation performed. + + :param operation: The operation to be performed, which must operate on a list of qubits. + :param num_qubits: The number of qubits to be used. + + :return: The matrix representing the operation. + :rtype: List[List[complex]] + """ + import math + + code_str = f"""{{\n let op = {operation};\n use (targets, extra) = (Qubit[{num_qubits}], Qubit[{num_qubits}]);\n for i in 0..{num_qubits}-1 {{\n H(targets[i]);\n CNOT(targets[i], extra[i]);\n }}\n operation ApplyOp (op : (Qubit[] => Unit), targets : Qubit[]) : Unit {{ op(targets); }}\n ApplyOp(op, targets);\n Microsoft.Quantum.Diagnostics.DumpMachine();\n ResetAll(targets + extra);\n }}""" + result = run(code_str, shots=1, save_events=True)[0] + state = result["events"][-1].state_dump().get_dict() + num_entries = pow(2, num_qubits) + factor = math.sqrt(num_entries) + ndigits = 6 + matrix = [] + for i in range(num_entries): + matrix += [[]] + for j in range(num_entries): + entry = state.get(i * num_entries + j) + if entry is None: + matrix[i] += [complex(0, 0)] + else: + matrix[i] += [ + complex( + round(factor * entry.real, ndigits), + round(factor * entry.imag, ndigits), + ) + ] + return matrix + + +# --------------------------------------------------------------------------- +# __all__ +# 
--------------------------------------------------------------------------- + +__all__ = [ + # Types (re-exported from _types for convenience) + "PauliNoise", + "DepolarizingNoise", + "BitFlipNoise", + "PhaseFlipNoise", + "StateDump", + "ShotResult", + "Config", + "QirInputData", + # Native types re-exported + "Interpreter", + "TargetProfile", + "QSharpError", + "Output", + "Circuit", + "GlobalCallable", + "Closure", + "Pauli", + "Result", + "CircuitConfig", + "CircuitGenerationMethod", + "NoiseConfig", + "StateDumpData", + # Estimator types + "EstimatorResult", + "EstimatorParams", + "LogicalCounts", + # Interpreter lifecycle + "init", + "get_interpreter", + "get_config", + # Core operations + "eval", + "run", + "compile", + "circuit", + "estimate", + "logical_counts", + # Seed / state + "set_quantum_seed", + "set_classical_seed", + "dump_machine", + "dump_circuit", + "dump_operation", + # Helpers (used by other submodules) + "ipython_helper", + "python_args_to_interpreter_args", + "qsharp_value_to_python_value", + "lower_python_obj", + "make_class_rec", +] diff --git a/source/qdk_package/qdk/_ipython.py b/source/qdk_package/qdk/_ipython.py index 041148e4da..d2140aa2e6 100644 --- a/source/qdk_package/qdk/_ipython.py +++ b/source/qdk_package/qdk/_ipython.py @@ -12,7 +12,7 @@ from IPython.display import display, clear_output from IPython.core.magic import register_cell_magic from ._native import QSharpError -from ._qsharp import get_interpreter, qsharp_value_to_python_value +from ._interpreter import get_interpreter, qsharp_value_to_python_value from . import telemetry_events diff --git a/source/qdk_package/qdk/_types.py b/source/qdk_package/qdk/_types.py new file mode 100644 index 0000000000..6ed7b01710 --- /dev/null +++ b/source/qdk_package/qdk/_types.py @@ -0,0 +1,337 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +"""Core type definitions for the qdk package. 
+ +This module contains the pure-Python types that are used across the qdk +package. They have no dependency on the interpreter lifecycle and can be +imported freely by any submodule. + +Types defined here: + +- :class:`PauliNoise`, :class:`DepolarizingNoise`, :class:`BitFlipNoise`, + :class:`PhaseFlipNoise` — noise models for simulation. +- :class:`StateDump` — sparse state-vector snapshot. +- :class:`ShotResult` — per-shot output container. +- :class:`Config` — interpreter configuration / language-service hint. +- :class:`QirInputData` — compiled QIR wrapper for azure-quantum submission. +""" + +import os +from pathlib import Path +from typing import ( + Any, + Dict, + List, + Optional, + TypedDict, + Union, +) + +from ._native import ( # type: ignore + Output, + StateDumpData, + TargetProfile, +) + +# --------------------------------------------------------------------------- +# Noise models +# --------------------------------------------------------------------------- + + +class PauliNoise(tuple): + """ + The Pauli noise to use in simulation represented + as probabilities of Pauli-X, Pauli-Y, and Pauli-Z errors + """ + + def __new__(cls, x: float, y: float, z: float): + """ + Creates a new :class:`PauliNoise` instance with the given error probabilities. + + :param x: Probability of a Pauli-X (bit flip) error. Must be non-negative. + :type x: float + :param y: Probability of a Pauli-Y error. Must be non-negative. + :type y: float + :param z: Probability of a Pauli-Z (phase flip) error. Must be non-negative. + :type z: float + :return: A new :class:`PauliNoise` tuple ``(x, y, z)``. + :rtype: PauliNoise + :raises ValueError: If any probability is negative or if ``x + y + z > 1``. 
+ """ + if x < 0 or y < 0 or z < 0: + raise ValueError("Pauli noise probabilities must be non-negative.") + if x + y + z > 1: + raise ValueError("The sum of Pauli noise probabilities must be at most 1.") + return super().__new__(cls, (x, y, z)) + + +class DepolarizingNoise(PauliNoise): + """ + The depolarizing noise to use in simulation. + """ + + def __new__(cls, p: float): + """ + Creates a new :class:`DepolarizingNoise` instance. + + The depolarizing channel applies Pauli-X, Pauli-Y, or Pauli-Z errors each with + probability ``p / 3``. + + :param p: Total depolarizing error probability. Must satisfy ``0 ≤ p ≤ 1``. + :type p: float + :return: A new :class:`DepolarizingNoise` with equal X, Y, and Z error probabilities. + :rtype: DepolarizingNoise + :raises ValueError: If ``p`` is negative or ``p > 1``. + """ + return super().__new__(cls, p / 3, p / 3, p / 3) + + +class BitFlipNoise(PauliNoise): + """ + The bit flip noise to use in simulation. + """ + + def __new__(cls, p: float): + """ + Creates a new :class:`BitFlipNoise` instance. + + The bit flip channel applies a Pauli-X error with probability ``p``. + + :param p: Probability of a bit flip (Pauli-X) error. Must satisfy ``0 ≤ p ≤ 1``. + :type p: float + :return: A new :class:`BitFlipNoise` with X error probability ``p``. + :rtype: BitFlipNoise + :raises ValueError: If ``p`` is negative or ``p > 1``. + """ + return super().__new__(cls, p, 0, 0) + + +class PhaseFlipNoise(PauliNoise): + """ + The phase flip noise to use in simulation. + """ + + def __new__(cls, p: float): + """ + Creates a new :class:`PhaseFlipNoise` instance. + + The phase flip channel applies a Pauli-Z error with probability ``p``. + + :param p: Probability of a phase flip (Pauli-Z) error. Must satisfy ``0 ≤ p ≤ 1``. + :type p: float + :return: A new :class:`PhaseFlipNoise` with Z error probability ``p``. + :rtype: PhaseFlipNoise + :raises ValueError: If ``p`` is negative or ``p > 1``. 
+ """ + return super().__new__(cls, 0, 0, p) + + +# --------------------------------------------------------------------------- +# State dump +# --------------------------------------------------------------------------- + + +class StateDump: + """ + A state dump returned from the Q# interpreter. + """ + + """ + The number of allocated qubits at the time of the dump. + """ + qubit_count: int + + __inner: dict + __data: StateDumpData + + def __init__(self, data: StateDumpData): + self.__data = data + self.__inner = data.get_dict() + self.qubit_count = data.qubit_count + + def __getitem__(self, index: int) -> complex: + return self.__inner.__getitem__(index) + + def __iter__(self): + return self.__inner.__iter__() + + def __len__(self) -> int: + return len(self.__inner) + + def __repr__(self) -> str: + return self.__data.__repr__() + + def __str__(self) -> str: + return self.__data.__str__() + + def _repr_markdown_(self) -> str: + return self.__data._repr_markdown_() + + def check_eq( + self, state: Union[Dict[int, complex], List[complex]], tolerance: float = 1e-10 + ) -> bool: + """ + Checks if the state dump is equal to the given state. This is not mathematical equality, + as the check ignores global phase. + + :param state: The state to check against, provided either as a dictionary of state indices to complex amplitudes, + or as a list of real amplitudes. + :param tolerance: The tolerance for the check. Defaults to 1e-10. + :return: ``True`` if the state dump is equal to the given state within the given tolerance, ignoring global phase. 
+ :rtype: bool + """ + phase = None + # Convert a dense list of real amplitudes to a dictionary of state indices to complex amplitudes + if isinstance(state, list): + state = {i: val for i, val in enumerate(state)} + # Filter out zero states from the state dump and the given state based on tolerance + state = {k: v for k, v in state.items() if abs(v) > tolerance} + inner_state = {k: v for k, v in self.__inner.items() if abs(v) > tolerance} + if len(state) != len(inner_state): + return False + for key in state: + if key not in inner_state: + return False + if phase is None: + # Calculate the phase based on the first state pair encountered. + # Every pair of states after this must have the same phase for the states to be equivalent. + phase = inner_state[key] / state[key] + elif abs(phase - inner_state[key] / state[key]) > tolerance: + # This pair of states does not have the same phase, + # within tolerance, so the equivalence check fails. + return False + return True + + def as_dense_state(self) -> List[complex]: + """ + Returns the state dump as a dense list of complex amplitudes. This will include zero amplitudes. + + :return: A dense list of complex amplitudes, one per computational basis state. + :rtype: List[complex] + """ + return [self.__inner.get(i, complex(0)) for i in range(2**self.qubit_count)] + + +# --------------------------------------------------------------------------- +# Shot result +# --------------------------------------------------------------------------- + + +class ShotResult(TypedDict): + """ + A single result of a shot. + """ + + events: List[Output | StateDump | str] + result: Any + messages: List[str] + matrices: List[Output] + dumps: List[StateDump] + + +# --------------------------------------------------------------------------- +# Interpreter configuration +# --------------------------------------------------------------------------- + + +class Config: + """ + Configuration hints for the language service. 
+ """ + + _config: Dict[str, Any] + + def __init__( + self, + target_profile: TargetProfile, + language_features: Optional[List[str]], + manifest: Optional[str], + project_root: Optional[str], + ): + if target_profile == TargetProfile.Adaptive_RI: + self._config = {"targetProfile": "adaptive_ri"} + elif target_profile == TargetProfile.Adaptive_RIF: + self._config = {"targetProfile": "adaptive_rif"} + elif target_profile == TargetProfile.Adaptive_RIFLA: + self._config = {"targetProfile": "adaptive_rifla"} + elif target_profile == TargetProfile.Base: + self._config = {"targetProfile": "base"} + elif target_profile == TargetProfile.Unrestricted: + self._config = {"targetProfile": "unrestricted"} + + if language_features is not None: + self._config["languageFeatures"] = language_features + if manifest is not None: + self._config["manifest"] = manifest + if project_root: + # For now, we only support local project roots, so use a file schema in the URI. + # In the future, we may support other schemes, such as github, if/when + # we have VS Code Web + Jupyter support. + self._config["projectRoot"] = Path(os.getcwd(), project_root).as_uri() + + def __repr__(self) -> str: + return "Q# initialized with configuration: " + str(self._config) + + # See https://ipython.readthedocs.io/en/stable/config/integrating.html#rich-display + # See https://ipython.org/ipython-doc/3/notebook/nbformat.html#display-data + # This returns a custom MIME-type representation of the Q# configuration. + # This data will be available in the cell output, but will not be displayed + # to the user, as frontends would not know how to render the custom MIME type. + # Editor services that interact with the notebook frontend + # (i.e. the language service) can read and interpret the data. 
+ def _repr_mimebundle_( + self, include: Union[Any, None] = None, exclude: Union[Any, None] = None + ) -> Dict[str, Dict[str, Any]]: + return {"application/x.qsharp-config": self._config} + + def get_target_profile(self) -> str: + """ + Returns the target profile as a string, or "unspecified" if not set. + """ + return self._config.get("targetProfile", "unspecified") + + +# --------------------------------------------------------------------------- +# QIR input data +# --------------------------------------------------------------------------- + + +# Class that wraps generated QIR, which can be used by +# azure-quantum as input data. +# +# This class must implement the QirRepresentable protocol +# that is defined by the azure-quantum package. +# See: https://github.com/microsoft/qdk-python/blob/fcd63c04aa871e49206703bbaa792329ffed13c4/azure-quantum/azure/quantum/target/target.py#L21 +class QirInputData: + # The name of this variable is defined + # by the protocol and must remain unchanged. + _name: str + + def __init__(self, name: str, ll_str: str): + self._name = name + self._ll_str = ll_str + + # The name of this method is defined + # by the protocol and must remain unchanged. 
+ def _repr_qir_(self, **kwargs) -> bytes: + return self._ll_str.encode("utf-8") + + def __str__(self) -> str: + return self._ll_str + + +# --------------------------------------------------------------------------- +# __all__ +# --------------------------------------------------------------------------- + +__all__ = [ + "PauliNoise", + "DepolarizingNoise", + "BitFlipNoise", + "PhaseFlipNoise", + "StateDump", + "ShotResult", + "Config", + "QirInputData", +] diff --git a/source/qdk_package/qdk/cirq/_neutral_atom.py b/source/qdk_package/qdk/cirq/_neutral_atom.py index 4ae6e154af..9384415689 100644 --- a/source/qdk_package/qdk/cirq/_neutral_atom.py +++ b/source/qdk_package/qdk/cirq/_neutral_atom.py @@ -12,7 +12,7 @@ from ._result import NeutralAtomCirqResult, measurement_dict, to_cirq_result if TYPE_CHECKING: - from .._simulation import NoiseConfig + from ..simulation import NoiseConfig from .._device._atom import NeutralAtomDevice @@ -33,7 +33,7 @@ class NeutralAtomSampler(cirq.Sampler): import cirq from qdk.cirq import NeutralAtomSampler - from qdk._simulation import NoiseConfig + from qdk.simulation import NoiseConfig q0, q1 = cirq.LineQubit.range(2) circuit = cirq.Circuit([ @@ -54,7 +54,7 @@ class NeutralAtomSampler(cirq.Sampler): result = sampler.run(circuit, repetitions=1000) print(f"Accepted: {len(result.measurements['m'])} / {len(result.raw_shots)}") - :keyword noise: Optional :class:`~qsharp._simulation.NoiseConfig` describing + :keyword noise: Optional :class:`~qdk.simulation.NoiseConfig` describing per-gate noise. The device decomposes gates to the native set ``{Rz, SX, CZ, MResetZ}``; configure noise on those native gates. For example, a Cirq ``X`` gate arriving via QASM 2.0 is decomposed @@ -69,7 +69,7 @@ class NeutralAtomSampler(cirq.Sampler): :kwtype simulator_type: str :keyword seed: Optional integer seed for reproducibility. Defaults to ``None``. 
:kwtype seed: int - :keyword device: An existing :class:`~qsharp._device._atom.NeutralAtomDevice` + :keyword device: An existing :class:`~qdk._device._atom.NeutralAtomDevice` instance to reuse across calls. A default-configured device is created lazily on the first call when not provided. :kwtype device: NeutralAtomDevice @@ -130,7 +130,7 @@ def _run_once( from .._native import compile_qasm_program_to_qir from .._fs import read_file, list_directory, resolve from .._http import fetch_github - from .._qsharp import TargetProfile + from .._native import TargetProfile # Resolve parameters resolved_circuit = cirq.resolve_parameters(circuit, param_resolver) diff --git a/source/qdk_package/qdk/cirq/_result.py b/source/qdk_package/qdk/cirq/_result.py index 98e37ea822..5e780062af 100644 --- a/source/qdk_package/qdk/cirq/_result.py +++ b/source/qdk_package/qdk/cirq/_result.py @@ -12,7 +12,6 @@ import cirq import numpy as np - # --------------------------------------------------------------------------- # Result type # --------------------------------------------------------------------------- @@ -156,7 +155,7 @@ def _qir_display_to_bitstring(obj: Any) -> str: """ # Handle qsharp.Result enum values produced by the local simulator. try: - from qdk._qsharp import Result as _Result + from qdk._native import Result as _Result if obj == _Result.One: return "1" diff --git a/source/qdk_package/qdk/noisy_simulator/__init__.py b/source/qdk_package/qdk/noisy_simulator/__init__.py deleted file mode 100644 index af73d8f073..0000000000 --- a/source/qdk_package/qdk/noisy_simulator/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from ._noisy_simulator import ( - NoisySimulatorError, - Operation, - Instrument, - DensityMatrixSimulator, - StateVectorSimulator, - DensityMatrix, - StateVector, -) - -__all__ = [ - "NoisySimulatorError", - "Operation", - "Instrument", - "DensityMatrixSimulator", - "StateVectorSimulator", - "DensityMatrix", - "StateVector", -] diff --git a/source/qdk_package/qdk/openqasm/_circuit.py b/source/qdk_package/qdk/openqasm/_circuit.py index eaed78cba8..8719c03a51 100644 --- a/source/qdk_package/qdk/openqasm/_circuit.py +++ b/source/qdk_package/qdk/openqasm/_circuit.py @@ -6,13 +6,12 @@ from .._fs import read_file, list_directory, resolve from .._http import fetch_github from .._native import circuit_qasm_program # type: ignore -from .._qsharp import ( +from .._interpreter import ( get_interpreter, ipython_helper, - Circuit, - CircuitConfig, python_args_to_interpreter_args, ) +from .._native import Circuit, CircuitConfig from .. import telemetry_events diff --git a/source/qdk_package/qdk/openqasm/_compile.py b/source/qdk_package/qdk/openqasm/_compile.py index 8f34963eb1..7a514ec3e5 100644 --- a/source/qdk_package/qdk/openqasm/_compile.py +++ b/source/qdk_package/qdk/openqasm/_compile.py @@ -9,13 +9,13 @@ from .._native import ( # type: ignore compile_qasm_program_to_qir, ) -from .._qsharp import ( - QirInputData, +from .._types import QirInputData +from .._interpreter import ( get_interpreter, ipython_helper, - TargetProfile, python_args_to_interpreter_args, ) +from .._native import TargetProfile from .. 
import telemetry_events diff --git a/source/qdk_package/qdk/openqasm/_estimate.py b/source/qdk_package/qdk/openqasm/_estimate.py index 7534562600..047eef5688 100644 --- a/source/qdk_package/qdk/openqasm/_estimate.py +++ b/source/qdk_package/qdk/openqasm/_estimate.py @@ -12,7 +12,7 @@ ) from ..estimator import EstimatorParams, EstimatorResult -from .._qsharp import ( +from .._interpreter import ( get_interpreter, ipython_helper, python_args_to_interpreter_args, diff --git a/source/qdk_package/qdk/openqasm/_import.py b/source/qdk_package/qdk/openqasm/_import.py index e616ee0d39..d2e26c5616 100644 --- a/source/qdk_package/qdk/openqasm/_import.py +++ b/source/qdk_package/qdk/openqasm/_import.py @@ -7,7 +7,7 @@ from ._ipython import display_or_print from .._fs import read_file, list_directory, resolve from .._http import fetch_github -from .._qsharp import ( +from .._interpreter import ( get_interpreter, ipython_helper, ) diff --git a/source/qdk_package/qdk/openqasm/_run.py b/source/qdk_package/qdk/openqasm/_run.py index 1b82cb41ff..0401f0dad9 100644 --- a/source/qdk_package/qdk/openqasm/_run.py +++ b/source/qdk_package/qdk/openqasm/_run.py @@ -6,19 +6,20 @@ from .._fs import read_file, list_directory, resolve from .._http import fetch_github from .._native import QasmError, Output, run_qasm_program # type: ignore -from .._qsharp import ( +from .._types import ( BitFlipNoise, DepolarizingNoise, PauliNoise, PhaseFlipNoise, ShotResult, StateDump, - StateDumpData, +) +from .._interpreter import ( get_interpreter, ipython_helper, python_args_to_interpreter_args, - NoiseConfig, ) +from .._native import StateDumpData, NoiseConfig from .. 
import telemetry_events from ._ipython import display_or_print diff --git a/source/qdk_package/qdk/qiskit/__init__.py b/source/qdk_package/qdk/qiskit/__init__.py index b32db63a72..db8e28ae8f 100644 --- a/source/qdk_package/qdk/qiskit/__init__.py +++ b/source/qdk_package/qdk/qiskit/__init__.py @@ -32,7 +32,7 @@ from qiskit import QuantumCircuit from qsharp.interop.qiskit import NeutralAtomBackend - from qsharp._simulation import NoiseConfig + from qdk.simulation import NoiseConfig circuit = QuantumCircuit(2, 2) circuit.h(0) @@ -86,7 +86,7 @@ def estimate( :return: The estimated resources. :rtype: EstimatorResult """ - from .._qsharp import ipython_helper + from .._interpreter import ipython_helper ipython_helper() backend = ResourceEstimatorBackend() diff --git a/source/qdk_package/qdk/qiskit/backends/neutral_atom_backend.py b/source/qdk_package/qdk/qiskit/backends/neutral_atom_backend.py index 995615a4c7..77667b369f 100644 --- a/source/qdk_package/qdk/qiskit/backends/neutral_atom_backend.py +++ b/source/qdk_package/qdk/qiskit/backends/neutral_atom_backend.py @@ -50,7 +50,7 @@ class NeutralAtomBackend(BackendBase): from qiskit import QuantumCircuit from qsharp.interop.qiskit import NeutralAtomBackend - from qsharp._simulation import NoiseConfig + from qdk.simulation import NoiseConfig qc = QuantumCircuit(2) qc.h(0) diff --git a/source/qdk_package/qdk/qre/interop/_qir.py b/source/qdk_package/qdk/qre/interop/_qir.py index ebfb9559d1..dd6d50eff4 100644 --- a/source/qdk_package/qdk/qre/interop/_qir.py +++ b/source/qdk_package/qdk/qre/interop/_qir.py @@ -6,7 +6,7 @@ import pyqir from ..._native import QirInstructionId -from ..._simulation import AggregateGatesPass +from ...simulation._simulation import AggregateGatesPass from .. 
import instruction_ids as ids from .._qre import Trace diff --git a/source/qdk_package/qdk/qre/interop/_qsharp.py b/source/qdk_package/qdk/qre/interop/_qsharp.py index e8a8e8fe12..d760dcf66f 100644 --- a/source/qdk_package/qdk/qre/interop/_qsharp.py +++ b/source/qdk_package/qdk/qre/interop/_qsharp.py @@ -7,7 +7,7 @@ import time from typing import Callable, Optional -from ..._qsharp import logical_counts +from ..._interpreter import logical_counts from ...estimator import LogicalCounts from .._qre import Trace from ..instruction_ids import CCX, MEAS_Z, RZ, T, READ_FROM_MEMORY, WRITE_TO_MEMORY diff --git a/source/qdk_package/qdk/qsharp.py b/source/qdk_package/qdk/qsharp.py index 9d54036c6f..edf7953c76 100644 --- a/source/qdk_package/qdk/qsharp.py +++ b/source/qdk_package/qdk/qsharp.py @@ -12,10 +12,10 @@ - :func:`~qsharp.init`, :func:`~qsharp.eval`, :func:`~qsharp.run` — initialize and execute Q# code. - :class:`~qsharp.StateDump`, :class:`~qsharp.TargetProfile` — state inspection and compilation target. - :class:`~qsharp.PauliNoise`, :class:`~qsharp.DepolarizingNoise`, :class:`~qsharp.BitFlipNoise`, :class:`~qsharp.PhaseFlipNoise` — noise models. -- :func:`~qsharp.utils.dump_operation` — compute the unitary matrix of a Q# operation. +- :func:`~qdk.qsharp.dump_operation` — compute the unitary matrix of a Q# operation. For full API documentation see [qsharp](:mod:`qsharp`). 
""" -from ._qsharp import * # pyright: ignore[reportWildcardImportFromLibrary] -from .utils import dump_operation # pyright: ignore[reportUnusedImport] +from ._types import * # pyright: ignore[reportWildcardImportFromLibrary] +from ._interpreter import * # pyright: ignore[reportWildcardImportFromLibrary] diff --git a/source/qdk_package/qdk/simulation.py b/source/qdk_package/qdk/simulation/__init__.py similarity index 54% rename from source/qdk_package/qdk/simulation.py rename to source/qdk_package/qdk/simulation/__init__.py index 0c0fb1db70..8e5701332c 100644 --- a/source/qdk_package/qdk/simulation.py +++ b/source/qdk_package/qdk/simulation/__init__.py @@ -5,29 +5,30 @@ This module exposes the core building blocks for noise-aware quantum simulation: -- :class:`~qsharp._device._atom.NeutralAtomDevice` — models a +- :class:`~qdk.simulation.NeutralAtomDevice` — models a neutral atom quantum device with configurable zone layouts, qubit registers, and movement constraints. Used to compile and simulate circuits on a realistic hardware topology. -- :class:`~qsharp._simulation.NoiseConfig` — configures per-gate Pauli noise +- :class:`~qdk.simulation.NoiseConfig` — configures per-gate Pauli noise (including qubit loss) for use with the Q# simulator. Assign noise tables to individual gate intrinsics to model depolarizing, bit-flip, phase-flip, or correlated noise channels. -- :func:`~qsharp._simulation.run_qir` — simulates QIR as given in one of +- :func:`~qdk.simulation.run_qir` — simulates QIR as given in one of three backend simulators: clifford, gpu or cpu. -- :class:`~qsharp.noisy_simulator.DensityMatrixSimulator` — an experimental simulator that uses +- :class:`~qdk.simulation.DensityMatrixSimulator` — an experimental simulator that uses a density-matrix to track its state. 
-- :class:`~qsharp.noisy_simulator.StateVectorSimulator` — an experimental simulator that uses +- :class:`~qdk.simulation.StateVectorSimulator` — an experimental simulator that uses a state-vector to track its state. """ -from ._device._atom import NeutralAtomDevice +from .._device._atom import NeutralAtomDevice from ._simulation import NoiseConfig, run_qir -from .noisy_simulator import ( +from ._noisy_simulator import ( + NoisySimulatorError, DensityMatrixSimulator, StateVectorSimulator, DensityMatrix, @@ -35,3 +36,16 @@ Operation, Instrument, ) + +__all__ = [ + "NeutralAtomDevice", + "NoiseConfig", + "run_qir", + "NoisySimulatorError", + "Operation", + "Instrument", + "DensityMatrixSimulator", + "StateVectorSimulator", + "DensityMatrix", + "StateVector", +] diff --git a/source/qdk_package/qdk/noisy_simulator/_noisy_simulator.py b/source/qdk_package/qdk/simulation/_noisy_simulator.py similarity index 100% rename from source/qdk_package/qdk/noisy_simulator/_noisy_simulator.py rename to source/qdk_package/qdk/simulation/_noisy_simulator.py diff --git a/source/qdk_package/qdk/noisy_simulator/_noisy_simulator.pyi b/source/qdk_package/qdk/simulation/_noisy_simulator.pyi similarity index 100% rename from source/qdk_package/qdk/noisy_simulator/_noisy_simulator.pyi rename to source/qdk_package/qdk/simulation/_noisy_simulator.pyi diff --git a/source/qdk_package/qdk/_simulation.py b/source/qdk_package/qdk/simulation/_simulation.py similarity index 96% rename from source/qdk_package/qdk/_simulation.py rename to source/qdk_package/qdk/simulation/_simulation.py index b20a2ef54b..f8ae3f53f6 100644 --- a/source/qdk_package/qdk/_simulation.py +++ b/source/qdk_package/qdk/simulation/_simulation.py @@ -5,7 +5,7 @@ import random from typing import Callable, Literal, List, Optional, Tuple, TypeAlias, Union import pyqir -from ._native import ( +from .._native import ( QirInstructionId, QirInstruction, run_clifford, @@ -17,6 +17,7 @@ NoiseConfig, GpuContext, try_create_gpu_adapter, + 
Result, ) from pyqir import ( Function, @@ -25,9 +26,9 @@ Type, Linkage, ) -from ._qsharp import QirInputData, Result +from .._types import QirInputData from typing import TYPE_CHECKING -from ._adaptive_pass import ( +from .._adaptive_pass import ( AdaptiveProfilePass, AdaptiveProgram, Bytecode, @@ -35,7 +36,7 @@ ) if TYPE_CHECKING: # This is in the pyi file only - from ._native import GpuShotResults + from .._native import GpuShotResults class AggregateGatesPass(pyqir.QirModuleVisitor): @@ -515,9 +516,9 @@ def run_base( Runs a base profile program given a rust simulator. Adds output recording logic. """ if noise is None: - (gates, num_qubits, num_results) = AggregateGatesPass().run(mod) + gates, num_qubits, num_results = AggregateGatesPass().run(mod) else: - (gates, num_qubits, num_results) = CorrelatedNoisePass(noise).run(mod) + gates, num_qubits, num_results = CorrelatedNoisePass(noise).run(mod) recorder = OutputRecordingPass() recorder.run(mod) return list( @@ -551,7 +552,7 @@ def run_qir_clifford( noise: Optional[NoiseConfig] = None, seed: Optional[int] = None, ) -> List: - (mod, shots, noise, seed) = preprocess_simulation_input(input, shots, noise, seed) + mod, shots, noise, seed = preprocess_simulation_input(input, shots, noise, seed) if is_adaptive(mod): program = AdaptiveProfilePass(Bytecode.Bit64).run(mod, noise) return run_adaptive(run_clifford_adaptive, mod, program, shots, noise, seed) @@ -565,7 +566,7 @@ def run_qir_cpu( noise: Optional[NoiseConfig] = None, seed: Optional[int] = None, ) -> List: - (mod, shots, noise, seed) = preprocess_simulation_input(input, shots, noise, seed) + mod, shots, noise, seed = preprocess_simulation_input(input, shots, noise, seed) DecomposeCcxPass().run(mod) if is_adaptive(mod): program = AdaptiveProfilePass(Bytecode.Bit64).run(mod, noise) @@ -580,7 +581,7 @@ def run_qir_gpu( noise: Optional[NoiseConfig] = None, seed: Optional[int] = None, ) -> List: - (mod, shots, noise, seed) = preprocess_simulation_input(input, shots, 
noise, seed) + mod, shots, noise, seed = preprocess_simulation_input(input, shots, noise, seed) # Ccx is not support in the GPU simulator, decompose it DecomposeCcxPass().run(mod) if is_adaptive(mod): @@ -597,13 +598,13 @@ def prepare_qir_with_correlated_noise( noise_tables: List[Tuple[int, str, int]], ) -> Tuple[List[QirInstruction], int, int]: # Turn the input into a QIR module - (mod, _, _, _) = preprocess_simulation_input(input, None, None, None) + mod, _, _, _ = preprocess_simulation_input(input, None, None, None) # Ccx is not support in the GPU simulator, decompose it DecomposeCcxPass().run(mod) # Extract the gates including correlated noise instructions - (gates, required_num_qubits, required_num_results) = GpuCorrelatedNoisePass( + gates, required_num_qubits, required_num_results = GpuCorrelatedNoisePass( noise_tables ).run(mod) @@ -650,7 +651,7 @@ def set_program(self, input: Union[QirInputData, str, bytes]): without needing to create a new simulator instance or reloading noise tables. """ # Parse the QIR module to detect profile - (mod, _, _, _) = preprocess_simulation_input(input, None, None, None) + mod, _, _, _ = preprocess_simulation_input(input, None, None, None) if is_adaptive(mod): self._is_adaptive = True # Build noise_intrinsics dict from loaded noise tables (if any) @@ -666,7 +667,7 @@ def set_program(self, input: Union[QirInputData, str, bytes]): self._recorder.run(mod) else: self._is_adaptive = False - (self.gates, self.required_num_qubits, self.required_num_results) = ( + self.gates, self.required_num_qubits, self.required_num_results = ( prepare_qir_with_correlated_noise( input, self.tables if not self.tables is None else [] ) diff --git a/source/qdk_package/qdk/utils/__init__.py b/source/qdk_package/qdk/utils/__init__.py deleted file mode 100644 index 03d71482e7..0000000000 --- a/source/qdk_package/qdk/utils/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
- -from ._utils import dump_operation - -__all__ = [ - "dump_operation", -] diff --git a/source/qdk_package/qdk/utils/_utils.py b/source/qdk_package/qdk/utils/_utils.py deleted file mode 100644 index 26984dc4db..0000000000 --- a/source/qdk_package/qdk/utils/_utils.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from .._qsharp import run -from typing import List -import math - - -def dump_operation(operation: str, num_qubits: int) -> List[List[complex]]: - """ - Returns a square matrix of complex numbers representing the operation performed. - - :param operation: The operation to be performed, which must operate on a list of qubits. - :param num_qubits: The number of qubits to be used. - - :return: The matrix representing the operation. - :rtype: List[List[complex]] - """ - code = f"""{{ - let op = {operation}; - use (targets, extra) = (Qubit[{num_qubits}], Qubit[{num_qubits}]); - for i in 0..{num_qubits}-1 {{ - H(targets[i]); - CNOT(targets[i], extra[i]); - }} - operation ApplyOp (op : (Qubit[] => Unit), targets : Qubit[]) : Unit {{ op(targets); }} - ApplyOp(op, targets); - Microsoft.Quantum.Diagnostics.DumpMachine(); - ResetAll(targets + extra); - }}""" - result = run(code, shots=1, save_events=True)[0] - state = result["events"][-1].state_dump().get_dict() - num_entries = pow(2, num_qubits) - factor = math.sqrt(num_entries) - ndigits = 6 - matrix = [] - for i in range(num_entries): - matrix += [[]] - for j in range(num_entries): - entry = state.get(i * num_entries + j) - if entry is None: - matrix[i] += [complex(0, 0)] - else: - matrix[i] += [ - complex( - round(factor * entry.real, ndigits), - round(factor * entry.imag, ndigits), - ) - ] - return matrix diff --git a/source/qdk_package/tests-integration/conftest.py b/source/qdk_package/tests-integration/conftest.py index e1ca9f1c71..315c5c2143 100644 --- a/source/qdk_package/tests-integration/conftest.py +++ 
b/source/qdk_package/tests-integration/conftest.py @@ -17,17 +17,15 @@ # used throughout tests via the ``import qdk as qsharp`` alias. # --------------------------------------------------------------------------- import qdk -from qdk._qsharp import ( # noqa: E402 +from qdk._interpreter import ( # noqa: E402 eval, run, compile, circuit, estimate, logical_counts, - QSharpError, - CircuitGenerationMethod, ) -from qdk._native import estimate_custom # type: ignore # noqa: E402 +from qdk._native import QSharpError, CircuitGenerationMethod, estimate_custom # type: ignore # noqa: E402 qdk.eval = eval qdk.run = run diff --git a/source/qdk_package/tests-integration/interop_cirq/test_neutral_atom.py b/source/qdk_package/tests-integration/interop_cirq/test_neutral_atom.py index 13d48e3321..e69321185e 100644 --- a/source/qdk_package/tests-integration/interop_cirq/test_neutral_atom.py +++ b/source/qdk_package/tests-integration/interop_cirq/test_neutral_atom.py @@ -7,7 +7,7 @@ import numpy as np from qdk.cirq import NeutralAtomCirqResult, NeutralAtomSampler -from qdk._simulation import NoiseConfig +from qdk.simulation import NoiseConfig from qdk._device._atom import NeutralAtomDevice diff --git a/source/qdk_package/tests-integration/interop_qiskit/test_neutral_atom.py b/source/qdk_package/tests-integration/interop_qiskit/test_neutral_atom.py index 7f1f140fd3..89aa66bbd1 100644 --- a/source/qdk_package/tests-integration/interop_qiskit/test_neutral_atom.py +++ b/source/qdk_package/tests-integration/interop_qiskit/test_neutral_atom.py @@ -12,7 +12,7 @@ from qiskit.circuit import QuantumCircuit from qiskit.providers import JobStatus from qdk.qiskit import NeutralAtomBackend - from qdk._simulation import NoiseConfig + from qdk.simulation import NoiseConfig from qdk._device._atom import NeutralAtomDevice from .test_circuits import generate_repro_information diff --git a/source/qdk_package/tests/conftest.py b/source/qdk_package/tests/conftest.py index 9814a62367..b83f73269f 100644 --- 
a/source/qdk_package/tests/conftest.py +++ b/source/qdk_package/tests/conftest.py @@ -17,17 +17,16 @@ # This is test infrastructure only – it does not affect the public package. # --------------------------------------------------------------------------- import qdk -from qdk._qsharp import ( # noqa: E402 +from qdk._interpreter import ( # noqa: E402 eval, run, compile, circuit, estimate, logical_counts, - QSharpError, - CircuitGenerationMethod, + dump_operation, ) -from qdk._native import estimate_custom # type: ignore # noqa: E402 +from qdk._native import QSharpError, CircuitGenerationMethod, estimate_custom # type: ignore # noqa: E402 qdk.eval = eval qdk.run = run @@ -38,3 +37,4 @@ qdk.QSharpError = QSharpError qdk.CircuitGenerationMethod = CircuitGenerationMethod qdk.estimate_custom = estimate_custom +qdk.dump_operation = dump_operation diff --git a/source/qdk_package/tests/test_adaptive_cpu_bytecode.py b/source/qdk_package/tests/test_adaptive_cpu_bytecode.py index eb40059f27..c0b9986401 100644 --- a/source/qdk_package/tests/test_adaptive_cpu_bytecode.py +++ b/source/qdk_package/tests/test_adaptive_cpu_bytecode.py @@ -15,7 +15,8 @@ from collections import Counter import pytest -from qdk._simulation import run_qir, NoiseConfig, Result +from qdk.simulation import run_qir, NoiseConfig +from qdk.simulation._simulation import Result import qdk import qdk.openqasm from typing import Literal diff --git a/source/qdk_package/tests/test_adaptive_cpu_noise.py b/source/qdk_package/tests/test_adaptive_cpu_noise.py index 327942b6b4..5e7bed7761 100644 --- a/source/qdk_package/tests/test_adaptive_cpu_noise.py +++ b/source/qdk_package/tests/test_adaptive_cpu_noise.py @@ -11,9 +11,11 @@ """ from collections import Counter +from pathlib import Path from typing import Optional, List import pytest -from qdk._simulation import run_qir, NoiseConfig, Result +from qdk.simulation import run_qir, NoiseConfig +from qdk.simulation._simulation import Result import qdk import qdk.openqasm 
from typing import Literal @@ -256,7 +258,7 @@ def test_noise_intrinsics_noisy(sim_type): @pytest.mark.parametrize("sim_type", SIM_TYPES) def test_noise_intrinsics_load_csv_dir(sim_type): noise = NoiseConfig() - noise.load_csv_dir("./csv_dir_test") + noise.load_csv_dir(str(Path(__file__).parent / "csv_dir_test")) output = run_qir(QIR_WITH_CORRELATED_NOISE, shots=1, noise=noise, type=sim_type) assert output == [[Result.One, Result.Zero, Result.One]] diff --git a/source/qdk_package/tests/test_adaptive_cpu_quantum_ops.py b/source/qdk_package/tests/test_adaptive_cpu_quantum_ops.py index 763424adbb..2f10643f58 100644 --- a/source/qdk_package/tests/test_adaptive_cpu_quantum_ops.py +++ b/source/qdk_package/tests/test_adaptive_cpu_quantum_ops.py @@ -14,7 +14,8 @@ from collections import Counter import pytest -from qdk._simulation import run_qir, Result +from qdk.simulation import run_qir +from qdk.simulation._simulation import Result from typing import Literal SIM_TYPES = ["cpu", "clifford"] diff --git a/source/qdk_package/tests/test_adaptive_gpu_bytecode.py b/source/qdk_package/tests/test_adaptive_gpu_bytecode.py index f2c1b8f228..4956fb139c 100644 --- a/source/qdk_package/tests/test_adaptive_gpu_bytecode.py +++ b/source/qdk_package/tests/test_adaptive_gpu_bytecode.py @@ -35,7 +35,7 @@ except OSError as e: SKIP_REASON = str(e) -from qdk._simulation import GpuSimulator, NoiseConfig, Result, run_qir +from qdk.simulation._simulation import GpuSimulator, NoiseConfig, Result, run_qir # --------------------------------------------------------------------------- # Helpers diff --git a/source/qdk_package/tests/test_adaptive_gpu_noise.py b/source/qdk_package/tests/test_adaptive_gpu_noise.py index 9303d0f4d9..0b7eec71fd 100644 --- a/source/qdk_package/tests/test_adaptive_gpu_noise.py +++ b/source/qdk_package/tests/test_adaptive_gpu_noise.py @@ -36,7 +36,8 @@ except OSError as e: SKIP_REASON = str(e) -from qdk._simulation import run_qir, GpuSimulator, NoiseConfig, Result +from 
qdk.simulation import run_qir, NoiseConfig +from qdk.simulation._simulation import GpuSimulator, Result # --------------------------------------------------------------------------- # Helpers diff --git a/source/qdk_package/tests/test_adaptive_gpu_quantum_ops.py b/source/qdk_package/tests/test_adaptive_gpu_quantum_ops.py index 47e37bb1ed..186581d925 100644 --- a/source/qdk_package/tests/test_adaptive_gpu_quantum_ops.py +++ b/source/qdk_package/tests/test_adaptive_gpu_quantum_ops.py @@ -34,7 +34,7 @@ except OSError as e: SKIP_REASON = str(e) -from qdk._simulation import GpuSimulator, Result +from qdk.simulation._simulation import GpuSimulator, Result def map_result_list_to_str(results): diff --git a/source/qdk_package/tests/test_clifford_simulator.py b/source/qdk_package/tests/test_clifford_simulator.py index e72d451b27..6cdcbb6a13 100644 --- a/source/qdk_package/tests/test_clifford_simulator.py +++ b/source/qdk_package/tests/test_clifford_simulator.py @@ -5,7 +5,8 @@ import pyqir import qdk as qsharp -from qdk._simulation import run_qir_clifford, NoiseConfig +from qdk.simulation import NoiseConfig +from qdk.simulation._simulation import run_qir_clifford from qdk._device._atom import NeutralAtomDevice from qdk._device._atom._decomp import DecomposeRzAnglesToCliffordGates from qdk._device._atom._validate import ValidateNoConditionalBranches @@ -71,8 +72,7 @@ def test_million(): def test_program_with_branching_succeeds(): qsharp.init(target_profile=TargetProfile.Adaptive_RI) - qsharp.eval( - """ + qsharp.eval(""" operation Main() : Result { use q = Qubit(); H(q); @@ -81,8 +81,7 @@ def test_program_with_branching_succeeds(): } return MResetZ(q); } - """ - ) + """) ir = qsharp.compile("Main()") results = run_qir_clifford(str(ir), 1, NoiseConfig()) assert len(results) == 1 diff --git a/source/qdk_package/tests/test_correlated_noise.py b/source/qdk_package/tests/test_correlated_noise.py index a5cf80843d..0a330bba29 100644 --- 
a/source/qdk_package/tests/test_correlated_noise.py +++ b/source/qdk_package/tests/test_correlated_noise.py @@ -3,7 +3,8 @@ import pytest import sys -from qdk._simulation import NoiseConfig, run_qir +from pathlib import Path +from qdk.simulation import NoiseConfig, run_qir from qdk import Result import qdk.openqasm @@ -76,7 +77,7 @@ def test_noisy_simulation_gpu(): def test_load_csv_dir(): noise = NoiseConfig() - noise.load_csv_dir("./csv_dir_test") + noise.load_csv_dir(str(Path(__file__).parent / "csv_dir_test")) for type in CPU_SIMULATORS: output = run_qir(QIR_WITH_CORRELATED_NOISE, shots=1, noise=noise, type=type) assert output == [[Result.One, Result.Zero, Result.One]] @@ -85,7 +86,7 @@ def test_load_csv_dir(): @pytest.mark.skipif(not GPU_AVAILABLE, reason=SKIP_REASON) def test_load_csv_dir_gpu(): noise = NoiseConfig() - noise.load_csv_dir("./csv_dir_test") + noise.load_csv_dir(str(Path(__file__).parent / "csv_dir_test")) output = run_qir(QIR_WITH_CORRELATED_NOISE, shots=1, noise=noise, type="gpu") assert output == [[Result.One, Result.Zero, Result.One]] diff --git a/source/qdk_package/tests/test_cpu_simulator.py b/source/qdk_package/tests/test_cpu_simulator.py index 183ccf9952..6ddf6f17ce 100644 --- a/source/qdk_package/tests/test_cpu_simulator.py +++ b/source/qdk_package/tests/test_cpu_simulator.py @@ -15,7 +15,8 @@ from qdk import TargetProfile from qdk import openqasm -from qdk._simulation import run_qir_cpu, NoiseConfig +from qdk.simulation import NoiseConfig +from qdk.simulation._simulation import run_qir_cpu current_file_path = Path(__file__) # Get the directory of the current file @@ -44,16 +45,14 @@ def result_array_to_string(results: Sequence[Result]) -> str: def test_cpu_seeding_no_noise(): qsharp.init(target_profile=TargetProfile.Base) - qsharp.eval( - """ + qsharp.eval(""" operation BellTest() : Result[] { use qs = Qubit[2]; H(qs[0]); CNOT(qs[0], qs[1]); MResetEachZ(qs) } - """ - ) + """) qir = str(qsharp.compile("BellTest()")) diff --git 
a/source/qdk_package/tests/test_enums.py b/source/qdk_package/tests/test_enums.py index d9ffb69e98..937c8ca891 100644 --- a/source/qdk_package/tests/test_enums.py +++ b/source/qdk_package/tests/test_enums.py @@ -5,7 +5,6 @@ import pytest import qdk as qsharp import qdk.code -import qdk.utils from contextlib import redirect_stdout import io diff --git a/source/qdk_package/tests/test_gpu_simulator.py b/source/qdk_package/tests/test_gpu_simulator.py index dc95ac5d0e..d9290689fc 100644 --- a/source/qdk_package/tests/test_gpu_simulator.py +++ b/source/qdk_package/tests/test_gpu_simulator.py @@ -38,7 +38,8 @@ from qdk import TargetProfile from qdk import openqasm -from qdk._simulation import run_qir_gpu, NoiseConfig +from qdk.simulation import NoiseConfig +from qdk.simulation._simulation import run_qir_gpu current_file_path = Path(__file__) # Get the directory of the current file @@ -68,16 +69,14 @@ def result_array_to_string(results: Sequence[Result]) -> str: @pytest.mark.skipif(not GPU_AVAILABLE, reason=SKIP_REASON) def test_gpu_seeding_no_noise(): qsharp.init(target_profile=TargetProfile.Base) - qsharp.eval( - """ + qsharp.eval(""" operation BellTest() : Result[] { use qs = Qubit[2]; H(qs[0]); CNOT(qs[0], qs[1]); MResetEachZ(qs) } - """ - ) + """) qir = str(qsharp.compile("BellTest()")) @@ -520,8 +519,7 @@ def test_gpu_mz_idempotent_noiseless(): """MZ (measure without reset) should be idempotent: two consecutive measurements on the same qubit must always agree.""" qsharp.init(target_profile=TargetProfile.Base) - qsharp.eval( - """ + qsharp.eval(""" operation MzIdempotent() : Result[] { use q = Qubit(); H(q); @@ -529,8 +527,7 @@ def test_gpu_mz_idempotent_noiseless(): let r1 = M(q); [r0, r1] } - """ - ) + """) qir = str(qsharp.compile("MzIdempotent()")) shots = 1000 @@ -563,8 +560,7 @@ def test_gpu_reset_preserves_distribution(): After CNOT the state is cos(π/12)|00⟩ + sin(π/12)|11⟩. 
Reset on q0 collapses it, leaving q1 with the same skewed distribution.""" qsharp.init(target_profile=TargetProfile.Base) - qsharp.eval( - """ + qsharp.eval(""" operation ResetPreservesDistribution() : Result[] { use qs = Qubit[2]; Rx(Std.Math.PI() / 6.0, qs[0]); @@ -572,8 +568,7 @@ def test_gpu_reset_preserves_distribution(): Reset(qs[0]); MResetEachZ(qs) } - """ - ) + """) qir = str(qsharp.compile("ResetPreservesDistribution()")) shots = 1000 diff --git a/source/qdk_package/tests/test_interpreter.py b/source/qdk_package/tests/test_interpreter.py index 34d03c095b..5b85b3b697 100644 --- a/source/qdk_package/tests/test_interpreter.py +++ b/source/qdk_package/tests/test_interpreter.py @@ -10,7 +10,7 @@ TargetProfile, CircuitConfig, ) -from qdk._qsharp import qsharp_value_to_python_value +from qdk._interpreter import qsharp_value_to_python_value import pytest from expecttest import assert_expected_inline @@ -452,41 +452,33 @@ def callback(output): def test_dump_circuit() -> None: e = Interpreter(TargetProfile.Unrestricted, trace_circuit=True) - e.interpret( - """ + e.interpret(""" use q1 = Qubit(); use q2 = Qubit(); X(q1); - """ - ) + """) circuit = e.dump_circuit() - assert str(circuit) == dedent( - """\ + assert str(circuit) == dedent("""\ q_0 ── X ── q_1 ─────── - """ - ) + """) e.interpret("X(q2);") circuit = e.dump_circuit() - assert str(circuit) == dedent( - """\ + assert str(circuit) == dedent("""\ q_0 ── X ── q_1 ── X ── - """ - ) + """) def test_entry_expr_circuit() -> None: e = Interpreter(TargetProfile.Unrestricted) e.interpret("operation Foo() : Result { use q = Qubit(); H(q); return M(q) }") circuit = e.circuit(CircuitConfig(), "Foo()") - assert str(circuit) == dedent( - """\ + assert str(circuit) == dedent("""\ q_0 ── H ──── M ── ╘═══ - """ - ) + """) def test_swap_label_circuit() -> None: @@ -495,12 +487,10 @@ def test_swap_label_circuit() -> None: "operation Foo() : Unit { use q1 = Qubit(); use q2 = Qubit(); X(q1); Relabel([q1, q2], [q2, q1]); X(q2); 
}" ) circuit = e.circuit(CircuitConfig(), "Foo()") - assert str(circuit) == dedent( - """\ + assert str(circuit) == dedent("""\ q_0 ── X ──── X ── q_1 ────────────── - """ - ) + """) def test_callables_failing_profile_validation_are_not_registered() -> None: @@ -581,7 +571,9 @@ def test_adaptive_ri_qir_can_be_generated() -> None: e = Interpreter(TargetProfile.Adaptive_RI) e.interpret(adaptive_input) qir = e.qir("Test.Main()") - assert_expected_inline(qir, """\ + assert_expected_inline( + qir, + """\ %Result = type opaque %Qubit = type opaque @@ -618,7 +610,8 @@ def test_adaptive_ri_qir_can_be_generated() -> None: !2 = !{i32 1, !"dynamic_qubit_management", i1 false} !3 = !{i32 1, !"dynamic_result_management", i1 false} !4 = !{i32 5, !"int_computations", !{!"i64"}} -""") +""", + ) def test_base_qir_can_be_generated() -> None: @@ -642,7 +635,9 @@ def test_base_qir_can_be_generated() -> None: e = Interpreter(TargetProfile.Base) e.interpret(base_input) qir = e.qir("Test.Main()") - assert_expected_inline(qir, """\ + assert_expected_inline( + qir, + """\ %Result = type opaque %Qubit = type opaque @@ -678,19 +673,18 @@ def test_base_qir_can_be_generated() -> None: !1 = !{i32 7, !"qir_minor_version", i32 0} !2 = !{i32 1, !"dynamic_qubit_management", i1 false} !3 = !{i32 1, !"dynamic_result_management", i1 false} -""") +""", + ) def test_operation_circuit() -> None: e = Interpreter(TargetProfile.Unrestricted) e.interpret("operation Foo(q: Qubit) : Result { H(q); return M(q) }") circuit = e.circuit(CircuitConfig(), operation="Foo") - assert str(circuit) == dedent( - """\ + assert str(circuit) == dedent("""\ q_0 ── H ──── M ── ╘═══ - """ - ) + """) def test_unsupported_operation_circuit() -> None: diff --git a/source/qdk_package/tests/test_noisy_config.py b/source/qdk_package/tests/test_noisy_config.py index d7ab2a11dd..720b9a7a5d 100644 --- a/source/qdk_package/tests/test_noisy_config.py +++ b/source/qdk_package/tests/test_noisy_config.py @@ -1,7 +1,7 @@ # Copyright (c) 
Microsoft Corporation. # Licensed under the MIT License. -from qdk._simulation import NoiseConfig +from qdk.simulation import NoiseConfig import pytest diff --git a/source/qdk_package/tests/test_noisy_simulator.py b/source/qdk_package/tests/test_noisy_simulator.py index c5ddc17d49..b2a2e6aaec 100644 --- a/source/qdk_package/tests/test_noisy_simulator.py +++ b/source/qdk_package/tests/test_noisy_simulator.py @@ -1,7 +1,7 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. -from qdk.noisy_simulator import ( +from qdk.simulation import ( NoisySimulatorError, Operation, Instrument, @@ -10,7 +10,6 @@ ) import pytest - # Tests for the Q# noisy simulator. diff --git a/source/qdk_package/tests/test_qsharp.py b/source/qdk_package/tests/test_qsharp.py index 2b796987c9..cec74ed94d 100644 --- a/source/qdk_package/tests/test_qsharp.py +++ b/source/qdk_package/tests/test_qsharp.py @@ -5,7 +5,6 @@ import pytest import qdk as qsharp import qdk.code -import qdk.utils from contextlib import redirect_stdout import io @@ -26,13 +25,11 @@ def test_stdout_multiple_lines() -> None: qsharp.init(target_profile=qsharp.TargetProfile.Unrestricted) f = io.StringIO() with redirect_stdout(f): - qsharp.eval( - """ + qsharp.eval(""" use q = Qubit(); Microsoft.Quantum.Diagnostics.DumpMachine(); Message("Hello!"); - """ - ) + """) assert f.getvalue() == "STATE:\n|0⟩: 1.0000+0.0000𝑖\nHello!\n" @@ -107,13 +104,11 @@ def test_classical_seed() -> None: def test_dump_machine() -> None: qsharp.init(target_profile=qsharp.TargetProfile.Unrestricted) - qsharp.eval( - """ + qsharp.eval(""" use q1 = Qubit(); use q2 = Qubit(); X(q1); - """ - ) + """) state_dump = qsharp.dump_machine() assert state_dump.qubit_count == 2 assert len(state_dump) == 1 @@ -170,24 +165,24 @@ def test_dump_machine() -> None: def test_dump_operation() -> None: qsharp.init(target_profile=qsharp.TargetProfile.Unrestricted) - res = qsharp.utils.dump_operation("qs => ()", 1) + res = qsharp.dump_operation("qs => ()", 
1) assert res == [ [complex(1.0, 0.0), complex(0.0, 0.0)], [complex(0.0, 0.0), complex(1.0, 0.0)], ] - res = qsharp.utils.dump_operation("qs => H(qs[0])", 1) + res = qsharp.dump_operation("qs => H(qs[0])", 1) assert res == [ [complex(0.707107, 0.0), complex(0.707107, 0.0)], [complex(0.707107, 0.0), complex(-0.707107, 0.0)], ] - res = qsharp.utils.dump_operation("qs => CNOT(qs[0], qs[1])", 2) + res = qsharp.dump_operation("qs => CNOT(qs[0], qs[1])", 2) assert res == [ [complex(1.0, 0.0), complex(0.0, 0.0), complex(0.0, 0.0), complex(0.0, 0.0)], [complex(0.0, 0.0), complex(1.0, 0.0), complex(0.0, 0.0), complex(0.0, 0.0)], [complex(0.0, 0.0), complex(0.0, 0.0), complex(0.0, 0.0), complex(1.0, 0.0)], [complex(0.0, 0.0), complex(0.0, 0.0), complex(1.0, 0.0), complex(0.0, 0.0)], ] - res = qsharp.utils.dump_operation("qs => CCNOT(qs[0], qs[1], qs[2])", 3) + res = qsharp.dump_operation("qs => CCNOT(qs[0], qs[1], qs[2])", 3) assert res == [ [ complex(1.0, 0.0), @@ -273,14 +268,14 @@ def test_dump_operation() -> None: qsharp.eval( "operation ApplySWAP(qs : Qubit[]) : Unit is Ctl + Adj { SWAP(qs[0], qs[1]); }" ) - res = qsharp.utils.dump_operation("ApplySWAP", 2) + res = qsharp.dump_operation("ApplySWAP", 2) assert res == [ [complex(1.0, 0.0), complex(0.0, 0.0), complex(0.0, 0.0), complex(0.0, 0.0)], [complex(0.0, 0.0), complex(0.0, 0.0), complex(1.0, 0.0), complex(0.0, 0.0)], [complex(0.0, 0.0), complex(1.0, 0.0), complex(0.0, 0.0), complex(0.0, 0.0)], [complex(0.0, 0.0), complex(0.0, 0.0), complex(0.0, 0.0), complex(1.0, 0.0)], ] - res = qsharp.utils.dump_operation("qs => ()", 8) + res = qsharp.dump_operation("qs => ()", 8) for i in range(8): for j in range(8): if i == j: @@ -901,12 +896,10 @@ def test_callables_with_unsupported_return_types_raise_errors_on_call() -> None: def test_callable_with_unsupported_udt_type_raises_error_on_call() -> None: qsharp.init() - qsharp.eval( - """ + qsharp.eval(""" newtype Data = (Int, Double); function Unsupported(a : Data) : Unit { } - 
""" - ) + """) with pytest.raises( qsharp.QSharpError, match='unsupported input type: `UDT<"Data":' ): @@ -915,12 +908,10 @@ def test_callable_with_unsupported_udt_type_raises_error_on_call() -> None: def test_callable_with_unsupported_udt_return_type_raises_error_on_call() -> None: qsharp.init() - qsharp.eval( - """ + qsharp.eval(""" newtype Data = (Int, Double); function Unsupported() : Data { fail "won\'t be called" } - """ - ) + """) with pytest.raises( qsharp.QSharpError, match='unsupported output type: `UDT<"Data":' ): @@ -932,14 +923,12 @@ def test_returning_unsupported_udt_from_eval_raises_error_on_call() -> None: with pytest.raises( TypeError, match="structs with anonymous fields are not supported: Data" ): - qsharp.eval( - """ + qsharp.eval(""" { newtype Data = (Int, Double); Data(2, 3.0) } - """ - ) + """) def test_struct_call_constructor_exposed_into_env() -> None: @@ -951,14 +940,12 @@ def test_struct_call_constructor_exposed_into_env() -> None: def test_udts_are_accepted_as_input() -> None: qsharp.init() - qsharp.eval( - """ + qsharp.eval(""" struct Data { a : Int, b : Int } function SwapData(data : Data) : Data { new Data { a = data.b, b = data.a } } - """ - ) + """) # Dict val = qsharp.code.SwapData({"a": 2, "b": 3}) assert val.a == 3 and val.b == 2 @@ -1056,72 +1043,59 @@ def test_qsharp_callables_are_not_shadowed() -> None: def test_circuit_from_python_callable() -> None: qsharp.init() - qsharp.eval( - """ + qsharp.eval(""" operation Foo() : Unit { use q1 = Qubit(); use q2 = Qubit(); X(q1); } - """ - ) + """) circuit = qsharp.circuit(qsharp.code.Foo) - assert str(circuit) == dedent( - """\ + assert str(circuit) == dedent("""\ q_0 ── X ── q_1 ─────── - """ - ) + """) def test_circuit_from_qsharp_callable() -> None: qsharp.init() - qsharp.eval( - """ + qsharp.eval(""" operation Foo() : Unit { use q1 = Qubit(); use q2 = Qubit(); X(q1); } - """ - ) + """) foo = qsharp.eval("Foo") circuit = qsharp.circuit(foo) - assert str(circuit) == dedent( - """\ + 
assert str(circuit) == dedent("""\ q_0 ── X ── q_1 ─────── - """ - ) + """) def test_circuit_with_generation_method() -> None: qsharp.init() - qsharp.eval( - """ + qsharp.eval(""" operation Foo() : Unit { use q1 = Qubit(); use q2 = Qubit(); X(q1); Reset(q1); } - """ - ) + """) circuit = qsharp.circuit( qsharp.code.Foo, generation_method=qsharp.CircuitGenerationMethod.Simulate ) - assert str(circuit) == dedent( - """\ + assert str(circuit) == dedent("""\ q_0 ── X ──── |0〉 ── q_1 ──────────────── - """ - ) + """) def test_circuit_with_static_generation_method() -> None: qsharp.init(target_profile=qsharp.TargetProfile.Adaptive_RIF) - qsharp.eval( - """ + qsharp.eval(""" operation Foo() : Result { use q = Qubit(); H(q); @@ -1130,23 +1104,19 @@ def test_circuit_with_static_generation_method() -> None: Reset(q); r } - """ - ) + """) circuit = qsharp.circuit( "Foo()", generation_method=qsharp.CircuitGenerationMethod.Static ) - assert str(circuit) == dedent( - """\ + assert str(circuit) == dedent("""\ q_0 ── H ──── M ──── if: c_0 = |1〉 ──── |0〉 ── ╘═══════════ ● ═════════════════ - """ - ) + """) def test_circuit_from_qsharp_callable_static() -> None: qsharp.init(target_profile=qsharp.TargetProfile.Adaptive_RIF) - qsharp.eval( - """ + qsharp.eval(""" operation Foo() : Unit { use q = Qubit(); H(q); @@ -1154,68 +1124,55 @@ def test_circuit_from_qsharp_callable_static() -> None: if r == One { X(q); } Reset(q); } - """ - ) + """) circuit = qsharp.circuit( qsharp.code.Foo, generation_method=qsharp.CircuitGenerationMethod.Static ) - assert str(circuit) == dedent( - """\ + assert str(circuit) == dedent("""\ q_0 ── H ──── M ──── if: c_0 = |1〉 ──── |0〉 ── ╘═══════════ ● ═════════════════ - """ - ) + """) def test_circuit_from_python_callable_with_args() -> None: qsharp.init() - qsharp.eval( - """ + qsharp.eval(""" operation Foo(nQubits : Int) : Unit { use qs = Qubit[nQubits]; ApplyToEach(X, qs); } - """ - ) + """) circuit = qsharp.circuit(qsharp.code.Foo, 2) - assert str(circuit) 
== dedent( - """\ + assert str(circuit) == dedent("""\ q_0 ── X ── q_1 ── X ── - """ - ) + """) def test_circuit_from_qsharp_callable_with_args() -> None: qsharp.init() - qsharp.eval( - """ + qsharp.eval(""" operation Foo(nQubits : Int) : Unit { use qs = Qubit[nQubits]; ApplyToEach(X, qs); } - """ - ) + """) foo = qsharp.eval("Foo") circuit = qsharp.circuit(foo, 2) - assert str(circuit) == dedent( - """\ + assert str(circuit) == dedent("""\ q_0 ── X ── q_1 ── X ── - """ - ) + """) def test_circuit_with_measure_from_callable() -> None: qsharp.init() qsharp.eval("operation Foo() : Result { use q = Qubit(); H(q); return M(q) }") circuit = qsharp.circuit(qsharp.code.Foo) - assert str(circuit) == dedent( - """\ + assert str(circuit) == dedent("""\ q_0 ── H ──── M ── ╘═══ - """ - ) + """) def test_swap_label_circuit_from_callable() -> None: @@ -1224,9 +1181,7 @@ def test_swap_label_circuit_from_callable() -> None: "operation Foo() : Unit { use q1 = Qubit(); use q2 = Qubit(); X(q1); Relabel([q1, q2], [q2, q1]); X(q2); }" ) circuit = qsharp.circuit(qsharp.code.Foo) - assert str(circuit) == dedent( - """\ + assert str(circuit) == dedent("""\ q_0 ── X ──── X ── q_1 ────────────── - """ - ) + """) diff --git a/source/qdk_package/tests/test_simulators_gates_noiseless.py b/source/qdk_package/tests/test_simulators_gates_noiseless.py index b06840dd99..02a666729b 100644 --- a/source/qdk_package/tests/test_simulators_gates_noiseless.py +++ b/source/qdk_package/tests/test_simulators_gates_noiseless.py @@ -5,12 +5,11 @@ import os import pytest import qdk as qsharp -from qdk._qsharp import compile +from qdk._interpreter import compile from qdk import Result, TargetProfile -from qdk._simulation import ( +from qdk.simulation import run_qir as _run_qir, NoiseConfig +from qdk.simulation._simulation import ( GpuSimulator, - run_qir as _run_qir, - NoiseConfig, try_create_gpu_adapter, ) from typing import Literal, List, Optional, TypeAlias diff --git 
a/source/qdk_package/tests/test_simulators_gates_noisy.py b/source/qdk_package/tests/test_simulators_gates_noisy.py index 80090fc7e7..f2769405aa 100644 --- a/source/qdk_package/tests/test_simulators_gates_noisy.py +++ b/source/qdk_package/tests/test_simulators_gates_noisy.py @@ -5,9 +5,10 @@ import os import pytest import qdk as qsharp -from qdk._qsharp import compile +from qdk._interpreter import compile from qdk import Result, TargetProfile -from qdk._simulation import run_qir as _run_qir, NoiseConfig, try_create_gpu_adapter +from qdk.simulation import run_qir as _run_qir, NoiseConfig +from qdk.simulation._simulation import try_create_gpu_adapter from typing import Literal, List, Optional, TypeAlias diff --git a/source/qdk_package/tests/test_sparse_simulator.py b/source/qdk_package/tests/test_sparse_simulator.py index 6cb2696147..ec54d45221 100644 --- a/source/qdk_package/tests/test_sparse_simulator.py +++ b/source/qdk_package/tests/test_sparse_simulator.py @@ -15,7 +15,7 @@ from qdk import TargetProfile from qdk import openqasm, run -from qdk._simulation import NoiseConfig +from qdk.simulation import NoiseConfig current_file_path = Path(__file__) # Get the directory of the current file From 792ef7b98926e7a0dd2143a0d1ce24f937f461f9 Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Mon, 4 May 2026 11:28:20 -0700 Subject: [PATCH 16/25] style --- source/qdk_package/qdk/_device/_atom/__init__.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/source/qdk_package/qdk/_device/_atom/__init__.py b/source/qdk_package/qdk/_device/_atom/__init__.py index 7951fa27ff..a844d2e813 100644 --- a/source/qdk_package/qdk/_device/_atom/__init__.py +++ b/source/qdk_package/qdk/_device/_atom/__init__.py @@ -2,7 +2,12 @@ # Licensed under the MIT License. 
from .._device import Device, Zone, ZoneType -from ...simulation._simulation import NoiseConfig, run_qir_clifford, run_qir_cpu, run_qir_gpu +from ...simulation._simulation import ( + NoiseConfig, + run_qir_clifford, + run_qir_cpu, + run_qir_gpu, +) from ..._native import try_create_gpu_adapter from ..._types import QirInputData from ... import telemetry_events From 8e514e9fe84ccee03c0a4edad28aaf1ee4bce0a5 Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Mon, 4 May 2026 12:37:41 -0700 Subject: [PATCH 17/25] fixed some internal circular dependencies --- build.py | 1 + .../qdk_package/qdk/_device/_atom/__init__.py | 19 ++++++++++++------- .../devices/test_atom_e2e.py | 3 ++- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/build.py b/build.py index d5ec94c6f4..c9bae449a3 100755 --- a/build.py +++ b/build.py @@ -307,6 +307,7 @@ def install_qsharp_python_package(cwd, wheelhouse, interpreter): "pip", "install", "--force-reinstall", + "--no-deps", "--no-index", "--find-links=" + wheelhouse, "qsharp", diff --git a/source/qdk_package/qdk/_device/_atom/__init__.py b/source/qdk_package/qdk/_device/_atom/__init__.py index a844d2e813..1df3925360 100644 --- a/source/qdk_package/qdk/_device/_atom/__init__.py +++ b/source/qdk_package/qdk/_device/_atom/__init__.py @@ -1,20 +1,19 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. +from __future__ import annotations + from .._device import Device, Zone, ZoneType -from ...simulation._simulation import ( - NoiseConfig, - run_qir_clifford, - run_qir_cpu, - run_qir_gpu, -) from ..._native import try_create_gpu_adapter from ..._types import QirInputData from ... 
import telemetry_events -from typing import List, Literal, Optional +from typing import List, Literal, Optional, TYPE_CHECKING import time +if TYPE_CHECKING: + from ...simulation._simulation import NoiseConfig + class NeutralAtomDevice(Device): """ @@ -252,6 +251,12 @@ def simulate( :return: The results of each shot of the simulation as a list. """ + from ...simulation._simulation import ( + NoiseConfig, + run_qir_clifford, + run_qir_cpu, + run_qir_gpu, + ) from ._validate import ValidateNoConditionalBranches from ._scheduler import Schedule from ._decomp import DecomposeRzAnglesToCliffordGates diff --git a/source/qdk_package/tests-integration/devices/test_atom_e2e.py b/source/qdk_package/tests-integration/devices/test_atom_e2e.py index 2ab0e4406f..2e6bc4d16f 100644 --- a/source/qdk_package/tests-integration/devices/test_atom_e2e.py +++ b/source/qdk_package/tests-integration/devices/test_atom_e2e.py @@ -5,7 +5,8 @@ from expecttest import assert_expected_inline import qdk as qsharp -from qdk._device._atom import NeutralAtomDevice, NoiseConfig +from qdk._device._atom import NeutralAtomDevice +from qdk.simulation import NoiseConfig try: import pyqir From 05e54f22043d2649b6bf47116d16509643d31fe5 Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Mon, 4 May 2026 13:26:43 -0700 Subject: [PATCH 18/25] Updates to the pipeline --- .ado/publish.yml | 11 ++++----- .prettierignore | 2 ++ build.py | 63 ++++++++++++++++++------------------------------ 3 files changed, 30 insertions(+), 46 deletions(-) diff --git a/.ado/publish.yml b/.ado/publish.yml index 46af8b11d4..a5f79ba268 100644 --- a/.ado/publish.yml +++ b/.ado/publish.yml @@ -289,11 +289,10 @@ extends: python ./prereqs.py --skip-wasm && python ./version.py displayName: Install Prereqs and set version - # The jupyterlab and widgets packages have no tests. The qdk package has minimal tests but - # they also depend on the qsharp package which is platform-dependent. 
So we skip tests here - # and rely on the GitHub CI to run the few `qdk` tests on every PR. + # Build platform-agnostic Python wheels: jupyterlab, widgets, and the + # qsharp shim (pure Python). The qdk native wheel is built per-platform. - script: | - python ./build.py --jupyterlab --widgets --qdk --no-check --no-test --no-check-prereqs + python ./build.py --jupyterlab --widgets --pip --no-check --no-test --no-check-prereqs displayName: Build Platform-Agnostic Packages - script: | @@ -398,13 +397,13 @@ extends: # Windows arm64 - script: | - python build.py --pip --no-check-prereqs --no-integration-tests --no-optional-dependencies + python build.py --qdk --no-check-prereqs --no-integration-tests --no-optional-dependencies displayName: Build Platform-Dependent Py Packages condition: and(eq(variables['Agent.OS'], 'Windows_NT'), eq(variables['arch'], 'aarch64')) # every other platform - script: | - python build.py --pip --no-check-prereqs --integration-tests + python build.py --qdk --no-check-prereqs --integration-tests displayName: Build Platform-Dependent Py Packages condition: not(and(eq(variables['Agent.OS'], 'Windows_NT'), eq(variables['arch'], 'aarch64'))) diff --git a/.prettierignore b/.prettierignore index 6ba56467cb..f2253032c5 100644 --- a/.prettierignore +++ b/.prettierignore @@ -20,6 +20,8 @@ __pycache__/ /source/npm/qsharp/test/**/*.snapshot.* /source/pip/ /source/pip/src/**/*.html +/source/qdk_package/ +/source/qdk_package/src/**/*.html /source/playground/public/libs/ /source/vscode/out/ /source/vscode/test/out/ diff --git a/build.py b/build.py index c9bae449a3..7ecca035e5 100755 --- a/build.py +++ b/build.py @@ -300,21 +300,6 @@ def use_python_env(folder): step_end() -def install_qsharp_python_package(cwd, wheelhouse, interpreter): - command_args = [ - interpreter, - "-m", - "pip", - "install", - "--force-reinstall", - "--no-deps", - "--no-index", - "--find-links=" + wheelhouse, - "qsharp", - ] - subprocess.run(command_args, check=True, text=True, 
cwd=cwd) - - # If any package fails to install when using a requirements file, the entire # process will fail with unpredicatable state of installed packages. To avoid # this, we install each package individually from the requirements file. @@ -474,32 +459,12 @@ def run_ci_historic_benchmark(): run_python_tests(os.path.join(qdk_python_src, "tests"), python_bin, pip_env) step_end() -if build_pip: - step_start("Building the pip package") - - python_bin, pip_env = use_python_env(pip_src) - - # qsharp is now a pure-Python shim depending on qdk. - # Build with setuptools (no maturin needed). - pip_build_args = [ - python_bin, - "-m", - "build", - "--wheel", - "-v", - "--outdir", - wheels_dir, - pip_src, - ] - subprocess.run(pip_build_args, check=True, text=True, cwd=pip_src, env=pip_env) - step_end() - if args.integration_tests: - step_start("Setting up for integration tests for the pip package") + step_start("Setting up for integration tests for the qdk package") test_dir = os.path.join(qdk_python_src, "tests-integration") install_python_test_requirements(test_dir, python_bin, check=False) - # Install qdk first (qsharp depends on it) + # Install qdk from the freshly built wheel. install_args = [ python_bin, "-m", @@ -516,7 +481,7 @@ def run_ci_historic_benchmark(): for version in QISKIT_VERSION_MATRIX: step_start( - f"Running integration tests for the pip package ({version['label']})" + f"Running integration tests for the qdk package ({version['label']})" ) version_install_args = [ @@ -530,12 +495,30 @@ def run_ci_historic_benchmark(): ] + version["requirements"] subprocess.run(version_install_args, check=True, text=True, cwd=test_dir) - install_qsharp_python_package(pip_src, wheels_dir, python_bin) - run_python_integration_tests(test_dir, python_bin) step_end() +if build_pip: + step_start("Building the pip package") + + python_bin, pip_env = use_python_env(pip_src) + + # qsharp is now a pure-Python shim depending on qdk. + # Build with setuptools (no maturin needed). 
+ pip_build_args = [ + python_bin, + "-m", + "build", + "--wheel", + "-v", + "--outdir", + wheels_dir, + pip_src, + ] + subprocess.run(pip_build_args, check=True, text=True, cwd=pip_src, env=pip_env) + step_end() + if build_widgets: step_start("Building the Python widgets") From d6890dd338d1526f30c77e9ba5e459c3dcbfeeb5 Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Mon, 4 May 2026 13:47:22 -0700 Subject: [PATCH 19/25] Start keeping track of PR changes --- source/qdk_package/PR_DESCRIPTION.md | 127 +++++++++++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 source/qdk_package/PR_DESCRIPTION.md diff --git a/source/qdk_package/PR_DESCRIPTION.md b/source/qdk_package/PR_DESCRIPTION.md new file mode 100644 index 0000000000..db2d4330ba --- /dev/null +++ b/source/qdk_package/PR_DESCRIPTION.md @@ -0,0 +1,127 @@ +# PR: Migrate `qsharp` pip package → `qdk` + +> **Delete this file before merging.** + +## Summary + +This PR migrates all Python source code, Rust native extension code, tests, and build infrastructure from the `qsharp` pip package (`source/pip/`) into the `qdk` package (`source/qdk_package/`). After this change: + +- **`qdk`** is the primary Python package containing all source code and the native Rust extension. +- **`qsharp`** becomes a thin, pure-Python deprecation shim that re-exports from the `qdk` package. + +## Motivation + +The repo is being rebranded from `qsharp` to `qdk`. Rather than maintaining two packages with real code, all functionality consolidates into `qdk`, and `qsharp` exists solely for backward compatibility during the transition period. + +--- + +## What Changed + +### 1. Rust native extension moved (`source/pip/src/` → `source/qdk_package/src/`) + +The pyo3 native module (`_native`) and all its Rust source files (interpreter, QIR simulation, noisy simulator bindings, resource estimator, etc.) were moved from `source/pip/` into `source/qdk_package/`. 
The `Cargo.toml` at `source/qdk_package/` now defines the `qdk` crate (previously `qsharp`), and the root `Cargo.toml` workspace member was updated accordingly. + +**Files moved (all "pure" renames):** +- `src/lib.rs`, `src/interpreter.rs`, `src/qir_simulation.rs`, `src/noisy_simulator.rs`, `src/qre.rs`, `src/fs.rs`, `src/interop.rs`, `src/generic_estimator/`, `src/displayable_output/`, `src/state_*_template.html`, and all sub-modules. + +### 2. Python source reorganized (`source/pip/qsharp/` → `source/qdk_package/qdk/`) + +All Python modules were moved from `qsharp.*` to `qdk.*` with import paths updated throughout. + +**Key structural changes:** + +| Before (`qsharp.*`) | After (`qdk.*`) | Notes | +|---|---|---| +| `qsharp/__init__.py` | `qdk/__init__.py` | New minimal root; exposes common utilities | +| `qsharp/_qsharp.py` | `qdk/_interpreter.py` + `qdk/_types.py` | Split: interpreter functions vs. type definitions | +| `qsharp/_simulation.py` | `qdk/simulation/_simulation.py` | Moved into `simulation/` subpackage | +| `qsharp/noisy_simulator/` | `qdk/simulation/_noisy_simulator.py` | Absorbed into `simulation/` subpackage | +| `qsharp/interop/qiskit/` | `qdk/qiskit/` | Promoted from nested `interop` to top-level subpackage | +| `qsharp/interop/cirq/` | `qdk/cirq/` | Promoted from nested `interop` to top-level subpackage | +| `qsharp/utils/_utils.py` | *(deleted)* | `dump_operation` moved into `_interpreter.py` | +| `qsharp/estimator/` | `qdk/estimator/` | Direct move, imports updated | +| `qsharp/openqasm/` | `qdk/openqasm/` | Direct move, imports updated | +| `qsharp/code/` | `qdk/code/` | Direct move | +| `qsharp/applications/` | `qdk/applications/` | Direct move | +| `qsharp/qre/` | `qdk/qre/` | Direct move, imports updated | +| `qsharp/_device/` | `qdk/_device/` | Direct move, circular import fixed (see below) | + +**New `qdk` public API surface:** +- `qdk.qsharp` — Q# interpreter functions (`init`, `eval`, `run`, `compile`, `circuit`, `estimate`, etc.) 
+- `qdk.simulation` — Simulation APIs (`NeutralAtomDevice`, `NoiseConfig`, noisy simulator types) +- `qdk.qiskit` — Qiskit interop (`QSharpBackend`, `NeutralAtomBackend`, etc.) +- `qdk.cirq` — Cirq interop +- `qdk.estimator` — Resource estimator +- `qdk.openqasm` — OpenQASM compilation/execution +- `qdk.code` — Code analysis +- `qdk.applications` — Domain applications (magnets, etc.) +- `qdk.qre` — QRE v3 + +### 3. `qsharp` package converted to deprecation shim (`source/pip/`) + +`source/pip/qsharp/__init__.py` now: +1. Emits a `DeprecationWarning` on import. +2. Re-exports the full public API from `qdk._types`, `qdk._interpreter`, and `qdk._native`. +3. Registers IPython magics from `qdk._ipython`. + +All other Python files under `source/pip/qsharp/` are now thin re-export wrappers that import from their `qdk.*` counterparts. The package metadata in `source/pip/pyproject.toml` declares `dependencies = ["qdk==0.0.0"]` (version stamped at CI time). + +### 4. Tests moved + +- **Unit tests** remain at `source/qdk_package/tests/` — import paths updated from `qsharp.*` to `qdk.*`. +- **Integration tests** moved from `source/pip/tests-integration/` to `source/qdk_package/tests-integration/` — all imports updated to use `qdk.*`. + +### 5. Build script changes (`build.py`) + +- **`--qdk` flag**: Builds the maturin wheel (Rust + Python) and runs unit tests. Also runs integration tests when `--integration-tests` is passed. +- **`--pip` flag**: Builds only the pure-Python `qsharp` shim wheel via setuptools. No longer runs any tests (integration tests moved to `--qdk`). +- **Removed `install_qsharp_python_package()`**: No longer needed since integration tests don't depend on the `qsharp` shim. +- Install commands use `--no-deps --no-index` to install from local wheels without reaching PyPI. + +### 6. CI/CD pipeline changes + +**GitHub Actions (`ci.yml`)** — No changes needed. The `integration-tests` job already passed both `--qdk` and `--integration-tests`. 
+ +**Azure DevOps (`publish.yml`)** — Restructured for clean platform split: + +| Job | Before | After | +|---|---|---| +| `Platform_Agnostic_Python` | `--jupyterlab --widgets --qdk` | `--jupyterlab --widgets --pip` | +| Per-platform matrix | `--pip --integration-tests` | `--qdk --integration-tests` | + +This restructuring reflects the fact that `qdk` is now the platform-specific package (it contains the native Rust extension), while `qsharp` is now platform-agnostic (pure-Python shim). Previously it was the reverse: `qsharp` held the native code and `qdk` was a pure-Python meta-package. Each of the 6 OS/arch combinations now builds its own native `qdk` wheel, while the platform-agnostic wheels (`qsharp`, widgets, jupyterlab) are built once. + +### 7. Circular import fix (`_device._atom` ↔ `simulation`) + +`simulation/__init__.py` imports `NeutralAtomDevice` from `_device._atom`, while `_device._atom` needs `NoiseConfig` and `run_qir_*` from `simulation._simulation`. This was resolved by: +- Using `from __future__ import annotations` in `_device/_atom/__init__.py` +- Guarding `NoiseConfig` import behind `TYPE_CHECKING` +- Deferring runtime imports of `run_qir_*` into the `simulate()` method body + +### 8. Miscellaneous + +- **`.prettierignore`**: Added `source/qdk_package/src/**/*.html` and `source/qdk_package/tests-integration/**/*.inc` (these files moved from `source/pip/` which was fully ignored). +- **`.github/CODEOWNERS`**: Updated paths from `source/pip/` to `source/qdk_package/`. +- **`.github/copilot-instructions.md`**: Updated architecture documentation. +- **`Cargo.toml` (root)**: Updated workspace member from `source/pip` to `source/qdk_package`. +- **Sample notebooks**: Updated `import qsharp` to `import qdk` / `from qdk import qsharp`. + +--- + +## How to Review + +Due to the large number of file moves, GitHub's diff may be hard to follow. Suggested approach: + +1. 
**Start with `build.py`** — understand the new build flow (`--qdk` builds native + runs tests, `--pip` just builds the shim). +2. **Read `source/qdk_package/qdk/__init__.py`** — see what the new `qdk` root exposes. +3. **Read `source/pip/qsharp/__init__.py`** — see the deprecation shim pattern. +4. **Skim `source/qdk_package/qdk/simulation/__init__.py`** and `source/qdk_package/qdk/_device/_atom/__init__.py` — these have the circular import fix. +5. **Check `.ado/publish.yml`** — verify the platform-agnostic vs. per-platform split. +6. **The rest is mostly mechanical** — file renames and `s/qsharp/qdk/g` import updates. GitHub should detect most as renames (95%+ similarity). + +## Testing + +- 1,248 unit tests pass (`source/qdk_package/tests/`) +- 338 integration tests pass per Qiskit version (`source/qdk_package/tests-integration/`), run against both Qiskit v1 (`>=1.3,<2`) and v2 (`>=2,<3`) +- Widgets build successfully +- `qsharp` shim wheel builds successfully From ac00b6129b59efe88bf8b88b7aeede919a8f00a1 Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Mon, 4 May 2026 14:28:58 -0700 Subject: [PATCH 20/25] Add API Surface description and update difficult test --- source/qdk_package/API_SURFACE.md | 345 ++++++++++++++++++ source/qdk_package/PR_DESCRIPTION.md | 6 + .../test-no-lang-metadata.ipynb | 95 +++-- 3 files changed, 397 insertions(+), 49 deletions(-) create mode 100644 source/qdk_package/API_SURFACE.md diff --git a/source/qdk_package/API_SURFACE.md b/source/qdk_package/API_SURFACE.md new file mode 100644 index 0000000000..58c24d3182 --- /dev/null +++ b/source/qdk_package/API_SURFACE.md @@ -0,0 +1,345 @@ +# `qdk` Package — Public API Surface + +> **Delete this file before merging.** + +## `qdk` + +``` +qdk +├── code # submodule — dynamic Q# callable namespace +├── Result # enum (Zero, One, Loss) +├── TargetProfile # enum (Base, Adaptive_RI, Adaptive_RIF, Adaptive_RIFLA, Unrestricted) +├── StateDump # class — quantum state snapshot +├── ShotResult # 
TypedDict — single shot result +├── PauliNoise # class — (x, y, z) noise tuple +├── DepolarizingNoise # class — uniform Pauli noise +├── BitFlipNoise # class — X-only noise +├── PhaseFlipNoise # class — Z-only noise +├── init() # function — initialize the Q# interpreter +├── set_quantum_seed() # function +├── set_classical_seed() # function +└── dump_machine() # function — get current state +``` + +## `qdk.qsharp` + +Q# interpreter — the main entry point for writing and running Q# programs. + +``` +qdk.qsharp +│ +│ # Interpreter lifecycle +├── init() # initialize interpreter with target profile, project root, etc. +├── get_interpreter() # get the current Interpreter instance +├── get_config() # get the current Config +│ +│ # Execution +├── eval() # evaluate Q# source code +├── run() # run an entry expression for N shots +├── compile() # compile to QIR → QirInputData +├── circuit() # generate a circuit diagram → Circuit +├── estimate() # resource estimation (deprecated — use qdk.qre) +├── logical_counts() # get logical resource counts → LogicalCounts +│ +│ # State inspection +├── dump_machine() # get current quantum state → StateDump +├── dump_circuit() # get circuit so far → Circuit +├── dump_operation() # get unitary matrix of an operation +├── set_quantum_seed() # set quantum RNG seed +├── set_classical_seed() # set classical RNG seed +│ +│ # Types & Data Classes +├── Config # class — interpreter configuration +├── QirInputData # class — compiled QIR output +├── StateDump # class — quantum state data +├── ShotResult # TypedDict — shot result with events/messages +│ +│ # Noise types +├── PauliNoise # class — (x, y, z) noise specification +├── DepolarizingNoise # class — uniform depolarizing +├── BitFlipNoise # class — bit-flip noise +├── PhaseFlipNoise # class — phase-flip noise +├── NoiseConfig # class — per-gate noise configuration +│ +│ # Enums +├── Result # enum (Zero, One, Loss) +├── Pauli # enum (I, X, Y, Z) +├── TargetProfile # enum (Base, Adaptive_RI, ..., 
Unrestricted) +├── CircuitGenerationMethod # enum (ClassicalEval, Simulate, Static) +│ +│ # Native types +├── Interpreter # class — the Q# interpreter +├── Circuit # class — circuit representation +├── CircuitConfig # class — circuit generation options +├── Output # class — interpreter output +├── GlobalCallable # class — Q# callable reference +├── Closure # class — Q# closure reference +├── StateDumpData # class — raw state dump from native +├── QSharpError # exception +│ +│ # Estimator types (re-exported) +├── EstimatorResult # class — resource estimation result +├── EstimatorParams # class — resource estimation parameters +└── LogicalCounts # class — logical resource counts +``` + +## `qdk.simulation` + +Simulation APIs — neutral atom device, QIR execution, and noisy simulators. + +``` +qdk.simulation +├── NeutralAtomDevice # class — neutral atom device compiler & simulator +│ ├── compile(program, verbose) +│ ├── show_trace(qir) +│ └── simulate(qir, shots, noise, type, seed) +├── NoiseConfig # class — per-gate noise tables +│ ├── .x, .y, .z, .h, .s, .t, ... # NoiseTable per gate type +│ ├── .intrinsics # NoiseIntrinsicsTable +│ ├── intrinsic(name, num_qubits) +│ └── load_csv_dir(dir_path) +├── run_qir() # function — run QIR with optional noise +│ +│ # Experimental noisy simulation +├── NoisySimulatorError # exception +├── Operation # class — Kraus operator representation +├── Instrument # class — quantum instrument +├── DensityMatrixSimulator # class — density matrix simulator +├── StateVectorSimulator # class — state vector simulator +├── DensityMatrix # class — density matrix state +└── StateVector # class — state vector state +``` + +## `qdk.estimator` + +Resource estimation (v1) — physical qubit and QEC parameter estimation. 
+ +``` +qdk.estimator +├── EstimatorParams # class — estimation input parameters +├── EstimatorInputParamsItem # class — single parameter set +├── EstimatorResult # class — estimation output (dict subclass) +│ ├── data(idx) +│ ├── summary, diagram, plot() +│ └── summary_data_frame() +├── LogicalCounts # class — logical resource counts (dict subclass) +│ └── estimate(params) → EstimatorResult +├── EstimatorError # exception +│ +│ # Parameter building blocks +├── QubitParams # class — predefined qubit models +│ └── GATE_US_E3, GATE_US_E4, GATE_NS_E3, GATE_NS_E4, MAJ_NS_E4, MAJ_NS_E6 +├── QECScheme # class — predefined QEC schemes +│ └── SURFACE_CODE, FLOQUET_CODE +├── MeasurementErrorRate # dataclass +├── EstimatorQubitParams # dataclass +├── EstimatorQecScheme # dataclass +├── ProtocolSpecificDistillationUnitSpecification # dataclass +├── DistillationUnitSpecification # dataclass +├── ErrorBudgetPartition # dataclass +└── EstimatorConstraints # dataclass +``` + +## `qdk.openqasm` + +OpenQASM 3.0 compilation and execution. + +``` +qdk.openqasm +├── run() # function — run OpenQASM program +├── compile() # function — compile to QIR +├── circuit() # function — generate circuit diagram +├── estimate() # function — resource estimation (deprecated) +├── import_openqasm() # function — import OpenQASM into interpreter +├── ProgramType # enum (File, Operation, Fragments) +├── OutputSemantics # enum (Qiskit, OpenQasm, ResourceEstimation) +└── QasmError # exception +``` + +## `qdk.qiskit` + +Qiskit interop — backends, jobs, and resource estimation. 
+ +``` +qdk.qiskit +├── QSharpBackend # class — Qiskit BackendV2 for Q# simulation +├── NeutralAtomBackend # class — Qiskit BackendV2 for neutral atom +├── ResourceEstimatorBackend # class — Qiskit BackendV2 for resource estimation +├── QirTarget # class — Qiskit Target helper +├── estimate() # function — estimate a QuantumCircuit +├── EstimatorParams # class (re-exported from qdk.estimator) +├── EstimatorResult # class (re-exported from qdk.estimator) +├── QasmError # exception +│ +│ # Jobs +├── QsJob # class — abstract job base +├── QsSimJob # class — simulation job +├── ReJob # class — resource estimation job +├── QsJobSet # class — multi-circuit job set +│ +│ # Submodules +├── backends/ # backend implementations +│ ├── Compilation, Errors +│ ├── NeutralAtomTarget +│ └── RemoveDelays (pass) +├── jobs/ # job implementations +├── execution/ # execution helpers +└── passes/ # transpiler passes +``` + +## `qdk.cirq` + +Cirq interop — neutral atom sampler. + +``` +qdk.cirq +├── NeutralAtomSampler # class — cirq.Sampler for neutral atom simulation +│ └── run_sweep(program, params, repetitions) +└── NeutralAtomCirqResult # class — cirq.ResultDict with raw shot access +``` + +## `qdk.qre` + +Quantum Resource Estimation v3. 
+ +``` +qdk.qre +│ +│ # Top-level functions +├── estimate() # function — run full estimation pipeline +├── constraint() # function — create an ISA constraint +├── plot_estimates() # function — visualize estimation results +├── instruction_name() # function — ID → name lookup +├── property_name() # function — ID → name lookup +├── property_name_to_key() # function — name → ID lookup +│ +│ # Function builders (for ISA properties) +├── block_linear_function() # function +├── constant_function() # function +├── linear_function() # function +├── generic_function() # function +│ +│ # Core types (from Rust) +├── ISA # class — instruction set architecture +├── ISARequirements # class — ISA constraint set +├── Instruction # class — single instruction definition +├── InstructionFrontier # class — Pareto frontier of instructions +├── Constraint # class — ISA constraint +├── ConstraintBound # class — comparison bound (lt, le, eq, gt, ge) +├── EstimationResult # class — single estimation result +├── FactoryResult # class — factory estimation result +├── Trace # class — algorithm execution trace +├── Block # class — trace block +│ +│ # Python framework types +├── Application # abstract class — algorithm definition +├── Architecture # abstract class — hardware model +├── ISAContext # class — enumeration context +├── ISATransform # abstract class — ISA transformation +├── ISAQuery # abstract class — ISA enumeration query +├── ISARefNode # class — enumeration leaf node +├── ISA_ROOT # constant — root enumeration node +├── TraceQuery # class — trace enumeration query +├── TraceTransform # abstract class — trace transformation +├── PSSPC # dataclass — Pauli-based rotation synthesis +├── LatticeSurgery # dataclass — lattice surgery transform +├── Encoding # IntEnum (PHYSICAL=0, LOGICAL=1) +├── LOGICAL # constant +├── PHYSICAL # constant +├── InstructionSource # class — instruction provenance +│ +│ # Result types +├── EstimationTable # class — tabular estimation results +├── 
EstimationTableEntry # frozen dataclass — single result row +├── EstimationTableColumn # frozen dataclass — column definition +│ +│ # Submodules +├── instruction_ids # module — integer constants (PAULI_X, H, CNOT, T, ...) +├── property_keys # module — integer constants (DISTANCE, RUNTIME, ...) +│ +├── application/ # application definitions +│ ├── CirqApplication # dataclass +│ ├── QIRApplication # dataclass +│ ├── QSharpApplication # dataclass +│ └── OpenQASMApplication # dataclass +│ +├── interop/ # trace builders +│ ├── trace_from_cirq() # function +│ ├── trace_from_entry_expr() # function +│ ├── trace_from_entry_expr_cached() # function +│ ├── trace_from_qir() # function +│ ├── PushBlock, PopBlock # classes — Cirq custom gates +│ ├── QubitType, TypedQubit # classes — typed qubits +│ ├── PeakUsageGreedyQubitManager # class — qubit manager +│ ├── ReadFromMemoryGate # class +│ ├── WriteToMemoryGate # class +│ ├── write_to_memory() # function +│ ├── read_from_memory() # function +│ └── assert_qubits_type() # function +│ +└── models/ # hardware models + ├── GateBased # class — gate-based qubit architecture + ├── Majorana # class — Majorana qubit architecture + ├── SurfaceCode # class — surface code QEC + ├── ThreeAux # class — 3-auxiliary QEC + ├── OneDimensionalYokedSurfaceCode # class — yoked surface code (1D) + ├── TwoDimensionalYokedSurfaceCode # class — yoked surface code (2D) + ├── Litinski19Factory # class — magic state factory + ├── MagicUpToClifford # class — factory utility + └── RoundBasedFactory # class — round-based factory +``` + +## `qdk.applications` + +Domain-specific quantum applications. 
+ +``` +qdk.applications +└── magnets/ # quantum magnetism + │ + │ # Geometry + ├── CompleteBipartiteGraph # class + ├── CompleteGraph # class + ├── Chain1D # class + ├── Ring1D # class + ├── Patch2D # class + ├── Torus2D # class + │ + │ # Models + ├── Model # class — base model + ├── IsingModel # class + ├── HeisenbergModel # class + │ + │ # Trotter + ├── TrotterStep # class + ├── TrotterExpansion # class + ├── strang_splitting() # function + ├── suzuki_recursion() # function + ├── yoshida_recursion() # function + ├── fourth_order_trotter_suzuki() # function + │ + │ # Utilities + ├── Hyperedge # class + ├── Hypergraph # class + ├── HypergraphEdgeColoring # class + ├── Pauli # class + ├── PauliString # class + ├── PauliX # constant + ├── PauliY # constant + └── PauliZ # constant +``` + +## `qdk.azure` + +Azure Quantum integration (requires `pip install qdk[azure]`). +Re-exports `azure.quantum.*`. + +## `qdk.widgets` + +Jupyter widgets (requires `pip install qdk[jupyter]`). +Re-exports `qsharp_widgets.*`. + +## `qdk.code` + +Dynamic namespace populated at runtime by the Q# interpreter. +Q# callables and types become attributes (e.g., `qdk.code.Microsoft.Quantum.*`). diff --git a/source/qdk_package/PR_DESCRIPTION.md b/source/qdk_package/PR_DESCRIPTION.md index db2d4330ba..b11ef898ad 100644 --- a/source/qdk_package/PR_DESCRIPTION.md +++ b/source/qdk_package/PR_DESCRIPTION.md @@ -125,3 +125,9 @@ Due to the large number of file moves, GitHub's diff may be hard to follow. Sugg - 338 integration tests pass per Qiskit version (`source/qdk_package/tests-integration/`), run against both Qiskit v1 (`>=1.3,<2`) and v2 (`>=2,<3`) - Widgets build successfully - `qsharp` shim wheel builds successfully + +## Follow-up Work + +- **Move noise types to `qdk.simulation`**: The `PauliNoise`, `DepolarizingNoise`, `BitFlipNoise`, and `PhaseFlipNoise` classes currently live in `qdk._types` and are re-exported through `qdk.qsharp`. 
These are simulation concepts and should canonically live in `qdk.simulation`, with backward-compatible re-exports from `qdk.qsharp` and `qdk._types`. Deferred from this PR to avoid additional circular import complexity. +- **`NoiseConfig` in `qdk.qsharp`**: Similarly, `NoiseConfig` (from `_native`) is re-exported in `qdk.qsharp.__all__` but semantically belongs in `qdk.simulation` (where it's already exported). The `qdk.qsharp` re-export should be removed in a follow-up. +- **Audit and rewrite docstrings**: Module and function docstrings throughout the package still reference the old `qsharp` import paths and naming conventions. These need to be audited and updated to reflect the new `qdk.*` namespace for accurate generated documentation. diff --git a/source/vscode/test/suites/language-service/test-workspace/test-no-lang-metadata.ipynb b/source/vscode/test/suites/language-service/test-workspace/test-no-lang-metadata.ipynb index ef387b0346..6fd9cfd304 100644 --- a/source/vscode/test/suites/language-service/test-workspace/test-no-lang-metadata.ipynb +++ b/source/vscode/test/suites/language-service/test-workspace/test-no-lang-metadata.ipynb @@ -1,52 +1,49 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "id": "1e8e4faa", - "metadata": {}, - "outputs": [], - "source": [ - "from qdk import qsharp\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1b55e53c", - "metadata": { - "scrolled": false, - "vscode": { - "languageId": "qsharp" + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "1e8e4faa", + "metadata": {}, + "outputs": [], + "source": [ + "from qdk import qsharp\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1b55e53c", + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "%%qsharp\n", + "\n", + "Test()\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + 
"language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" } - }, - "outputs": [], - "source": [ - "%%qsharp\n", - "\n", - "Test()\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.4" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} + "nbformat": 4, + "nbformat_minor": 5 +} \ No newline at end of file From 9a19b513ae5a78081bd924c7c7dbdbc7228a7488 Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Mon, 4 May 2026 15:04:44 -0700 Subject: [PATCH 21/25] import _ipython magic --- source/qdk_package/qdk/qsharp.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/source/qdk_package/qdk/qsharp.py b/source/qdk_package/qdk/qsharp.py index edf7953c76..385fe87c5c 100644 --- a/source/qdk_package/qdk/qsharp.py +++ b/source/qdk_package/qdk/qsharp.py @@ -19,3 +19,14 @@ from ._types import * # pyright: ignore[reportWildcardImportFromLibrary] from ._interpreter import * # pyright: ignore[reportWildcardImportFromLibrary] + +# Register the %%qsharp cell magic when running inside IPython/Jupyter. +# This mirrors the registration in the ``qsharp`` compatibility shim so that +# notebooks using ``from qdk import qsharp`` get the magic automatically. 
+try: + if __IPYTHON__: # type: ignore + from ._ipython import register_magic + + register_magic() +except NameError: + pass From 727b7da2488bad88c16cc268b4fdcdf8e7f5a234 Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Mon, 4 May 2026 15:28:11 -0700 Subject: [PATCH 22/25] move magic registration to qdk instead of qdk.qsharp --- source/qdk_package/qdk/__init__.py | 9 +++++++++ source/qdk_package/qdk/qsharp.py | 11 ----------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/source/qdk_package/qdk/__init__.py b/source/qdk_package/qdk/__init__.py index 895af6b56a..e6e3e6d87e 100644 --- a/source/qdk_package/qdk/__init__.py +++ b/source/qdk_package/qdk/__init__.py @@ -37,6 +37,15 @@ init, ) +# Register the %%qsharp cell magic when running inside IPython/Jupyter. +try: + if __IPYTHON__: # type: ignore + from ._ipython import register_magic + + register_magic() +except NameError: + pass + # utilities lifted from qsharp __all__ = [ "code", diff --git a/source/qdk_package/qdk/qsharp.py b/source/qdk_package/qdk/qsharp.py index 385fe87c5c..edf7953c76 100644 --- a/source/qdk_package/qdk/qsharp.py +++ b/source/qdk_package/qdk/qsharp.py @@ -19,14 +19,3 @@ from ._types import * # pyright: ignore[reportWildcardImportFromLibrary] from ._interpreter import * # pyright: ignore[reportWildcardImportFromLibrary] - -# Register the %%qsharp cell magic when running inside IPython/Jupyter. -# This mirrors the registration in the ``qsharp`` compatibility shim so that -# notebooks using ``from qdk import qsharp`` get the magic automatically. 
-try: - if __IPYTHON__: # type: ignore - from ._ipython import register_magic - - register_magic() -except NameError: - pass From c241973e2b3c7d75677615319166ba8e232d4ba7 Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Tue, 5 May 2026 09:41:36 -0700 Subject: [PATCH 23/25] updated notebook test --- samples/qre/1_qre_input.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/qre/1_qre_input.ipynb b/samples/qre/1_qre_input.ipynb index a6f07d2d9d..8a39346a81 100644 --- a/samples/qre/1_qre_input.ipynb +++ b/samples/qre/1_qre_input.ipynb @@ -124,7 +124,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": "24dad0bb", "metadata": {}, "outputs": [], @@ -132,7 +132,7 @@ "# Load a pre-compiled QIR file for the Hidden Shift algorithm\n", "qir_file = (\n", " Path.cwd().parent.parent\n", - " / \"source\" / \"pip\" / \"tests-integration\" / \"resources\"\n", + " / \"source\" / \"qdk_package\" / \"tests-integration\" / \"resources\"\n", " / \"adaptive_ri\" / \"output\" / \"HiddenShiftNISQ.ll\"\n", ")\n", "qir_app = QIRApplication(qir_file.read_text(encoding=\"utf-8\"))" From 0afafc3f5aa5f241cd4c085cdd8feef9015198f0 Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Tue, 5 May 2026 10:12:53 -0700 Subject: [PATCH 24/25] Updated Readmes --- source/pip/README.md | 12 +-- source/qdk_package/PR_DESCRIPTION.md | 118 +++++++++++++++++++++++ source/qdk_package/README.md | 134 ++++----------------------- 3 files changed, 143 insertions(+), 121 deletions(-) diff --git a/source/pip/README.md b/source/pip/README.md index dfc229221d..98e0c999a7 100644 --- a/source/pip/README.md +++ b/source/pip/README.md @@ -1,20 +1,20 @@ # Q# Language Support for Python +> **Note:** The `qsharp` package is deprecated. Please use the [`qdk`](https://pypi.org/project/qdk/) package instead. This package is a thin compatibility shim that re-exports the `qdk` public API so that existing code continues to work. 
+ Q# is an open-source, high-level programming language for developing and running quantum algorithms. The `qsharp` package for Python provides interoperability with the Q# interpreter, making it easy to simulate Q# programs within Python. ## Installation -To install the Q# language package, run: - ```bash -pip install qsharp +pip install qdk ``` -## Usage +For backward compatibility, `pip install qsharp` also works and will install `qdk` as a dependency. -First, import the `qsharp` module: +## Usage ```python from qdk import qsharp @@ -42,7 +42,7 @@ BellState() ## Telemetry This library sends telemetry. Minimal anonymous data is collected to help measure feature usage and performance. -All telemetry events can be seen in the source file [telemetry_events.py](https://github.com/microsoft/qdk/tree/main/source/pip/qsharp/telemetry_events.py). +All telemetry events can be seen in the source file [telemetry_events.py](https://github.com/microsoft/qdk/tree/main/source/qdk_package/qdk/telemetry_events.py). To disable sending telemetry from this package, set the environment variable `QDK_PYTHON_TELEMETRY=none` diff --git a/source/qdk_package/PR_DESCRIPTION.md b/source/qdk_package/PR_DESCRIPTION.md index b11ef898ad..3296ffe177 100644 --- a/source/qdk_package/PR_DESCRIPTION.md +++ b/source/qdk_package/PR_DESCRIPTION.md @@ -126,6 +126,124 @@ Due to the large number of file moves, GitHub's diff may be hard to follow. Sugg - Widgets build successfully - `qsharp` shim wheel builds successfully +## `qdk` Package Structure + +``` +qdk_package/ +├── Cargo.toml +├── pyproject.toml +├── MANIFEST.in +├── README.md +├── test_requirements.txt +│ +├── src/ # Rust source for _native +│ └── *.rs +│ +├── qdk/ +│ ├── __init__.py # Package root; exposes common utilities +│ │ +│ ├── _native.pyd/.so # Built by maturin (module-name = "qdk._native") +│ ├── _types.py # Pure Python types (PauliNoise, StateDump, etc.) 
+│ ├── _interpreter.py # Interpreter lifecycle & operations +│ ├── _ipython.py # %%qsharp cell magic +│ ├── _http.py # fetch_github() +│ ├── _fs.py # File system callbacks +│ ├── _adaptive_pass.py +│ ├── _adaptive_bytecode.py +│ ├── telemetry.py +│ ├── telemetry_events.py +│ │ +│ ├── code/ +│ │ └── __init__.py # Dynamic Q# callables namespace +│ │ +│ ├── estimator/ +│ │ └── __init__.py +│ │ +│ ├── openqasm/ +│ │ └── __init__.py +│ │ +│ ├── qiskit/ # Lifted out of interop/ +│ │ ├── __init__.py +│ │ ├── backends/__init__.py +│ │ ├── passes/__init__.py +│ │ ├── jobs/__init__.py +│ │ └── execution/__init__.py +│ │ +│ ├── cirq/ # Lifted out of interop/ +│ │ └── __init__.py +│ │ +│ ├── _device/ +│ │ ├── __init__.py +│ │ ├── _device.py +│ │ └── _atom/ +│ │ └── __init__.py # NeutralAtomDevice +│ │ +│ ├── qre/ +│ │ ├── __init__.py +│ │ ├── application/__init__.py +│ │ ├── models/__init__.py +│ │ │ ├── qubits/__init__.py +│ │ │ ├── qec/__init__.py +│ │ │ └── factories/__init__.py +│ │ ├── interop/__init__.py +│ │ ├── property_keys.py +│ │ └── instruction_ids.py +│ │ +│ ├── applications/ +│ │ ├── __init__.py +│ │ └── magnets/ +│ │ ├── __init__.py +│ │ ├── utilities/ +│ │ ├── trotter/ +│ │ ├── models/ +│ │ └── geometry/ +│ │ +│ ├── qsharp.py # Re-exports full qsharp-like API from _types + _interpreter +│ │ +│ ├── simulation/ # Simulation facade package +│ │ ├── __init__.py # Public API: NeutralAtomDevice, NoiseConfig, run_qir, etc. 
+│ │ ├── _simulation.py # QIR simulation implementation (internal) +│ │ ├── _noisy_simulator.py # Private wrapper for noisy simulator types +│ │ └── _noisy_simulator.pyi # Type stubs +│ │ +│ ├── widgets.py # from qsharp_widgets import * (external) +│ │ +│ └── azure/ # Re-exports from azure.quantum +│ ├── __init__.py +│ ├── job.py +│ ├── qiskit.py +│ ├── cirq.py +│ ├── argument_types.py +│ └── target/ +│ ├── __init__.py +│ └── rigetti.py +│ +├── tests/ # Unit tests (run with --qdk) +│ ├── conftest.py +│ ├── test_qsharp.py +│ ├── test_interpreter.py +│ ├── test_re.py +│ ├── test_qasm.py +│ ├── ... (30+ test modules) +│ ├── reexports/ # Re-export verification tests +│ ├── qre/ # QRE-specific tests +│ └── applications/ # Application-specific tests +│ +└── tests-integration/ # Integration tests (run with --qdk --integration-tests) + ├── conftest.py + ├── utils.py + ├── test_adaptive_ri_qir.py + ├── test_adaptive_rif_qir.py + ├── test_adaptive_rifla_qir.py + ├── test_base_qir.py + ├── devices/ # Device integration tests + ├── interop_qiskit/ # Qiskit interop tests + ├── interop_cirq/ # Cirq interop tests + └── resources/ # Test resource files (QIR, etc.) +``` + +For a detailed breakdown of every public symbol exported by each `qdk` submodule, see [API_SURFACE.md](API_SURFACE.md). + ## Follow-up Work - **Move noise types to `qdk.simulation`**: The `PauliNoise`, `DepolarizingNoise`, `BitFlipNoise`, and `PhaseFlipNoise` classes currently live in `qdk._types` and are re-exported through `qdk.qsharp`. These are simulation concepts and should canonically live in `qdk.simulation`, with backward-compatible re-exports from `qdk.qsharp` and `qdk._types`. Deferred from this PR to avoid additional circular import complexity. 
diff --git a/source/qdk_package/README.md b/source/qdk_package/README.md index 59f13ad9b1..678e9dcc1b 100644 --- a/source/qdk_package/README.md +++ b/source/qdk_package/README.md @@ -28,7 +28,7 @@ For Qiskit integration, which exposes Qiskit interop utilities in the `qdk.qiski pip install "qdk[qiskit]" ``` -For Cirq integration, which exposes Cirq interop utilities in the `qdk.azure.cirq` submodule: +For Cirq integration, which exposes Cirq interop utilities in the `qdk.cirq` submodule: ```bash pip install "qdk[cirq]" @@ -70,13 +70,17 @@ Histogram(results) Submodules: -- `qdk.qsharp` – exports the same APIs as the `qsharp` Python package -- `qdk.openqasm` – exports the same APIs as the `openqasm` submodule of the `qsharp` Python package. -- `qdk.estimator` – exports the same APIs as the `estimator` submodule of the `qsharp` Python package. +- `qdk.qsharp` – Q# interpreter functions: `init`, `eval`, `run`, `compile`, `circuit`, `estimate`, and related types. +- `qdk.openqasm` – OpenQASM compilation and execution. +- `qdk.estimator` – resource estimation utilities. - `qdk.simulation` – noise-aware simulation utilities: `NeutralAtomDevice`, `NoiseConfig`, `run_qir`, `DensityMatrixSimulator`, `StateVectorSimulator`, and related types. -- `qdk.widgets` – exports the Jupyter widgets available from the `qsharp-widgets` Python package (requires the `qdk[jupyter]` extra to be installed). -- `qdk.azure` – exports the Python APIs available from the `azure-quantum` Python package (requires the `qdk[azure]` extra to be installed). -- `qdk.qiskit` – exports the same APIs as the `interop.qiskit` submodule of the `qsharp` Python package (requires the `qdk[qiskit]` extra to be installed). +- `qdk.code` – dynamic namespace populated at runtime with user-defined Q# and OpenQASM callables. +- `qdk.qre` – quantum resource estimation v3: `estimate`, `Application`, `Architecture`, `ISA`, `ISATransform`, and related types. +- `qdk.applications` – domain-specific quantum applications (e.g. 
`qdk.applications.magnets`). +- `qdk.widgets` – Jupyter widgets for visualization (requires the `qdk[jupyter]` extra). +- `qdk.azure` – Azure Quantum service integration (requires the `qdk[azure]` extra). +- `qdk.qiskit` – Qiskit interop: `QSharpBackend`, `NeutralAtomBackend`, and related types (requires the `qdk[qiskit]` extra). +- `qdk.cirq` – Cirq interop: `NeutralAtomSampler` (requires the `qdk[cirq]` extra). ### Top level exports @@ -101,114 +105,14 @@ For convenience, the following helpers and types are also importable directly fr ## Telemetry This library sends telemetry. Minimal anonymous data is collected to help measure feature usage and performance. -All telemetry events can be seen in the source file [telemetry_events.py](https://github.com/microsoft/qdk/tree/main/source/pip/qsharp/telemetry_events.py). +All telemetry events can be seen in the source file [telemetry_events.py](https://github.com/microsoft/qdk/tree/main/source/qdk_package/qdk/telemetry_events.py). -## Target Package Structure (Migration WIP) +To disable sending telemetry from this package, set the environment variable `QDK_PYTHON_TELEMETRY=none` -The `qsharp` package (pip/) is being deprecated. All implementation is moving into `qdk` (qdk_package/). The `qsharp` package will become a thin deprecation shim that depends on `qdk`. +## Support -``` -qdk_package/ -├── Cargo.toml -├── pyproject.toml -├── MANIFEST.in -├── README.md -├── test_requirements.txt -│ -├── src/ # Rust source for _native -│ └── *.rs -│ -├── qdk/ -│ ├── __init__.py # Same public API as today -│ │ -│ │── # ——— Moved from pip/qsharp/ (implementation modules) ——— -│ ├── _native.pyd/.so # Built by maturin (module-name = "qdk._native") -│ ├── _types.py # Pure Python types (PauliNoise, StateDump, etc.) 
-│ ├── _interpreter.py # Interpreter lifecycle & operations -│ ├── _ipython.py # %%qsharp cell magic -│ ├── _http.py # fetch_github() -│ ├── _fs.py # File system callbacks -│ ├── _adaptive_pass.py -│ ├── _adaptive_bytecode.py -│ ├── telemetry.py -│ ├── telemetry_events.py -│ │ -│ ├── code/ -│ │ └── __init__.py # Dynamic Q# callables namespace -│ │ -│ ├── estimator/ # Direct module — no re-export shim needed -│ │ └── __init__.py -│ │ -│ ├── openqasm/ # Direct module — no re-export shim needed -│ │ └── __init__.py -│ │ -│ │ -│ ├── qiskit/ # Lifted out of interop/ -│ │ ├── __init__.py # QSharpBackend, NeutralAtomBackend, etc. -│ │ ├── backends/__init__.py -│ │ ├── passes/__init__.py -│ │ ├── jobs/__init__.py -│ │ └── execution/__init__.py -│ │ -│ ├── cirq/ # Lifted out of interop/ -│ │ └── __init__.py # NeutralAtomSampler -│ │ -│ ├── _device/ -│ │ ├── __init__.py -│ │ └── _atom/ -│ │ └── __init__.py # NeutralAtomDevice -│ │ -│ ├── qre/ -│ │ ├── __init__.py -│ │ ├── application/__init__.py -│ │ ├── models/__init__.py -│ │ │ ├── qubits/__init__.py -│ │ │ ├── qec/__init__.py -│ │ │ └── factories/__init__.py -│ │ ├── interop/__init__.py -│ │ ├── property_keys.py # Merged with custom_property helpers -│ │ └── instruction_ids.py -│ │ -│ ├── applications/ -│ │ ├── __init__.py -│ │ └── magnets/ -│ │ ├── __init__.py -│ │ ├── utilities/__init__.py -│ │ ├── trotter/__init__.py -│ │ ├── models/__init__.py -│ │ └── geometry/__init__.py -│ │ -│ │── # ——— Re-export / facade modules ——— -│ ├── qsharp.py # Re-exports full qsharp-like API from _types + _interpreter -│ │ -│ ├── simulation/ # Simulation facade package -│ │ ├── __init__.py # Public API: NeutralAtomDevice, NoiseConfig, run_qir, etc. 
-│ │ ├── _simulation.py # QIR simulation implementation (internal) -│ │ ├── _noisy_simulator.py # Private wrapper for noisy simulator types -│ │ └── _noisy_simulator.pyi # Type stubs -│ │ -│ │── # ——— Unchanged ——— -│ ├── widgets.py # from qsharp_widgets import * (external) -│ │ -│ └── azure/ # Unchanged — re-exports from azure.quantum -│ ├── __init__.py -│ ├── job.py -│ ├── qiskit.py -│ ├── cirq.py -│ ├── argument_types.py -│ └── target/ -│ ├── __init__.py -│ └── rigetti.py -│ -└── tests/ - ├── conftest.py - ├── mocks.py - ├── test_reexports.py - ├── test_extras.py - ├── test_integration/ - │ ├── test_*.py - │ ├── utils.py - │ └── resources/ - └── benchmarks/ - └── bench_qre.py -``` +For more information about the Microsoft Quantum Development Kit, visit [https://aka.ms/qdk](https://aka.ms/qdk). + +## Contributing + +Q# welcomes your contributions! Visit the Q# GitHub repository at [https://github.com/microsoft/qdk] to find out more about the project. From 9df4562c8f5c32eb6e7f24b56d60fa586d57301f Mon Sep 17 00:00:00 2001 From: Scott Carda Date: Tue, 5 May 2026 10:14:18 -0700 Subject: [PATCH 25/25] removed temp PR description file --- source/qdk_package/PR_DESCRIPTION.md | 251 --------------------------- 1 file changed, 251 deletions(-) delete mode 100644 source/qdk_package/PR_DESCRIPTION.md diff --git a/source/qdk_package/PR_DESCRIPTION.md b/source/qdk_package/PR_DESCRIPTION.md deleted file mode 100644 index 3296ffe177..0000000000 --- a/source/qdk_package/PR_DESCRIPTION.md +++ /dev/null @@ -1,251 +0,0 @@ -# PR: Migrate `qsharp` pip package → `qdk` - -> **Delete this file before merging.** - -## Summary - -This PR migrates all Python source code, Rust native extension code, tests, and build infrastructure from the `qsharp` pip package (`source/pip/`) into the `qdk` package (`source/qdk_package/`). After this change: - -- **`qdk`** is the primary Python package containing all source code and the native Rust extension. 
-- **`qsharp`** becomes a thin, pure-Python deprecation shim that re-exports from the `qdk` package. - -## Motivation - -The repo is being rebranded from `qsharp` to `qdk`. Rather than maintaining two packages with real code, all functionality consolidates into `qdk`, and `qsharp` exists solely for backward compatibility during the transition period. - ---- - -## What Changed - -### 1. Rust native extension moved (`source/pip/src/` → `source/qdk_package/src/`) - -The pyo3 native module (`_native`) and all its Rust source files (interpreter, QIR simulation, noisy simulator bindings, resource estimator, etc.) were moved from `source/pip/` into `source/qdk_package/`. The `Cargo.toml` at `source/qdk_package/` now defines the `qdk` crate (previously `qsharp`), and the root `Cargo.toml` workspace member was updated accordingly. - -**Files moved (all "pure" renames):** -- `src/lib.rs`, `src/interpreter.rs`, `src/qir_simulation.rs`, `src/noisy_simulator.rs`, `src/qre.rs`, `src/fs.rs`, `src/interop.rs`, `src/generic_estimator/`, `src/displayable_output/`, `src/state_*_template.html`, and all sub-modules. - -### 2. Python source reorganized (`source/pip/qsharp/` → `source/qdk_package/qdk/`) - -All Python modules were moved from `qsharp.*` to `qdk.*` with import paths updated throughout. - -**Key structural changes:** - -| Before (`qsharp.*`) | After (`qdk.*`) | Notes | -|---|---|---| -| `qsharp/__init__.py` | `qdk/__init__.py` | New minimal root; exposes common utilities | -| `qsharp/_qsharp.py` | `qdk/_interpreter.py` + `qdk/_types.py` | Split: interpreter functions vs. 
type definitions | -| `qsharp/_simulation.py` | `qdk/simulation/_simulation.py` | Moved into `simulation/` subpackage | -| `qsharp/noisy_simulator/` | `qdk/simulation/_noisy_simulator.py` | Absorbed into `simulation/` subpackage | -| `qsharp/interop/qiskit/` | `qdk/qiskit/` | Promoted from nested `interop` to top-level subpackage | -| `qsharp/interop/cirq/` | `qdk/cirq/` | Promoted from nested `interop` to top-level subpackage | -| `qsharp/utils/_utils.py` | *(deleted)* | `dump_operation` moved into `_interpreter.py` | -| `qsharp/estimator/` | `qdk/estimator/` | Direct move, imports updated | -| `qsharp/openqasm/` | `qdk/openqasm/` | Direct move, imports updated | -| `qsharp/code/` | `qdk/code/` | Direct move | -| `qsharp/applications/` | `qdk/applications/` | Direct move | -| `qsharp/qre/` | `qdk/qre/` | Direct move, imports updated | -| `qsharp/_device/` | `qdk/_device/` | Direct move, circular import fixed (see below) | - -**New `qdk` public API surface:** -- `qdk.qsharp` — Q# interpreter functions (`init`, `eval`, `run`, `compile`, `circuit`, `estimate`, etc.) -- `qdk.simulation` — Simulation APIs (`NeutralAtomDevice`, `NoiseConfig`, noisy simulator types) -- `qdk.qiskit` — Qiskit interop (`QSharpBackend`, `NeutralAtomBackend`, etc.) -- `qdk.cirq` — Cirq interop -- `qdk.estimator` — Resource estimator -- `qdk.openqasm` — OpenQASM compilation/execution -- `qdk.code` — Code analysis -- `qdk.applications` — Domain applications (magnets, etc.) -- `qdk.qre` — QRE v3 - -### 3. `qsharp` package converted to deprecation shim (`source/pip/`) - -`source/pip/qsharp/__init__.py` now: -1. Emits a `DeprecationWarning` on import. -2. Re-exports the full public API from `qdk._types`, `qdk._interpreter`, and `qdk._native`. -3. Registers IPython magics from `qdk._ipython`. - -All other Python files under `source/pip/qsharp/` are now thin re-export wrappers that import from their `qdk.*` counterparts. 
The package metadata in `source/pip/pyproject.toml` declares `dependencies = ["qdk==0.0.0"]` (version stamped at CI time). - -### 4. Tests moved - -- **Unit tests** remain at `source/qdk_package/tests/` — import paths updated from `qsharp.*` to `qdk.*`. -- **Integration tests** moved from `source/pip/tests-integration/` to `source/qdk_package/tests-integration/` — all imports updated to use `qdk.*`. - -### 5. Build script changes (`build.py`) - -- **`--qdk` flag**: Builds the maturin wheel (Rust + Python) and runs unit tests. Also runs integration tests when `--integration-tests` is passed. -- **`--pip` flag**: Builds only the pure-Python `qsharp` shim wheel via setuptools. No longer runs any tests (integration tests moved to `--qdk`). -- **Removed `install_qsharp_python_package()`**: No longer needed since integration tests don't depend on the `qsharp` shim. -- Install commands use `--no-deps --no-index` to install from local wheels without reaching PyPI. - -### 6. CI/CD pipeline changes - -**GitHub Actions (`ci.yml`)** — No changes needed. The `integration-tests` job already passed both `--qdk` and `--integration-tests`. - -**Azure DevOps (`publish.yml`)** — Restructured for clean platform split: - -| Job | Before | After | -|---|---|---| -| `Platform_Agnostic_Python` | `--jupyterlab --widgets --qdk` | `--jupyterlab --widgets --pip` | -| Per-platform matrix | `--pip --integration-tests` | `--qdk --integration-tests` | - -This restructuring reflects the fact that `qdk` is now the platform-specific package (it contains the native Rust extension), while `qsharp` is now platform-agnostic (pure-Python shim). Previously it was the reverse: `qsharp` held the native code and `qdk` was a pure-Python meta-package. Each of the 6 OS/arch combinations now builds its own native `qdk` wheel, while the platform-agnostic wheels (`qsharp`, widgets, jupyterlab) are built once. - -### 7. 
Circular import fix (`_device._atom` ↔ `simulation`) - -`simulation/__init__.py` imports `NeutralAtomDevice` from `_device._atom`, while `_device._atom` needs `NoiseConfig` and `run_qir_*` from `simulation._simulation`. This was resolved by: -- Using `from __future__ import annotations` in `_device/_atom/__init__.py` -- Guarding `NoiseConfig` import behind `TYPE_CHECKING` -- Deferring runtime imports of `run_qir_*` into the `simulate()` method body - -### 8. Miscellaneous - -- **`.prettierignore`**: Added `source/qdk_package/src/**/*.html` and `source/qdk_package/tests-integration/**/*.inc` (these files moved from `source/pip/` which was fully ignored). -- **`.github/CODEOWNERS`**: Updated paths from `source/pip/` to `source/qdk_package/`. -- **`.github/copilot-instructions.md`**: Updated architecture documentation. -- **`Cargo.toml` (root)**: Updated workspace member from `source/pip` to `source/qdk_package`. -- **Sample notebooks**: Updated `import qsharp` to `import qdk` / `from qdk import qsharp`. - ---- - -## How to Review - -Due to the large number of file moves, GitHub's diff may be hard to follow. Suggested approach: - -1. **Start with `build.py`** — understand the new build flow (`--qdk` builds native + runs tests, `--pip` just builds the shim). -2. **Read `source/qdk_package/qdk/__init__.py`** — see what the new `qdk` root exposes. -3. **Read `source/pip/qsharp/__init__.py`** — see the deprecation shim pattern. -4. **Skim `source/qdk_package/qdk/simulation/__init__.py`** and `source/qdk_package/qdk/_device/_atom/__init__.py` — these have the circular import fix. -5. **Check `.ado/publish.yml`** — verify the platform-agnostic vs. per-platform split. -6. **The rest is mostly mechanical** — file renames and `s/qsharp/qdk/g` import updates. GitHub should detect most as renames (95%+ similarity). 
- -## Testing - -- 1,248 unit tests pass (`source/qdk_package/tests/`) -- 338 integration tests pass per Qiskit version (`source/qdk_package/tests-integration/`), run against both Qiskit v1 (`>=1.3,<2`) and v2 (`>=2,<3`) -- Widgets build successfully -- `qsharp` shim wheel builds successfully - -## `qdk` Package Structure - -``` -qdk_package/ -├── Cargo.toml -├── pyproject.toml -├── MANIFEST.in -├── README.md -├── test_requirements.txt -│ -├── src/ # Rust source for _native -│ └── *.rs -│ -├── qdk/ -│ ├── __init__.py # Package root; exposes common utilities -│ │ -│ ├── _native.pyd/.so # Built by maturin (module-name = "qdk._native") -│ ├── _types.py # Pure Python types (PauliNoise, StateDump, etc.) -│ ├── _interpreter.py # Interpreter lifecycle & operations -│ ├── _ipython.py # %%qsharp cell magic -│ ├── _http.py # fetch_github() -│ ├── _fs.py # File system callbacks -│ ├── _adaptive_pass.py -│ ├── _adaptive_bytecode.py -│ ├── telemetry.py -│ ├── telemetry_events.py -│ │ -│ ├── code/ -│ │ └── __init__.py # Dynamic Q# callables namespace -│ │ -│ ├── estimator/ -│ │ └── __init__.py -│ │ -│ ├── openqasm/ -│ │ └── __init__.py -│ │ -│ ├── qiskit/ # Lifted out of interop/ -│ │ ├── __init__.py -│ │ ├── backends/__init__.py -│ │ ├── passes/__init__.py -│ │ ├── jobs/__init__.py -│ │ └── execution/__init__.py -│ │ -│ ├── cirq/ # Lifted out of interop/ -│ │ └── __init__.py -│ │ -│ ├── _device/ -│ │ ├── __init__.py -│ │ ├── _device.py -│ │ └── _atom/ -│ │ └── __init__.py # NeutralAtomDevice -│ │ -│ ├── qre/ -│ │ ├── __init__.py -│ │ ├── application/__init__.py -│ │ ├── models/__init__.py -│ │ │ ├── qubits/__init__.py -│ │ │ ├── qec/__init__.py -│ │ │ └── factories/__init__.py -│ │ ├── interop/__init__.py -│ │ ├── property_keys.py -│ │ └── instruction_ids.py -│ │ -│ ├── applications/ -│ │ ├── __init__.py -│ │ └── magnets/ -│ │ ├── __init__.py -│ │ ├── utilities/ -│ │ ├── trotter/ -│ │ ├── models/ -│ │ └── geometry/ -│ │ -│ ├── qsharp.py # Re-exports full qsharp-like API from 
_types + _interpreter -│ │ -│ ├── simulation/ # Simulation facade package -│ │ ├── __init__.py # Public API: NeutralAtomDevice, NoiseConfig, run_qir, etc. -│ │ ├── _simulation.py # QIR simulation implementation (internal) -│ │ ├── _noisy_simulator.py # Private wrapper for noisy simulator types -│ │ └── _noisy_simulator.pyi # Type stubs -│ │ -│ ├── widgets.py # from qsharp_widgets import * (external) -│ │ -│ └── azure/ # Re-exports from azure.quantum -│ ├── __init__.py -│ ├── job.py -│ ├── qiskit.py -│ ├── cirq.py -│ ├── argument_types.py -│ └── target/ -│ ├── __init__.py -│ └── rigetti.py -│ -├── tests/ # Unit tests (run with --qdk) -│ ├── conftest.py -│ ├── test_qsharp.py -│ ├── test_interpreter.py -│ ├── test_re.py -│ ├── test_qasm.py -│ ├── ... (30+ test modules) -│ ├── reexports/ # Re-export verification tests -│ ├── qre/ # QRE-specific tests -│ └── applications/ # Application-specific tests -│ -└── tests-integration/ # Integration tests (run with --qdk --integration-tests) - ├── conftest.py - ├── utils.py - ├── test_adaptive_ri_qir.py - ├── test_adaptive_rif_qir.py - ├── test_adaptive_rifla_qir.py - ├── test_base_qir.py - ├── devices/ # Device integration tests - ├── interop_qiskit/ # Qiskit interop tests - ├── interop_cirq/ # Cirq interop tests - └── resources/ # Test resource files (QIR, etc.) -``` - -For a detailed breakdown of every public symbol exported by each `qdk` submodule, see [API_SURFACE.md](API_SURFACE.md). - -## Follow-up Work - -- **Move noise types to `qdk.simulation`**: The `PauliNoise`, `DepolarizingNoise`, `BitFlipNoise`, and `PhaseFlipNoise` classes currently live in `qdk._types` and are re-exported through `qdk.qsharp`. These are simulation concepts and should canonically live in `qdk.simulation`, with backward-compatible re-exports from `qdk.qsharp` and `qdk._types`. Deferred from this PR to avoid additional circular import complexity. 
-- **`NoiseConfig` in `qdk.qsharp`**: Similarly, `NoiseConfig` (from `_native`) is re-exported in `qdk.qsharp.__all__` but semantically belongs in `qdk.simulation` (where it's already exported). The `qdk.qsharp` re-export should be removed in a follow-up. -- **Audit and rewrite docstrings**: Module and function docstrings throughout the package still reference the old `qsharp` import paths and naming conventions. These need to be audited and updated to reflect the new `qdk.*` namespace for accurate generated documentation.