diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..7df1de1 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +installer/startup-data-loader/data/*.csv binary diff --git a/.github/workflows/code-quality.yml b/.github/workflows/code-quality.yml new file mode 100644 index 0000000..478d8cc --- /dev/null +++ b/.github/workflows/code-quality.yml @@ -0,0 +1,48 @@ +name: Code Quality Checks + +on: + push: + branches: [ main, deploy ] + pull_request: + branches: [ main, deploy ] + +jobs: + lint-docker: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install Hadolint + run: | + sudo wget -O /usr/local/bin/hadolint https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 + sudo chmod +x /usr/local/bin/hadolint + + - name: Lint Dockerfiles + run: | + find . -name "Dockerfile" -exec hadolint {} \; + + check-dependencies: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Check for security vulnerabilities in Python dependencies + run: | + find . -name "requirements.txt" -exec safety check --file {} \; || true + + validate-shell-scripts: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install ShellCheck + run: | + sudo apt-get update + sudo apt-get install -y shellcheck + + - name: Check shell scripts + run: | + find . -name "*.sh" -exec shellcheck {} \; diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml new file mode 100644 index 0000000..01846ae --- /dev/null +++ b/.github/workflows/deploy.yml @@ -0,0 +1,35 @@ +name: Deploy to Staging + +on: + workflow_dispatch: + inputs: + environment: + description: 'Environment to deploy to' + required: true + default: 'staging' + type: choice + options: + - staging + - production + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + +jobs: + deploy: + runs-on: ubuntu-latest + environment: ${{ inputs.environment }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Deploy to ${{ inputs.environment }} + run: | + echo "Deploying to ${{ inputs.environment }} environment" + # Add your deployment commands here + # For example: + # - SSH to your server + # - Pull latest images + # - Run docker compose up + # - Run health checks diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml new file mode 100644 index 0000000..bd576a4 --- /dev/null +++ b/.github/workflows/docker-build.yml @@ -0,0 +1,134 @@ +name: Build and Test DAQ System + +on: + push: + branches: [ deploy ] + pull_request: + branches: [ main, deploy ] + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + +jobs: + validate-compose: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare environment file for CI + run: | + if [ ! 
-f installer/.env ]; then + cp installer/.env.example installer/.env + echo "Created .env file from .env.example" + fi + + - name: Validate docker-compose files + run: | + cd installer + docker compose config + docker compose -f docker-compose.yml config + + build-and-push: + runs-on: ubuntu-latest + needs: validate-compose + permissions: + contents: read + packages: write + + strategy: + matrix: + service: [slackbot, lap-detector, startup-data-loader, file-uploader] + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Convert repository name to lowercase + run: | + echo "IMAGE_NAME_LOWER=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_ENV + + - name: Log in to Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata + id: meta + run: | + echo "tags<> $GITHUB_OUTPUT + echo "${{ env.REGISTRY }}/${IMAGE_NAME_LOWER}/${{ matrix.service }}:latest" >> $GITHUB_OUTPUT + echo "${{ env.REGISTRY }}/${IMAGE_NAME_LOWER}/${{ matrix.service }}:${{ github.sha }}" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + - name: Build and push Docker image + uses: docker/build-push-action@v5 + with: + context: ./installer/${{ matrix.service }} + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: | + org.opencontainers.image.title=${{ github.event.repository.name }} + org.opencontainers.image.description=WFR DAQ System - ${{ matrix.service }} + org.opencontainers.image.url=${{ github.event.repository.html_url }} + org.opencontainers.image.source=${{ github.event.repository.html_url }} + org.opencontainers.image.version=${{ github.sha }} + org.opencontainers.image.created=${{ steps.meta.outputs.created }} + org.opencontainers.image.revision=${{ github.sha }} + + test-compose: + runs-on: ubuntu-latest + needs: build-and-push + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Create .env file for testing + run: | + cd installer + cp .env.example .env + + - name: Pull pre-built images + run: | + docker pull influxdb:3.5.0-core + docker pull influxdata/influxdb3-ui:1.3.0 + docker pull grafana/grafana + docker pull telegraf:1.30 + docker pull nginx:alpine + + - name: Build test images + run: | + cd installer + docker compose build --parallel + + - name: Validate compose configuration + run: | + cd installer + docker compose config --quiet + + - name: Test container startup (quick smoke test) + run: | + cd installer + # Start core monitoring stack to ensure compose wiring is valid + timeout 60s docker compose up influxdb3 influxdb3-explorer grafana frontend || true + # Check if containers started (even if they exit quickly) + docker compose ps + + cleanup: + runs-on: ubuntu-latest + needs: [validate-compose, build-and-push, test-compose] + if: always() + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Clean up Docker resources + run: | + docker system prune -f + docker image prune -f diff --git a/.github/workflows/sandbox-integration.yml b/.github/workflows/sandbox-integration.yml new file mode 100644 index 0000000..35cf010 --- /dev/null +++ b/.github/workflows/sandbox-integration.yml @@ -0,0 +1,32 @@ +name: Sandbox Integration Test + +on: + push: + branches: [main, deploy] + pull_request: + branches: [main, deploy] + workflow_dispatch: + +jobs: + slicks-sensor-check: + runs-on: ubuntu-latest + timeout-minutes: 10 + + 
steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install slicks + run: pip install slicks + + - name: Discover sensors for June 2025 + env: + INFLUX_URL: ${{ secrets.INFLUX_URL }} + INFLUX_TOKEN: ${{ secrets.INFLUX_TOKEN }} + INFLUX_DB: ${{ secrets.INFLUX_DB }} + run: python dev-utils/ci/slicks-sensor-check.py diff --git a/.github/workflows/stack-smoke-test.yml b/.github/workflows/stack-smoke-test.yml new file mode 100644 index 0000000..2dee02d --- /dev/null +++ b/.github/workflows/stack-smoke-test.yml @@ -0,0 +1,45 @@ +name: Installer Stack Smoke Test + +on: + push: + branches: [ main, deploy ] + pull_request: + branches: [ main, deploy ] + workflow_dispatch: + +jobs: + stack-smoke-test: + runs-on: ubuntu-latest + timeout-minutes: 45 + permissions: + contents: read + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Show Docker versions + run: | + docker version + docker compose version + + - name: Ensure CI script is executable + run: chmod +x ./dev-utils/ci/stack-smoke-test.sh + + - name: Prepare environment file for CI + run: | + if [ ! -f installer/.env ]; then + cp installer/.env.example installer/.env + echo "Created .env file from .env.example" + fi + ls -la installer/.env + + - name: Run installer smoke test + env: + CI: "true" + DOCKER_BUILDKIT: "1" + COMPOSE_DOCKER_CLI_BUILD: "1" + run: ./dev-utils/ci/stack-smoke-test.sh \ No newline at end of file diff --git a/.gitignore b/.gitignore index dda7584..4fa4e92 100644 --- a/.gitignore +++ b/.gitignore @@ -175,3 +175,43 @@ cython_debug/ .DS_Store .idea +car-to-influx/testing_data/cleaned_can.csv + +/.vscode + +# Ignore everything inside the data folder +installer/startup-data-loader/data/* + +# keep README.md +!installer/startup-data-loader/data/README.md + +# Keep only the specific CSVs (example files) +!installer/startup-data-loader/data/2025-01-01-00-00-00.csv +!installer/startup-data-loader/data/2025-01-01-00-07-00.csv +!installer/startup-data-loader/data/2025-01-02-00-05-00.csv +!installer/startup-data-loader/data/2025-01-03-00-13-00.csv + +!/installer/startup-data-loader/data/README.md +installer/startup-data-loader/can_metrics.out +installer/startup-data-loader/load_data_progress.json +installer/data-downloader/data/scanner_status.json +installer/data-downloader/data/runs.json +installer/data-downloader/data/sensors.json + +installer/*.dbc +# Keep example.dbc +!installer/example.dbc + +installer/slackbot/logs/* + +installer/sandbox/prompt-guide.txt + +wfr-telemetry +/installer/data-downloader/data +installer/slackbot/*.png +!installer/slackbot/lappy_test_image.png +installer/slackbot/*.jpg +installer/slackbot/*.jpeg + +# Generated CSV data files +generated-days/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..be3f7b2 --- /dev/null +++ b/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. 
+ + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. 
Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. 
This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. 
+ + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. 
+ + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. 
(Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 
+ + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/README.md b/README.md index 33adade..4b0086b 100644 --- a/README.md +++ b/README.md @@ -1,66 +1,175 @@ -This repo is for the helper functions on the server +WFR-DAQ-Banner + +# Western Formula Racing DAQ Components + +This repository hosts the Docker-based telemetry stack that powers Western Formula Racing’s data acquisition (DAQ) pipeline. 
It is designed to be publicly shareable: all runtime credentials live in `.env` files, sample datasets are anonymised, and every container is documented for easy onboarding. + +## Repository layout + +| Path | Description | +| --- | --- | +| `installer/` | Docker Compose deployment, container sources, and environment templates. | +| `docs/` | Public-facing documentation for each service and the compose stack. | +| `dev-utils` | Development utility scripts (not for production use). | +## Quick start + +1. Install Docker Desktop (macOS/Windows) or Docker Engine + Compose V2 (Linux). +2. Navigate to the installer and copy the environment template: + ```bash + cd installer + cp .env.example .env + # Update values before deploying outside of local development + ``` +3. Launch the stack: + ```bash + docker compose up -d + ``` +4. Visit the services: + - InfluxDB 3 Explorer – http://localhost:8888 + - Grafana – http://localhost:8087 + - Drag and Drop CSV File uploader – http://localhost:8084 + - Data Downloader - http://localhost:3000 + +All services share a bridge network named `datalink` and rely on the admin token supplied through `.env`. + +## System architecture + +```mermaid +graph TB + subgraph Ingestion["Data Ingestion"] + CSV["CSV Log Files"] + DBC["DBC File
CAN Signal Definitions"] +    SDL["Startup Data Loader<br/>Bulk loader on boot"] +    FU["File Uploader<br/>Drag & drop web UI :8084"] +  end + +  subgraph Radio["daq-radio repo (external)"] +    CAR["Car ECU, Raspberry Pi<br/>CAN → radio transmitter"] +    BASE["Base Station<br/>UDP/TCP receiver"] +  end + +  subgraph Storage["Time-Series Storage"] +    INFLUX["InfluxDB 3<br/>Core database :9000"] +  end + +  subgraph Visualization["Visualization & Exploration"] +    EXPLORER["InfluxDB 3 Explorer<br/>Query browser :8888"] +    GRAFANA["Grafana<br/>Dashboards :8087"] +  end + +  subgraph DataExport["Data Downloader :3000"] +    DD_FE["Frontend<br/>Vite + TypeScript SPA"] +    DD_API["Backend API<br/>FastAPI :8000"] +    DD_SCAN["Periodic Scanner<br/>Discovers runs & signals"] +  end + +  subgraph AI["AI Analysis Pipeline"] +    SLACK["Slackbot<br/>Lappy — Socket Mode"] +    CODEGEN["Code Generator<br/>Cohere LLM :3030"] +    SANDBOX["Sandbox<br/>Isolated Python runner"] +  end + +  subgraph Tracking["Future: Track Analysis"] +    LAP["Lap Detector<br/>Dash app :8050<br/>Planned — requires GPS hardware
"] + end + + CSV --> SDL + CSV --> FU + DBC -.->|cantools decode| SDL + DBC -.->|cantools decode| FU + SDL -->|Write API| INFLUX + FU -->|Write API| INFLUX + + CAR -.->|UDP/TCP| BASE + BASE -.->|Write API| INFLUX + + INFLUX --> EXPLORER + INFLUX --> GRAFANA + + DD_FE -->|REST| DD_API + DD_API -->|SQL queries| INFLUX + DD_SCAN -->|Discover data| INFLUX + DD_SCAN -->|Update metadata| DD_API + + SLACK -->|"!agent prompt"| CODEGEN + CODEGEN -->|Generated Python| SANDBOX + SANDBOX -->|Query via env creds| INFLUX + SANDBOX -->|stdout + images| CODEGEN + CODEGEN -->|Results| SLACK + + SLACK -->|"!location"| LAP + + style Ingestion fill:#e8f5e9,stroke:#2e7d32,color:#000 + style Storage fill:#e3f2fd,stroke:#1565c0,color:#000 + style Visualization fill:#fff3e0,stroke:#e65100,color:#000 + style DataExport fill:#f3e5f5,stroke:#6a1b9a,color:#000 + style AI fill:#fce4ec,stroke:#b71c1c,color:#000 + style Tracking fill:#f5f5f5,stroke:#9e9e9e,color:#000,stroke-dasharray: 5 5 + style Radio fill:#f5f5f5,stroke:#9e9e9e,color:#000,stroke-dasharray: 5 5 +``` -## Port +## System overview -lappy-server: 8050 +The compose stack deploys eight cooperating containers: -car-to-influx: 8085 +1. **InfluxDB 3** – Time-series database seeded with a tiny example dataset. +2. **InfluxDB 3 Explorer** – Web UI for browsing and querying telemetry. +3. **Grafana** – Pre-provisioned dashboards that visualise the stored telemetry. Load your own dashboard provisioning files into `installer/grafana/dashboards/`. +4. **Sandbox** - *Under active development.* Connecting InfluxDB3 with LLM for natural language queries and analysis. +5. **Slack bot d.b.a. Lappy** – Optional automation/notification bot for race ops. +6. **Lap analysis app** – *Under active development.* Dash-based location data visualiser and lap timer. (Useful if GPS data is available.) +7. **Startup data loader** – Seeds the database on boot with sample CAN frames. +8. **File uploader** – Streams uploaded CSV logs into InfluxDB using the shared DBC file. +9. **Data downloader** - Scans InfluxDB periodically, visual SQL query builder, and CSV export service. -InfluxDB: 8086 +Detailed documentation for each service is available in `docs/containers/`. -MangoDB: 3000 (not in this repo) +## Sample data & DBC files -## No Port Assigned +The repository ships with `example.dbc` (a minimal CAN database) and a sample dataset (`2025-01-01-00-00-00.csv`) containing four rows of synthetic telemetry. Replace both assets with production data when working with real vehicles. -Slackbot +## Working with environment variables +Every container reads its credentials from the `.env` file co-located with `docker-compose.yml`. Refer to `installer/.env.example` for the exhaustive list. Never commit real tokens—keep personal overrides in `.env` and add `.env` to your global gitignore. 
+## Documentation index -## Car-to-influx +- [Compose stack reference](docs/docker-compose.md) +- [Container documentation](docs/containers/) +- [Grafana dashboards](installer/grafana/) +- [Startup data loader](installer/startup-data-loader/README.md) -Car to influx listeners for CAN frames from the car, and load it into Influx DB +## Contributing -## Server Endpoint +TBD -The server exposes a single HTTP endpoint for ingesting CAN messages: +## Hardware Dependencies +https://github.com/Western-Formula-Racing/ECU_25 -``` -POST http://3.98.181.12:8085/can -``` -### Single Message +## Acknowledgements +This project was developed in 2024 and maintained by the Western Formula Racing Data Acquisition team, inspired by the team's prior work on telemetry systems for our Formula SAE vehicles. +https://github.com/Western-Formula-Racing/RaspberryPi-CAN-DAQ-MVP +https://github.com/Western-Formula-Racing/daq-2023 -json -```json -{ - "messages": [ - { - "id": "0x1A3", // CAN ID as string (hex or decimal) - "data": [10, 20, 30, 40, 50, 60, 70, 80], // Data bytes as array of integers - "timestamp": 1648123456.789 // Unix timestamp in seconds - } - ] -} -``` +We also want to acknowledge the open-source tools and libraries that make this project possible. Key components include: +* Docker / Docker Compose for containerisation +* InfluxDB 3 for time-series storage +* Grafana for visualisation +* Python open-source packages (NumPy, Pandas, Requests, etc.) used throughout the stack + +### Explore more work from Western Formula Racing + +If you’re interested in our team’s broader engineering projects, here are some of the hardware systems developed alongside this DAQ stack: +* https://github.com/Western-Formula-Racing/ECU_25 +* https://github.com/Western-Formula-Racing/Custom-BMS_25 +* https://github.com/Western-Formula-Racing/mobo-25 + +## Under Active Development +1. Slack bot improvements + sandbox +2. Lap analysis app + -### Multiple Messages - -json - -```json -{ - "messages": [ - { - "id": "0x1A3", - "data": [10, 20, 30, 40, 50, 60, 70, 80], - "timestamp": 1648123456.789 - }, - { - "id": "26", // Decimal ID also accepted - "data": [1, 2, 3, 4, 5, 6, 7, 8], - "timestamp": 1648123456.790 - } - ] -} -``` \ No newline at end of file +## License +AGPL-3.0 License. See LICENSE file for details. \ No newline at end of file diff --git a/car-to-influx/Dockerfile b/car-to-influx/Dockerfile deleted file mode 100644 index 0456687..0000000 --- a/car-to-influx/Dockerfile +++ /dev/null @@ -1,30 +0,0 @@ -# Dockerfile for car-to-influx - -# 1. Base image -FROM python:3.9-slim - -# 2. Set working directory -WORKDIR /app - -# 3. Install build tools (for cantools) and clean up apt cache -RUN apt-get update \ - && apt-get install -y --no-install-recommends gcc \ - && rm -rf /var/lib/apt/lists/* - -# 4. Install Python dependencies -# If you have a requirements.txt, copy & install it; otherwise install explicitly: -# COPY requirements.txt . -# RUN pip install --no-cache-dir -r requirements.txt -RUN pip install --no-cache-dir \ - flask \ - influxdb-client \ - cantools - -# 5. Copy application code -COPY . . - -# 6. Expose the port your Flask app listens on -EXPOSE 8085 - -# 7. 
Default command -CMD ["python", "listener.py"] \ No newline at end of file diff --git a/car-to-influx/initialbuild.md b/car-to-influx/initialbuild.md deleted file mode 100644 index 6b938ca..0000000 --- a/car-to-influx/initialbuild.md +++ /dev/null @@ -1,7 +0,0 @@ -docker run -d \ - --name car-to-influx \ - --restart always \ - -v /home/ubuntu/car-to-influx:/app \ - -p 8085:8085 \ - --cpu-shares 4096 \ - car-to-influx \ No newline at end of file diff --git a/car-to-influx/listener.py b/car-to-influx/listener.py deleted file mode 100644 index 8c8f056..0000000 --- a/car-to-influx/listener.py +++ /dev/null @@ -1,148 +0,0 @@ -# on the server -from flask import Flask, request, jsonify -from influxdb_client import InfluxDBClient, Point -from influxdb_client.client.write_api import WriteOptions -from datetime import datetime, timezone, timedelta -import cantools, os, logging - -# ─── CONFIG ──────────────────────────────────────────────────────────────── -INFLUX_URL = os.getenv("INFLUX_URL", "http://influxwfr:8086") -INFLUX_TOKEN = os.getenv("INFLUX_TOKEN", "s9XkBC7pKOlb92-N9M40qilmxxoBe4wrnki4zpS_o0QSVTuMSQRQBerQB9Zv0YV40tmYayuX3w4G2MNizdy3qw==") -INFLUX_ORG = os.getenv("INFLUX_ORG", "WFR") -INFLUX_BUCKET = os.getenv("INFLUX_BUCKET", "ourCar") -DBC_FILE = os.getenv("DBC_FILE", "testing_data/20240129 Gen5 CAN DB.dbc") -PORT = int(os.getenv("PORT", "8085")) -# ──────────────────────────────────────────────────────────────────────────── - -# Load DBC at startup -try: - db = cantools.database.load_file(DBC_FILE) - print(f"Loaded DBC: {DBC_FILE}") -except Exception as e: - raise SystemExit(f"Failed to load DBC file: {e}") - -# Prepare Influx client + write_api -client = InfluxDBClient(url=INFLUX_URL, token=INFLUX_TOKEN, org=INFLUX_ORG) -write_api = client.write_api(write_options=WriteOptions(batch_size=500, flush_interval=1000)) - -app = Flask(__name__) - -# —— Optional: log to file as well as stderr —————————————— -file_handler = logging.FileHandler("listener.log") -file_handler.setLevel(logging.INFO) -file_handler.setFormatter(logging.Formatter( - "%(asctime)s %(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")) -app.logger.addHandler(file_handler) -app.logger.setLevel(logging.ERROR) - - -def _bytes_from_field(data_field): - """Convert incoming 'data' (list[str|int] or str) into a bytes object.""" - if isinstance(data_field, list): - return bytes(int(b) & 0xFF for b in data_field) - if isinstance(data_field, str): - return bytes(int(b, 16 if b.lower().startswith("0x") else 10) & 0xFF for b in data_field.split()) - raise ValueError(f"Unrecognized data format: {data_field!r}") - - -def _ts_to_datetime(ts: float) -> datetime: - """Convert the incoming numeric timestamp to an aware UTC datetime. - - If the sender gives absolute Unix‑epoch seconds, use them directly. - If the value looks like a small relative timestamp (e.g. < year 2000), - map it to *now* minus that relative offset so points show up in recent dashboards. - """ - if ts > 946_684_800: # 2000‑01‑01 in epoch seconds - return datetime.fromtimestamp(ts, timezone.utc) - # Treat as relative seconds since log start — anchor to now. 
- return datetime.now(timezone.utc) - timedelta(seconds=(max(0.0, ts))) - - -@app.route("/can", methods=["POST"]) -def ingest_can(): - """Ingest JSON CAN frames, decode with DBC, and write to InfluxDB.""" - try: - payload = request.get_json(force=True) - except Exception as e: - return jsonify(error=f"Invalid JSON: {e}"), 400 - - # Accept top‑level list or object with "messages" - frames = payload.get("messages") if isinstance(payload, dict) else payload - if not isinstance(frames, list): - return jsonify(error="Expected JSON array or object with 'messages' list"), 400 - - app.logger.info(f"Received {len(frames)} frames") - points = [] - - for idx, frame in enumerate(frames): - try: - can_id = int(frame["id"], 0) # handles "0x1A" or "26" - data = _bytes_from_field(frame["data"]) - ts_raw = float(frame["timestamp"]) - ts_dt = _ts_to_datetime(ts_raw) - msg = db.get_message_by_frame_id(can_id) - except (KeyError, ValueError) as e: - app.logger.warning(f"Frame #{idx}: malformed or missing field → {e}") - continue - except Exception as e: - app.logger.warning(f"Frame #{idx}: DBC error → {e}") - continue - - try: - decoded = msg.decode(data) - except Exception as e: - app.logger.warning(f"Frame #{idx}: decode error → {e}") - continue - - for signal_name, value in decoded.items(): - try: - signal = msg.get_signal_by_name(signal_name) - description = signal.comment or "" - unit = signal.unit or "" - except Exception: - description = "" - unit = "" - - if hasattr(value, "value"): - sensor_val = float(value.value) - signal_label = value.name - else: - sensor_val = float(value) - signal_label = str(value) - - pt = ( - Point("canBus") - .tag("messageName", msg.name) - .tag("signalName", signal_name) - .tag("rawCAN", format(can_id, "#x")) - .field("sensorReading", sensor_val) - .field("unit", unit) - .field("description", description) - .field("signalLabel", signal_label) - .time(ts_dt) - ) - points.append(pt) - - if not points: - app.logger.info("No points decoded – nothing to write.") - return jsonify(status="no_points"), 200 - - try: - # log raw line-protocol for each Point - for pt in points: - app.logger.info("LP: " + pt.to_line_protocol()) - # also log the complete batch payload as a single string - full_payload = "\n".join(pt.to_line_protocol() for pt in points) - app.logger.info("Full InfluxDB payload:\n%s", full_payload) - # write to influx - write_api.write(bucket=INFLUX_BUCKET, record=points) - app.logger.info(f"Wrote {len(points)} points to InfluxDB bucket '{INFLUX_BUCKET}'") - except Exception as e: - app.logger.error(f"Influx write failed: {e}") - return jsonify(error=f"Influx write failed: {e}"), 500 - - return jsonify(status="ok", written=len(points)), 201 - - -if __name__ == "__main__": - app.run(host="0.0.0.0", port=PORT) diff --git a/car-to-influx/sender.py b/car-to-influx/sender.py deleted file mode 100644 index 356076d..0000000 --- a/car-to-influx/sender.py +++ /dev/null @@ -1,75 +0,0 @@ -import re -import time -import requests -import json - -# Configuration -FILE = "testing_data/CanTraceJuly11.txt" -URL = "http://3.98.181.12:8085/can" -BATCH_SIZE = 450 # frames per POST -RATE = 1.0 # batches per second - -def parse_raw_frame(line): - """ - Parse a raw CAN-line into a dict with fields: - index, can_id, dlc, data_bytes (list of ints), timestamp, direction - """ - pattern = r'\s*(\d+)\s+(\w+)(?:\s+X)?\s+(\d+)\s+([0-9\s]+)\s+(\d+\.\d+)\s+([RX])' - m = re.match(pattern, line) - if not m: - return None - idx, cid, dlc, data_bytes, ts, dir_ = m.groups() - return { - "index": int(idx), - 
"can_id": cid, - "dlc": int(dlc), - "data_bytes": [int(b) for b in data_bytes.split() if b], - "timestamp": float(ts), - "direction": dir_ - } - -def load_lines(filepath): - with open(filepath, 'r') as f: - return [l for l in f.read().splitlines() if l.strip()] - -def chunkify(lst, size): - for i in range(0, len(lst), size): - yield lst[i:i+size] - -def send_batches(lines, url, batch_size, delay): - session = requests.Session() - for batch in chunkify(lines, batch_size): - frames = [] - for l in batch: - parsed = parse_raw_frame(l) - if not parsed: - continue - # Map to server's expected keys - frames.append({ - "id": parsed["can_id"], - "data": parsed["data_bytes"], - "timestamp": parsed["timestamp"] - }) - if not frames: - continue - - try: - resp = session.post(url, json=frames, timeout=5) - resp.raise_for_status() - print(f"Sent {len(frames)} frames → {resp.status_code}") - except requests.HTTPError as e: - print(f"Error sending batch: {e} → status {resp.status_code}") - print("Server response:", resp.text) - except Exception as e: - print(f"Unexpected error: {e}") - - # print server response - print("Server response:", resp.text) - - time.sleep(delay) - -if __name__ == "__main__": - lines = load_lines(FILE) - print(f"Loaded {len(lines)} raw lines from {FILE}") - delay = 1.0 / RATE - send_batches(lines, URL, BATCH_SIZE, delay) diff --git a/car-to-influx/testing_data/raw data b/car-to-influx/testing_data/raw data deleted file mode 100644 index 94d29a5..0000000 --- a/car-to-influx/testing_data/raw data +++ /dev/null @@ -1,24 +0,0 @@ -Arbitration ID (Hex) Data (Bytes) Description ---------------------------------------------------------------------------------------------- -0x173 [0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0] M173_Modulation_And_Flux_Info -0x172 [0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF] M172_Torque_And_Timer_Info -0x194 [0x00, 0x01, 0x7F, 0xFF, 0x00, 0x00, 0x00, 0x00] M194_Read_Write_Param_Response -0x193 [0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0xFF] M193_Read_Write_Param_Command -0x192 [0x01, 0x00, 0x03, 0xE8, 0x00, 0xFA, 0x55, 0xAA] M192_Command_Message -0x171 [0xFF, 0xEE, 0xDD, 0xCC, 0xBB, 0xAA, 0x99, 0x88] M171_Fault_Codes -0x170 [0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0] M170_Internal_States -0x169 [0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70, 0x80] M169_Internal_Voltages -0x168 [0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11] M168_Flux_ID_IQ_Info -0x167 [0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99] M167_Voltage_Info -0x166 [0x01, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77] M166_Current_Info -0x165 [0xDE, 0xAD, 0xBE, 0xEF, 0xFE, 0xED, 0xBA, 0xBE] M165_Motor_Position_Info -0x164 [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08] M164_Digital_Input_Status -0x163 [0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0] M163_Analog_Input_Voltages -0x162 [0xFE, 0xED, 0xBA, 0xBE, 0xDE, 0xAD, 0xBE, 0xEF] M162_Temperature_Set_3 -0x161 [0x01, 0xFF, 0x02, 0xFF, 0x03, 0xFF, 0x04, 0xFF] M161_Temperature_Set_2 -0x160 [0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22] M160_Temperature_Set_1 -0x174 [0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70, 0x80] M174_Firmware_Info -0x175 [0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0] M175_Diag_Data_Message -0x514 [0x64, 0x00, 0xC8, 0x00, 0x32, 0x00, 0xFA, 0x00] BMS_Current_Limit -0x176 [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08] M176_Fast_Info -0x177 [0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10] M177_Torque_Capability diff --git a/car-to-influx/testing_data/small_CanTrace.txt b/car-to-influx/testing_data/small_CanTrace.txt deleted 
file mode 100644 index 73ca8da..0000000 --- a/car-to-influx/testing_data/small_CanTrace.txt +++ /dev/null @@ -1,6 +0,0 @@ - 0 514 8 120 0 6 0 0 0 0 0 19.439960 R - 0 1712 8 0 163 14 218 1 0 0 68 19.440220 R - 0 176 8 0 0 253 255 0 0 0 0 19.441670 R - 0 192 8 0 0 0 0 1 0 0 0 19.442860 R - 0 176 8 0 0 255 255 0 0 0 0 19.444740 R - 0 2048 X 8 0 0 0 0 1 0 0 0 19.444990 R \ No newline at end of file diff --git a/dev-utils/ci/slicks-sensor-check.py b/dev-utils/ci/slicks-sensor-check.py new file mode 100644 index 0000000..5225dba --- /dev/null +++ b/dev-utils/ci/slicks-sensor-check.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +""" +CI check: use slicks to discover sensors for June 2025. + +Verifies the sandbox's InfluxDB connection and that the slicks +package can reach the database and return a non-empty sensor list. + +Requires env vars: INFLUX_URL, INFLUX_TOKEN, INFLUX_DB +(set via GitHub secrets). +""" + +import sys +from datetime import datetime + +import slicks + + +def main() -> None: + sensors = slicks.discover_sensors( + start_time=datetime(2025, 6, 1), + end_time=datetime(2025, 7, 1), + ) + + if not sensors: + print("FAIL: discover_sensors returned an empty list for June 2025.") + sys.exit(1) + + print(f"OK: Found {len(sensors)} sensors for June 2025:") + for name in sensors: + print(f" - {name}") + + +if __name__ == "__main__": + main() diff --git a/dev-utils/ci/stack-smoke-test.sh b/dev-utils/ci/stack-smoke-test.sh new file mode 100755 index 0000000..09cd130 --- /dev/null +++ b/dev-utils/ci/stack-smoke-test.sh @@ -0,0 +1,146 @@ +#!/usr/bin/env bash +set -euo pipefail +set -o errtrace + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)" +INSTALLER_DIR="$REPO_ROOT/installer" +ENV_TEMPLATE="$INSTALLER_DIR/.env.example" +ENV_FILE="$INSTALLER_DIR/.env" + +if [[ ! -d "$INSTALLER_DIR" ]]; then + echo "Installer directory not found at $INSTALLER_DIR" >&2 + exit 1 +fi + +if [[ ! -f "$ENV_TEMPLATE" ]]; then + echo "Missing environment template at $ENV_TEMPLATE" >&2 + exit 1 +fi + +if [[ "${CI:-}" == "true" ]]; then + cp "$ENV_TEMPLATE" "$ENV_FILE" +elif [[ ! -f "$ENV_FILE" ]]; then + cp "$ENV_TEMPLATE" "$ENV_FILE" +fi + +pushd "$INSTALLER_DIR" >/dev/null + +export COMPOSE_PROJECT_NAME="${COMPOSE_PROJECT_NAME:-daqstackci}" + +compose() { + docker compose "$@" +} + +cleanup() { + local exit_code=$1 + trap - EXIT + set +e + + compose ps >/dev/null 2>&1 || true + if [[ $exit_code -ne 0 ]]; then + compose logs --tail 200 >/dev/null 2>&1 || true + fi + + if [[ "${KEEP_DAQ_STACK:-0}" != "1" ]]; then + compose down -v --remove-orphans >/dev/null 2>&1 || true + fi + + popd >/dev/null 2>&1 || true + + # Final return (not exit!) + # This avoids Bash trap exit cross-talk + exit "$exit_code" +} +trap 'cleanup $?' 
EXIT + +ENABLED_SERVICES=( + influxdb3 + influxdb3-explorer + grafana + data-downloader-api + data-downloader-scanner + data-downloader-frontend + lap-detector + startup-data-loader + file-uploader +) + +compose up --detach --build --remove-orphans "${ENABLED_SERVICES[@]}" + +inspect_container() { + local name="$1" + local container_id + container_id=$(docker ps -a --filter "name=${name}" --format '{{.ID}}' | head -n 1) + + if [[ -z "$container_id" ]]; then + echo "" + return + fi + + local status + local exit_code + status=$(docker inspect -f '{{.State.Status}}' "$container_id" 2>/dev/null || echo "") + exit_code=$(docker inspect -f '{{.State.ExitCode}}' "$container_id" 2>/dev/null || echo "0") + + echo "$container_id $status $exit_code" +} + +ready_timeout_seconds=$((SECONDS + 600)) + +while (( SECONDS < ready_timeout_seconds )); do + not_ready=() + ready_summary=() + + for service in "${ENABLED_SERVICES[@]}"; do + container_info="$(inspect_container "$service")" + + if [[ -z "$container_info" ]]; then + not_ready+=("$service(no-container-yet)") + continue + fi + + container_id=$(echo "$container_info" | awk '{print $1}') + status=$(echo "$container_info" | awk '{print $2}') + code=$(echo "$container_info" | awk '{print $3}') + + if [[ "$service" == "startup-data-loader" ]]; then + if [[ "$status" == "exited" && "$code" -eq 0 ]]; then + ready_summary+=("$service=exited(0)") + else + not_ready+=("$service=$status/$code") + fi + continue + fi + + if [[ "$status" != "running" ]]; then + not_ready+=("$service=$status") + continue + fi + + has_health="$(docker inspect -f '{{if .State.Health}}true{{else}}false{{end}}' "$container_id")" + health_status="$(docker inspect -f '{{if .State.Health}}{{.State.Health.Status}}{{else}}none{{end}}' "$container_id")" + + if [[ "$has_health" == "true" && "$health_status" != "healthy" ]]; then + not_ready+=("$service=health:$health_status") + continue + fi + + if [[ "$has_health" == "true" ]]; then + ready_summary+=("$service=running/$health_status") + else + ready_summary+=("$service=running") + fi + done + + if [[ ${#not_ready[@]} -eq 0 ]]; then + echo "All services ready: ${ready_summary[*]}" + exit 0 + fi + + echo "Waiting for services: ${not_ready[*]}" + sleep 10 +done + +echo "Timed out waiting for services to become ready." 
>&2 +exit 1 \ No newline at end of file diff --git a/dev-utils/data-generator.py b/dev-utils/data-generator.py new file mode 100644 index 0000000..6f332cd --- /dev/null +++ b/dev-utils/data-generator.py @@ -0,0 +1,309 @@ +import os +import csv +import math +import random +from datetime import datetime, timedelta + +OUTPUT_DIR = "./generated-days" +DAYS = 5 +SESSIONS_PER_DAY = 3 +SESSION_LENGTH_MIN = 15 # minutes (increased for better data duration) +FREQ_HZ = 50 # Base frequency +START_DATE = datetime(2025, 1, 1) +PROTOCOL = "CAN" + +# CSV layout expected by startup-data-loader/load_data.py +CSV_HEADER = ["relative_ms", "protocol", "can_id"] + [f"byte{i}" for i in range(8)] + +# CAN IDs from installer/example.dbc +ID_VCU_STATUS = 192 +ID_PEDAL_SENSORS = 193 +ID_STEERING_WHEEL = 194 +ID_BMS_STATUS = 512 +ID_BMS_CELL_STATS = 513 +ID_MC_COMMAND = 256 +ID_MC_FEEDBACK = 257 +ID_WHEEL_SPEEDS = 768 +ID_IMU_DATA = 1024 +ID_COOLING_STATUS = 1280 + +os.makedirs(OUTPUT_DIR, exist_ok=True) + +def clamp(v, lo, hi): + return max(lo, min(hi, v)) + +def encode_unsigned(value, scale, offset, bits): + raw = int((value - offset) / scale) + return raw & ((1 << bits) - 1) + +def encode_signed(value, scale, offset, bits): + raw = int((value - offset) / scale) + if raw < 0: + raw = (1 << bits) + raw + return raw & ((1 << bits) - 1) + +def to_le_bytes(value, length): + """Return little-endian byte list of the provided raw value.""" + return list(value.to_bytes(length, byteorder="little")) + +class VehicleSimulation: + def __init__(self): + self.time = 0.0 + self.speed = 0.0 # m/s + self.soc = 95.0 # % + self.battery_temp = 30.0 # C + self.motor_temp = 40.0 # C + self.coolant_temp = 35.0 # C + self.odometer = 0.0 + self.lap_timer = 0.0 + + # Internal state for smooth random walks + self.steer_target = 0.0 + self.throttle_target = 0.0 + self.brake_target = 0.0 + + def step(self, dt): + self.time += dt + self.lap_timer += dt + + # Generate driver inputs (random walk) + if random.random() < 0.05: + self.steer_target = random.gauss(0, 60) # degrees + + # Accelerate/Brake cycle logic + cycle_time = self.time % 40 # 40 second cycle + if cycle_time < 15: # Accelerate + self.throttle_target = 80 + random.gauss(0, 10) + self.brake_target = 0 + elif cycle_time < 20: # Coast + self.throttle_target = 0 + self.brake_target = 0 + elif cycle_time < 30: # Brake + self.throttle_target = 0 + self.brake_target = 60 + random.gauss(0, 10) + else: # Low speed / Turn + self.throttle_target = 20 + self.brake_target = 0 + + # Smooth inputs + self.throttle_curr = self.throttle_target # Simplified + self.brake_curr = self.brake_target # Simplified + self.steer_curr = self.steer_target # Simplified + + # Physics (Very basic) + accel = 0.0 + if self.throttle_curr > 5: + accel = (self.throttle_curr / 100.0) * 10.0 # Max 10 m/s^2 + if self.brake_curr > 5: + accel -= (self.brake_curr / 100.0) * 15.0 # Max brake + + # Drag + drag = 0.01 * self.speed * self.speed + accel -= drag + + self.speed += accel * dt + self.speed = max(0, self.speed) + + # Energy + power = self.speed * accel * 200 # mass 200kg approx + constants + current = power / 400.0 # 400V nominal + + # Heat + self.battery_temp += abs(current) * 0.0001 * dt - (self.battery_temp - 25) * 0.001 * dt + self.motor_temp += abs(power) * 0.00005 * dt - (self.motor_temp - 30) * 0.002 * dt + self.coolant_temp = (self.motor_temp + self.battery_temp) / 2.0 - 5.0 + + self.soc -= abs(current) * 0.00005 * dt + self.current = current + self.accel_lat = (self.speed ** 2) * 
math.sin(math.radians(self.steer_curr)) * 0.1 # Fake cornering + + def get_vcu_status(self): + state = 4 if self.speed > 0.1 else 1 # Drive vs Ready + safety = 1 + inv_en = 1 + + data = [0] * 8 + data[0] = (state & 0x0F) | ((safety & 1) << 4) | ((inv_en & 1) << 5) + return [ID_VCU_STATUS] + data + + def get_pedal_sensors(self): + apps = clamp(self.throttle_curr, 0, 100) + brake_f = clamp(self.brake_curr * 1.5, 0, 200) # bar + brake_r = clamp(self.brake_curr * 1.0, 0, 200) # bar + + apps_raw = encode_unsigned(apps, 0.1, 0, 16) + bf_raw = encode_unsigned(brake_f, 0.1, 0, 16) + br_raw = encode_unsigned(brake_r, 0.1, 0, 16) + + data = ( + to_le_bytes(apps_raw, 2) + + to_le_bytes(apps_raw, 2) + # APPS2 same as 1 + to_le_bytes(bf_raw, 2) + + to_le_bytes(br_raw, 2) + ) + return [ID_PEDAL_SENSORS] + data + + def get_steering(self): + angle = clamp(self.steer_curr, -180, 180) + angle_raw = encode_signed(angle, 0.1, 0, 16) + + drs = 1 if self.speed > 20 and self.throttle_curr > 90 else 0 + launch = 0 + + data = to_le_bytes(angle_raw, 2) + [drs, launch, 0, 0, 0, 0] + return [ID_STEERING_WHEEL] + data + + def get_bms_status(self): + volt = 400.0 + (self.soc - 50) * 0.5 - (self.current * 0.05) + curr = self.current + + v_raw = encode_unsigned(volt, 0.1, 0, 16) + i_raw = encode_signed(curr, 0.1, 0, 16) + soc_raw = encode_unsigned(self.soc, 0.5, 0, 8) + + data = to_le_bytes(v_raw, 2) + to_le_bytes(i_raw, 2) + [soc_raw, 0, 0, 0] + return [ID_BMS_STATUS] + data + + def get_bms_cells(self): + avg_cell = (400.0 + (self.soc - 50) * 0.5) / 100.0 # 100s assumed + min_cell = avg_cell - 0.02 + max_cell = avg_cell + 0.02 + + min_raw = encode_unsigned(min_cell, 0.001, 0, 16) + max_raw = encode_unsigned(max_cell, 0.001, 0, 16) + avg_raw = encode_unsigned(avg_cell, 0.001, 0, 16) + temp_raw = encode_unsigned(self.battery_temp, 1, -40, 8) + + data = to_le_bytes(max_raw, 2) + to_le_bytes(min_raw, 2) + to_le_bytes(avg_raw, 2) + [temp_raw, 0] + return [ID_BMS_CELL_STATS] + data + + def get_mc_command(self): + torque_req = self.throttle_curr * 2.0 # approx 200Nm max + if self.brake_curr > 0: + torque_req = -self.brake_curr # Regen + + trq_raw = encode_signed(torque_req, 0.1, 0, 16) + spd_raw = encode_unsigned(6000, 1, 0, 16) # Limit + + data = to_le_bytes(trq_raw, 2) + to_le_bytes(spd_raw, 2) + [0, 0, 0, 0] + return [ID_MC_COMMAND] + data + + def get_mc_feedback(self): + rpm = self.speed * 60.0 * 3.0 # approx gear ratio / wheel size factor + torque = self.throttle_curr * 2.0 + + rpm_raw = encode_signed(rpm, 1, 0, 16) + trq_raw = encode_signed(torque, 0.1, 0, 16) + dc_i_raw = encode_signed(self.current, 0.1, 0, 16) + temp_raw = encode_unsigned(self.motor_temp, 1, -40, 8) + + data = to_le_bytes(rpm_raw, 2) + to_le_bytes(trq_raw, 2) + to_le_bytes(dc_i_raw, 2) + [temp_raw, 0] + return [ID_MC_FEEDBACK] + data + + def get_wheel_speeds(self): + rpm = self.speed * 60.0 / (0.4 * 3.14159) # 0.4m dia tire approx + + # Add slight slip/noise + fl = rpm * (1.0 + random.gauss(0, 0.01)) + fr = rpm * (1.0 + random.gauss(0, 0.01)) + rl = rpm * (1.0 + random.gauss(0, 0.02)) + rr = rpm * (1.0 + random.gauss(0, 0.02)) + + data = ( + to_le_bytes(encode_unsigned(fl, 1, 0, 16), 2) + + to_le_bytes(encode_unsigned(fr, 1, 0, 16), 2) + + to_le_bytes(encode_unsigned(rl, 1, 0, 16), 2) + + to_le_bytes(encode_unsigned(rr, 1, 0, 16), 2) + ) + return [ID_WHEEL_SPEEDS] + data + + def get_imu(self): + ax = (self.throttle_curr - self.brake_curr) / 100.0 * 1.5 # g approx + ay = self.accel_lat / 9.81 + az = 1.0 + yaw = self.speed * 
math.tan(math.radians(self.steer_curr)) / 1.53 # wheelbase + + data = ( + to_le_bytes(encode_signed(ax, 0.001, 0, 16), 2) + + to_le_bytes(encode_signed(ay, 0.001, 0, 16), 2) + + to_le_bytes(encode_signed(az, 0.001, 0, 16), 2) + + to_le_bytes(encode_signed(yaw, 0.1, 0, 16), 2) + ) + return [ID_IMU_DATA] + data + + def get_cooling(self): + pump = 100 if self.motor_temp > 50 else 50 + fan = 100 if self.coolant_temp > 60 else 0 + + data = [ + encode_unsigned(self.coolant_temp - 5, 1, -40, 8), # In + encode_unsigned(self.coolant_temp, 1, -40, 8), # Out + encode_unsigned(pump, 1, 0, 8), + encode_unsigned(fan, 1, 0, 8), + 0, 0, 0, 0 + ] + return [ID_COOLING_STATUS] + data + + +def generate_session_csv(session_start, output_dir): + session_name = session_start.strftime("%Y-%m-%d-%H-%M-%S") + fname = os.path.join(output_dir, f"{session_name}.csv") + + duration_ms = SESSION_LENGTH_MIN * 60 * 1000 + dt = 1.0 / FREQ_HZ + interval_ms = int(1000 / FREQ_HZ) + + sim = VehicleSimulation() + + with open(fname, "w", newline="") as f: + writer = csv.writer(f) + writer.writerow(CSV_HEADER) + + rel_ms = 0 + while rel_ms <= duration_ms: + sim.step(dt) + + # Interleave messages. + # For simplicity, we write all messages at the same timestamp block, + # or we could round-robin. + # Writing all provides dense data which is good for demos. + + msgs = [ + sim.get_vcu_status(), + sim.get_pedal_sensors(), + sim.get_steering(), + sim.get_bms_status(), + sim.get_bms_cells(), + sim.get_mc_command(), + sim.get_mc_feedback(), + sim.get_wheel_speeds(), + sim.get_imu(), + sim.get_cooling() + ] + + for msg_data in msgs: + # msg_data is [ID, b0, b1...] + # row: [rel_ms, protocol, id, b0...b7] + row = [rel_ms, PROTOCOL, msg_data[0]] + msg_data[1:] + writer.writerow(row) + + rel_ms += interval_ms + + print(f"Generated: {fname}") + + +def main(): + print(f"Generating data for {DAYS} days...") + for day in range(DAYS): + day_date = START_DATE + timedelta(days=day) + + for session in range(SESSIONS_PER_DAY): + minutes_offset = session * (SESSION_LENGTH_MIN + 30) + random.randint(5, 15) + session_start = day_date + timedelta(minutes=minutes_offset) + generate_session_csv(session_start, OUTPUT_DIR) + + print("Done!") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/docs/containers/file-uploader.md b/docs/containers/file-uploader.md new file mode 100644 index 0000000..7432e8c --- /dev/null +++ b/docs/containers/file-uploader.md @@ -0,0 +1,30 @@ +# File uploader + +The file uploader is a Flask application that streams CAN CSV logs into InfluxDB 3. It exposes a simple web UI for selecting the destination bucket and monitoring progress. + +## Ports + +- Host port **8084** maps to the Flask development server. + +## Environment variables + +| Variable | Description | Default | +| --- | --- | --- | +| `INFLUXDB_URL` | API endpoint for bucket discovery and writes. | `http://influxdb3:8181` | +| `INFLUXDB_TOKEN` | Token with write access to the target bucket. | `dev-influxdb-admin-token` | +| `FILE_UPLOADER_WEBHOOK_URL` | Optional webhook invoked when uploads finish. | empty | +| `SLACK_WEBHOOK_URL` | Fallback webhook if the dedicated uploader value is unset. | empty | + +## Features + +- Validates uploaded files (CSV format only). +- Streams rows asynchronously with backpressure to protect the database. +- Decodes frames using `example.dbc`, located alongside the app. +- Posts completion notifications to the configured webhook. + +## Usage + +1. Visit http://localhost:8084. +2. 
Choose a target bucket from the drop-down (populated from the InfluxDB API). +3. Upload one or more CSV files exported from the vehicle logger. +4. Monitor progress via the live event stream; notifications are sent upon completion if a webhook is configured. diff --git a/docs/containers/grafana.md b/docs/containers/grafana.md new file mode 100644 index 0000000..8049d4c --- /dev/null +++ b/docs/containers/grafana.md @@ -0,0 +1,28 @@ +# Grafana + +Grafana provides dashboards for visualising the telemetry stored in InfluxDB 3. + +## Ports + +- Host port **8087** maps to Grafana’s internal port **3000**. + +## Configuration + +| Variable | Description | Default | +| --- | --- | --- | +| `GRAFANA_ADMIN_PASSWORD` | Password for the `admin` Grafana user. | `dev-grafana-password` | +| `INFLUXDB_TOKEN` | Injected automatically from `.env` via provisioning. | `dev-influxdb-admin-token` | + +Provisioning files live under `installer/grafana/provisioning/`. They configure the InfluxDB datasource and automatically import dashboards from `installer/grafana/dashboards/`. + +## First login + +1. Visit http://localhost:8087. +2. Sign in with username `admin` and the password defined in `.env`. +3. Explore the “Vehicle Overview” dashboard to confirm the sample data loaded correctly. + +## Customisation + +- Drop additional JSON dashboards into `installer/grafana/dashboards/`. +- Update `installer/grafana/provisioning/datasources/influxdb.yml` to point at different buckets or organisations. +- Install additional plugins by editing `GF_INSTALL_PLUGINS` in `docker-compose.yml`. diff --git a/docs/containers/influxdb2-legacy-build.md b/docs/containers/influxdb2-legacy-build.md new file mode 100644 index 0000000..756a6ac --- /dev/null +++ b/docs/containers/influxdb2-legacy-build.md @@ -0,0 +1,20 @@ +# Legacy InfluxDB 2 bootstrap + +> **Heads up:** The project now uses InfluxDB 3 exclusively. This document is kept for historical context only. + +If you need to spin up an old InfluxDB 2 instance (for example to migrate historical data) you can use the following command as a starting point: + +```bash +docker run -d --name influxdb2 \ + -p 8086:8086 \ + -v ~/influxdb/data:/var/lib/influxdb2 \ + -v ~/influxdb/config:/etc/influxdb2 \ + -e DOCKER_INFLUXDB_INIT_MODE=setup \ + -e DOCKER_INFLUXDB_INIT_USERNAME=admin \ + -e DOCKER_INFLUXDB_INIT_PASSWORD=YOUR_INFLUXDB_PASSWORD \ + -e DOCKER_INFLUXDB_INIT_ORG=WFR \ + -e DOCKER_INFLUXDB_INIT_BUCKET=ourCar \ + influxdb:2 +``` + +For current deployments use the Docker Compose stack under `installer/`, which provisions InfluxDB 3 along with Grafana and the rest of the telemetry tooling. \ No newline at end of file diff --git a/docs/containers/influxdb3-explorer.md b/docs/containers/influxdb3-explorer.md new file mode 100644 index 0000000..b6a40e7 --- /dev/null +++ b/docs/containers/influxdb3-explorer.md @@ -0,0 +1,26 @@ +# InfluxDB 3 Explorer + +The explorer container packages InfluxData’s lightweight UI for browsing InfluxDB 3 clusters. It is optional but useful for inspecting data without installing additional tools. + +## Ports + +- Host port **8888** maps to port **80** inside the container. + +## Configuration + +| Variable | Description | Default | +| --- | --- | --- | +| `EXPLORER_SESSION_SECRET` | Flask session secret used by the UI. | `dev-explorer-session-key` | +| `INFLUXDB_ADMIN_TOKEN` | Token used to authenticate with InfluxDB 3. 
| `dev-influxdb-admin-token` | + +The token and default connection details are provided via the mounted `installer/influxdb3-explorer-config/config.json` file. + +## Data persistence + +Explorer preferences (saved queries, profiles) are stored in the `influxdb3-explorer-db` Docker volume. + +## Usage tips + +1. Visit http://localhost:8888 after the stack is running. +2. The UI auto-populates the API URL, token, and default database using the mounted config file. +3. Use the query builder to run SQL or InfluxQL queries against the sample bucket `WFR25`. diff --git a/docs/containers/influxdb3.md b/docs/containers/influxdb3.md new file mode 100644 index 0000000..a9fac23 --- /dev/null +++ b/docs/containers/influxdb3.md @@ -0,0 +1,34 @@ +# InfluxDB 3 + +The `influxdb3` service hosts the team’s time-series database. It boots with a development token and user that can be overridden through `.env`. + +## Ports + +- Exposes port **9000** on the host, mapped to **8181** inside the container. +- TCP health check ensures the service is reachable before dependants start. + +## Configuration + +| Variable | Description | Default | +| --- | --- | --- | +| `INFLUXDB_URL` | Internal service URL consumed by other containers. | `http://influxdb3:8181` | +| `INFLUXDB_INIT_USERNAME` | Admin username created on first boot. | `admin` | +| `INFLUXDB_INIT_PASSWORD` | Admin password. | `dev-influxdb-password` | +| `INFLUXDB_ADMIN_TOKEN` | API token shared across the stack. | `dev-influxdb-admin-token` | + +The token is also stored in `installer/influxdb3-admin-token.json` so that the server can import it during initialisation. Regenerate both the environment variable and JSON file if you rotate credentials. + +## Data persistence + +Data is stored in the `influxdb3-data` Docker volume. Removing the volume (`docker compose down -v`) resets the database. + +## Logs & troubleshooting + +- View logs with `docker compose logs -f influxdb3`. +- Inspect the server shell with `docker compose exec influxdb3 /bin/sh`. +- Health endpoint: `curl http://localhost:9000/health`. + +## Related services + +- **Startup data loader** seeds the bucket with the example dataset on first run. +- **Grafana**, **file-uploader**, and **slackbot** authenticate using `INFLUXDB_ADMIN_TOKEN`. \ No newline at end of file diff --git a/docs/containers/lap-detector.md b/docs/containers/lap-detector.md new file mode 100644 index 0000000..0b21cf2 --- /dev/null +++ b/docs/containers/lap-detector.md @@ -0,0 +1,17 @@ +# Lap Detector + +`lap-detector` is a Dash web application used for lap time analysis and visualisation. + +## Ports + +- Host port **8050** maps to the Dash server running inside the container. + +## Configuration + +The service mounts the entire `installer/lap-detector/` directory into the container. Update files in that folder to change the UI, then restart the container. + +## Development tips + +- Edit Python files locally; the volume mount reloads code on container restart. +- Inspect logs with `docker compose logs -f lap-detector` if the UI fails to start. +- Add new Python dependencies to `installer/lap-detector/requirements.txt` and rebuild the image (`docker compose build lap-detector`). diff --git a/docs/containers/slackbot.md b/docs/containers/slackbot.md new file mode 100644 index 0000000..28933ee --- /dev/null +++ b/docs/containers/slackbot.md @@ -0,0 +1,20 @@ +# Slackbot + +The Slack bot listens in Socket Mode and delivers notifications about data imports, telemetry status, and manual commands. 
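As a rough illustration of the Socket Mode pattern described above — a sketch only, not the project's `slack_bot.py`, and assuming the commonly used `slack_bolt` package — a minimal bot that answers a made-up `status` keyword could look like this:

```python
import os

from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler

# Bot token authorises Web API calls; the app token opens the Socket Mode connection.
app = App(token=os.environ["SLACK_BOT_TOKEN"])


@app.message("status")
def reply_with_status(message, say):
    # Hypothetical handler: echo a short status line back to the channel.
    say(f"Telemetry stack is up (asked by <@{message['user']}>)")


if __name__ == "__main__":
    SocketModeHandler(app, os.environ["SLACK_APP_TOKEN"]).start()
```

Socket Mode keeps an outbound WebSocket open to Slack, so no inbound ports need to be exposed on the host.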
+ +## Requirements + +- `SLACK_BOT_TOKEN` and `SLACK_APP_TOKEN` must be set in `.env`. +- Ensure Socket Mode is enabled in your Slack app configuration. + +## Behavior + +- Sends webhook notifications when file uploads complete. +- Provides command handlers defined in `installer/slackbot/slack_bot.py`. +- Reads the optional `SLACK_DEFAULT_CHANNEL` to determine where to post updates. + +## Development tips + +- Run `docker compose logs -f slackbot` to see Socket Mode connection status. +- Use `docker compose exec slackbot python slack_bot.py` for interactive debugging. +- Leave Slack credentials blank to skip starting the service in development. diff --git a/docs/containers/startup-data-loader.md b/docs/containers/startup-data-loader.md new file mode 100644 index 0000000..cdf045e --- /dev/null +++ b/docs/containers/startup-data-loader.md @@ -0,0 +1,28 @@ +# Startup data loader + +The startup data loader seeds InfluxDB 3 with a small, deterministic dataset on first boot. It can also backfill additional files if you mount them into the container. + +## Responsibilities + +- Loads CSV files from `/data` (mounted from `installer/startup-data-loader/data/`; copy `2024-01-01-00-00-00.csv.md` to a `.csv` file for the bundled sample). +- Uses `example.dbc` to decode CAN frames into human-readable metrics. +- Writes decoded metrics directly to InfluxDB. + +## Environment variables + +| Variable | Description | Default | +| --- | --- | --- | +| `INFLUXDB_TOKEN` | Token used for direct writes. | `dev-influxdb-admin-token` | +| `INFLUXDB_URL` | Target InfluxDB endpoint. | `http://influxdb3:8181` | + +## Extending the dataset + +1. Drop additional CSV files (following the `YYYY-MM-DD-HH-MM-SS.csv` naming convention) into `installer/startup-data-loader/data/`. +2. Replace `example.dbc` with your real CAN database. +3. Rebuild the image (`docker compose build startup-data-loader`) and restart the service. + +## Troubleshooting + +- Logs are available via `docker compose logs -f startup-data-loader`. +- Progress is tracked in `/app/load_data_progress.json` inside the container. +- The importer supports resuming partially processed files; remove the progress file to force a clean run. \ No newline at end of file diff --git a/docs/data-downloader-demo.md b/docs/data-downloader-demo.md new file mode 100644 index 0000000..b44a919 --- /dev/null +++ b/docs/data-downloader-demo.md @@ -0,0 +1,7 @@ +## Demo +The main Docker Compose script seeds the database with your .csv data, parsed using the .dbc file, and loads it into InfluxDB 3. The data-downloader component provides a web-based query builder to help you quickly locate and download the data you need. +notes +visual-query + + +data-download diff --git a/docs/docker-compose.md b/docs/docker-compose.md new file mode 100644 index 0000000..9a7772c --- /dev/null +++ b/docs/docker-compose.md @@ -0,0 +1,68 @@ +# Docker Compose Reference + +The `installer/docker-compose.yml` file orchestrates the complete DAQ telemetry stack. This document explains how the services fit together, which volumes are persisted, and how to customise the deployment. 
+ +## High-level architecture + +```text +┌────────────┐ ┌────────────┐ +│ Startup │ │ InfluxDB 3 │ +│ data loader├───────────────────────────────▶│ + Explorer │ +└────────────┘ └────────────┘ + │ │ + │ ▼ + │ ┌─────────────────────┐ + │ │ Grafana dashboards │ + ▼ └─────────────────────┘ +┌────────────┐ │ +│ File │ ▼ +│ uploader ├──────────────────────────────────▶│ Slack bot & +└────────────┘ │ notifications +``` + +All containers join the `datalink` bridge network, enabling them to communicate using Docker hostnames (for example `http://influxdb3:8181`). + +## Volumes + +| Volume | Mounted by | Purpose | +| --- | --- | --- | +| `influxdb3-data` | `influxdb3` | Persists InfluxDB 3 metadata and stored telemetry. | +| `influxdb3-explorer-db` | `influxdb3-explorer` | Keeps explorer UI preferences. | +| `grafana-storage` | `grafana` | Stores dashboards, plugins, and Grafana state. | + +Remove volumes with `docker compose down -v` if you need a clean slate. + +## Environment file + +Docker Compose automatically reads `.env` files located next to `docker-compose.yml`. See [`installer/.env.example`](../installer/.env.example) for the full list of variables. Key values include `INFLUXDB_URL`, `INFLUXDB_ADMIN_TOKEN`, and the optional Slack credentials. + +## Conditional services + +The Slack bot relies on valid `SLACK_APP_TOKEN` and `SLACK_BOT_TOKEN` values. Leave them empty (the default) to run the stack without Slack connectivity. All other services start unconditionally. + +## Health checks + +- `influxdb3` exposes a TCP healthcheck on port 8181 to ensure the database is reachable before dependants start. +- `startup-data-loader` waits an additional 5 seconds (`sleep 5`) to give InfluxDB 3 time to finish booting before loading the sample data. + +## Customisation tips + +- Override exposed ports in `docker-compose.override.yml` if default host ports conflict with local services. +- Drop in custom dashboards under `installer/grafana/dashboards/`—Grafana auto-imports JSON files at startup. +- Swap the example dataset in `installer/startup-data-loader/data/` for real telemetry and update `example.dbc` to match your CAN specification. + +## Useful commands + +```bash +# Preview the full resolved configuration +cd installer +docker compose config + +# Tail logs for a specific service +docker compose logs -f startup-data-loader + +# Execute a shell inside the InfluxDB 3 container +docker compose exec influxdb3 /bin/sh +``` + +For detailed service documentation, browse the files under [`docs/containers/`](containers/). 
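When scripting against the stack from the host, it can help to block until InfluxDB 3 is actually answering before issuing queries. A minimal sketch, assuming the default host mapping of port 9000 to the container's internal 8181 and the `/health` endpoint noted in the InfluxDB 3 container docs:

```python
import time
import urllib.error
import urllib.request

HEALTH_URL = "http://localhost:9000/health"  # host port 9000 maps to 8181 inside the container


def wait_for_influxdb(timeout_s: int = 120, poll_s: float = 2.0) -> bool:
    """Poll the health endpoint until it responds or the timeout expires."""
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        try:
            with urllib.request.urlopen(HEALTH_URL, timeout=5) as resp:
                if resp.status < 500:
                    return True
        except (urllib.error.URLError, OSError):
            pass  # not up yet; keep polling
        time.sleep(poll_s)
    return False


if __name__ == "__main__":
    print("InfluxDB 3 ready" if wait_for_influxdb() else "Timed out waiting for InfluxDB 3")
```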
\ No newline at end of file diff --git a/influxdb_initialbuild.md b/influxdb_initialbuild.md deleted file mode 100644 index 4a7abea..0000000 --- a/influxdb_initialbuild.md +++ /dev/null @@ -1 +0,0 @@ -```sudo docker run -d --name influxwfr -p 8086:8086 -v ~/influxdb/data:/var/lib/influxdb2 -v ~/influxdb/config:/etc/influxdb2 -e DOCKER_INFLUXDB_INIT_MODE=setup -e DOCKER_INFLUXDB_INIT_USERNAME=admin -e DOCKER_INFLUXDB_INIT_PASSWORD=YOUR_INFLUXDB_PASSWORD -e DOCKER_INFLUXDB_INIT_ORG=WFR -e DOCKER_INFLUXDB_INIT_BUCKET=ourCar influxdb:2``` diff --git a/installer/.env.example b/installer/.env.example new file mode 100644 index 0000000..8c1c2b0 --- /dev/null +++ b/installer/.env.example @@ -0,0 +1,93 @@ +# ------------------------------------------------------------ +# DBC file path +# ------------------------------------------------------------ +DBC_FILE_PATH=example.dbc + +# ------------------------------------------------------------ +# InfluxDB credentials +# ------------------------------------------------------------ +INFLUXDB_URL=http://influxdb3:8181 +INFLUXDB_INIT_USERNAME=admin +INFLUXDB_INIT_PASSWORD=password +INFLUXDB_ADMIN_TOKEN=apiv3_dev-influxdb-admin-token + +# ------------------------------------------------------------ +# Grafana credentials +# ------------------------------------------------------------ +GRAFANA_ADMIN_PASSWORD=password + +# ------------------------------------------------------------ +# Grafana Cloudflare Zero Trust SSO +# ------------------------------------------------------------ +# Cloudflare Access injects Cf-Access-Authenticated-User-Email on every authenticated request. +# Grafana's auth proxy reads it to auto-create/login users with the Editor role. +# No extra config needed here — values are hardcoded in docker-compose.yml because +# they are not secret. Ensure grafana.westernformularacing.org is protected by a +# Cloudflare Access Application in the Zero Trust dashboard. 
+ +# ------------------------------------------------------------ +# Explorer UI +# ------------------------------------------------------------ +EXPLORER_SESSION_SECRET=dev-explorer-session-key + +# ------------------------------------------------------------ +# Slack configuration +# ------------------------------------------------------------ +ENABLE_SLACK=false +SLACK_DEFAULT_CHANNEL=C0123456789 + +# Your real Slack tokens +SLACK_BOT_TOKEN=xoxb-TOKEN +SLACK_APP_TOKEN=xapp-TOKEN +SLACK_WEBHOOK_URL=https://hooks.slack.com/services/TOKEN + +# File uploader webhook (fallback to Slack webhook) +FILE_UPLOADER_WEBHOOK_URL=${SLACK_WEBHOOK_URL} + +# Optional debugging +DEBUG=0 + +# ------------------------------------------------------------ +# Data Downloader configuration +# ------------------------------------------------------------ +INFLUX_HOST=http://influxdb3:8181 +INFLUX_TOKEN=apiv3_dev-influxdb-admin-token +INFLUX_DATABASE=WFR25 +INFLUX_SCHEMA=iox +INFLUX_TABLE=WFR25 + +DATA_DIR=/app/data + +SCANNER_YEAR=2025 +SCANNER_BIN=hour +SCANNER_INCLUDE_COUNTS=true +SCANNER_INITIAL_CHUNK_DAYS=31 + +SENSOR_WINDOW_DAYS=7 +# How far back to look for sensor data when scanning +SENSOR_LOOKBACK_DAYS=365 +# If no sensor data is found in the lookback period, use this fallback range +SENSOR_FALLBACK_START=2025-06-10T00:00:00 +SENSOR_FALLBACK_END=2025-07-10T00:00:00 + +SCAN_INTERVAL_SECONDS=3600 + +VITE_API_BASE_URL=http://localhost:8000 +ALLOWED_ORIGINS=http://localhost:3000,http://localhost:5173 + +# End Data Downloader configuration + +# ------------------------------------------------------------ +# AI Code Generation (Sandbox) configuration +# ------------------------------------------------------------ +# Cohere API key for AI-powered code generation +COHERE_API_KEY=your-cohere-api-key-here + +# Cohere model to use (default: command-r-plus) +COHERE_MODEL=command-r-plus + +# Maximum number of retries when generated code fails (default: 2) +MAX_RETRIES=2 + +# InfluxDB database name for telemetry queries (default: telemetry) +INFLUXDB_DATABASE=telemetry diff --git a/installer/DONT-FORGET-ENV.md b/installer/DONT-FORGET-ENV.md new file mode 100644 index 0000000..788a81d --- /dev/null +++ b/installer/DONT-FORGET-ENV.md @@ -0,0 +1,4 @@ +Did you rename .env.example to .env? + +Do ```cp .env.example .env``` before you run ```docker compose up```! + diff --git a/installer/README.md b/installer/README.md new file mode 100644 index 0000000..c06eab1 --- /dev/null +++ b/installer/README.md @@ -0,0 +1,128 @@ +# DAQ Installer + +This directory contains the Docker Compose deployment used to run the full telemetry pipeline for the Western Formula Racing data acquisition (DAQ) system. It is safe to publish publicly—sensitive credentials are injected at runtime from a local `.env` file and the sample datasets are intentionally anonymised. + +## Contents + +- `docker-compose.yml` – Orchestrates all runtime containers. +- `.env.example` – Template for environment variables required by the stack. +- `influxdb3-admin-token.json` – Development token consumed by the InfluxDB 3 server on first start. +- `influxdb3-explorer-config/` – Configuration for the optional InfluxDB web explorer container. +- Service folders (for example `file-uploader/`, `startup-data-loader/`, `slackbot/`) – Each contains the Docker context and service-specific source code. + +## Prerequisites + +- Docker Desktop 4.0+ or Docker Engine 24+ +- Docker Compose V2 (bundled with recent Docker releases) + +## Quick start + +1. 
Copy the environment template and adjust the values for your environment: + ```bash + cd installer + cp .env.example .env + # Update tokens/passwords before deploying to production + ``` +2. Launch the stack: + ```bash + docker compose up -d + ``` +3. Verify the services: + ```bash + docker compose ps + docker compose logs influxdb3 | tail + ``` +4. Tear the stack down when you are finished: + ```bash + docker compose down -v + ``` + +The first boot seeds InfluxDB 3 with the sample CAN data in `startup-data-loader/data/`. Subsequent restarts skip the import unless you remove the volumes. + +## Environment variables + +All secrets and tokens are defined in `.env`. The defaults provided in `.env.example` are development-safe placeholders and **must** be replaced for production deployments. + +| Variable | Purpose | Default | +| --- | --- | --- | +| `DBC_FILE_PATH` | Path to the CAN DBC file used by startup-data-loader and file-uploader and other services | `example.dbc` | +| `INFLUXDB_URL` | Internal URL used by services to talk to InfluxDB 3 | `http://influxdb3:8181` | +| `INFLUXDB_INIT_USERNAME` / `INFLUXDB_INIT_PASSWORD` | Bootstraps the initial admin user | `admin` / `dev-influxdb-password` | +| `INFLUXDB_ADMIN_TOKEN` | API token shared by all services | `dev-influxdb-admin-token` | +| `GRAFANA_ADMIN_PASSWORD` | Grafana administrator password | `dev-grafana-password` | +| `EXPLORER_SESSION_SECRET` | Secret for the InfluxDB 3 Explorer UI | `dev-explorer-session-key` | +| `ENABLE_SLACK` | Gate to disable Slack-specific services | `false` | +| `SLACK_BOT_TOKEN` / `SLACK_APP_TOKEN` | Credentials for the Slack bot (optional) | empty | +| `SLACK_WEBHOOK_URL` | Incoming webhook for notifications (optional) | empty | +| `SLACK_DEFAULT_CHANNEL` | Default Slack channel ID for outbound messages | `C0123456789` | +| `FILE_UPLOADER_WEBHOOK_URL` | Webhook invoked after uploads complete | inherits `SLACK_WEBHOOK_URL` | +| `COHERE_API_KEY` | Cohere API key for AI-powered code generation | empty | +| `COHERE_MODEL` | Cohere model to use | `command-a-03-2025` | +| `MAX_RETRIES` | Maximum retries for failed code execution | `2` | +| `INFLUXDB_DATABASE` | Database name for telemetry queries | `telemetry` | +| `DEBUG` | Enables verbose logging for selected services | `0` | + +> **Security reminder:** Replace every default value when deploying outside of a local development environment. Generate secure tokens with `python3 -c "import secrets; print(secrets.token_urlsafe(32))"`. + +## Service catalogue + +| Service | Ports | Description | +| --- | --- | --- | +| `influxdb3` | `9000` (mapped to `8181` internally) | Core time-series database. Initialised with the admin token from `.env`. | +| `influxdb3-explorer` | `8888` | Lightweight UI for browsing data in InfluxDB 3. | +| `data-downloader` | `3000` | Periodically downloads CAN CSV archives from the DAQ server. Visual SQL query builder included. | +| `grafana` | `8087` | Visualises telemetry with pre-provisioned dashboards. | +| `slackbot` | n/a | Socket-mode Slack bot for notifications and automation (optional). Integrates with code-generator for AI queries. | +| `lap-detector` | `8050` | Dash-based lap analysis web application. | +| `startup-data-loader` | n/a | Seeds InfluxDB with sample CAN frames on first boot. | +| `file-uploader` | `8084` | Web UI for uploading CAN CSV archives and streaming them into InfluxDB. | +| `sandbox` | n/a | Custom Python execution environment with internet access for running AI-generated code and InfluxDB queries. 
| +| `code-generator` | `3030` (internal) | AI-powered code generation service using Cohere. Generates Python code from natural language. | + +## Data and DBC files + +- `startup-data-loader/data/` ships with `2025-01-01-00-00-00.csv`, a csv file to exercise the import pipeline without exposing production telemetry. +- Both the loader and the uploader share `example.dbc`, a minimal CAN database that defines two demo messages. Replace this file with your team’s CAN definition when working with real data. + +## Observability + +- Grafana dashboards are provisioned automatically from `grafana/dashboards/` and use the datasource in `grafana/provisioning/datasources/`. + +## Troubleshooting tips + +- **Service fails to connect to InfluxDB** – Confirm the token in `.env` matches `influxdb3-admin-token.json`. Regenerate the volumes with `docker compose down -v` if you rotate credentials. +- **Re-import sample data** – Run `docker compose down -v` and restart the stack to re-trigger the data loader. +- **Slack services are optional** – Leave Slack variables empty or set `ENABLE_SLACK=false` to skip starting the bot during development. +- **AI code generation not working** – Ensure `COHERE_API_KEY` is set in `.env`. Check logs with `docker compose logs code-generator`. +- **Sandbox execution fails** – Verify sandbox container is running with `docker ps | grep sandbox`. Check logs with `docker compose logs sandbox`. + +## AI-Powered Code Generation + +The stack includes an AI-powered code generation service that allows natural language queries via Slack: + +**Usage:** +``` +!agent plot battery voltage over the last hour +!agent show me motor temperature correlation with RPM +!agent analyze inverter efficiency +``` + +**Features:** +- Automatic code generation from natural language using Cohere AI +- Self-correcting retry mechanism (up to 2 retries on failure) +- Secure sandboxed execution environment +- Auto-generation of plots and visualizations +- Direct InfluxDB access for telemetry queries + +**Setup:** +1. Add `COHERE_API_KEY` to your `.env` file +2. Optional: Configure `COHERE_MODEL` and `MAX_RETRIES` +3. Services start automatically with the stack + +See `sandbox/README.md` for detailed documentation. + +## Next steps + +- Replace the example dataset and `example.dbc` file with production equivalents once you are ready to ingest real telemetry. +- Update the Grafana dashboards under `grafana/dashboards/` to match your data model. +- Review each service’s README in its respective directory for implementation details. 
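To sanity-check that the seeded sample data is queryable, the sketch below uses the same `influxdb3-python` client the backend services rely on. The host port, token, database, and the `iox.WFR25` schema/table names are the development defaults from `.env.example` and the data-downloader configuration — substitute your own values.

```python
from influxdb_client_3 import InfluxDBClient3

# Development defaults from installer/.env.example; replace them for real deployments.
HOST = "http://localhost:9000"       # host port mapped to the influxdb3 container
TOKEN = "apiv3_dev-influxdb-admin-token"
DATABASE = "WFR25"

# Schema/table names follow the data-downloader settings (INFLUX_SCHEMA=iox, INFLUX_TABLE=WFR25).
SQL = 'SELECT DISTINCT "signalName" FROM "iox"."WFR25" LIMIT 20'

with InfluxDBClient3(host=HOST, token=TOKEN, database=DATABASE) as client:
    result = client.query(SQL)      # returns a PyArrow table
    for name in result.column("signalName"):
        print(name.as_py())
```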
\ No newline at end of file diff --git a/installer/config.yml b/installer/config.yml new file mode 100644 index 0000000..164a003 --- /dev/null +++ b/installer/config.yml @@ -0,0 +1,33 @@ +tunnel: fcc90054-958b-4086-b105-b0417898d206 +credentials-file: C:\Users\haoru\.cloudflared\fcc90054-958b-4086-b105-b0417898d206.json + +ingress: + - hostname: explore.0001200.xyz + service: http://127.0.0.1:8888 + originRequest: + noTLSVerify: true + headers: + Host: localhost + + - hostname: influxdb3.0001200.xyz + service: http://127.0.0.1:9000 + originRequest: + noTLSVerify: true + headers: + Host: localhost + + - hostname: influxdb3-data.0001200.xyz + service: http://127.0.0.1:8181 + originRequest: + noTLSVerify: true + headers: + Host: localhost + + - hostname: grafana.0001200.xyz + service: http://127.0.0.1:8087 + originRequest: + noTLSVerify: true + headers: + Host: localhost + + - service: http_status:404 \ No newline at end of file diff --git a/installer/data-downloader/.env.example b/installer/data-downloader/.env.example new file mode 100644 index 0000000..5345fa3 --- /dev/null +++ b/installer/data-downloader/.env.example @@ -0,0 +1,15 @@ +INFLUX_HOST=http://influxdb3:8181 +INFLUX_TOKEN=apiv3_dev-influxdb-admin-token +INFLUX_DATABASE=WFR25 +INFLUX_SCHEMA=iox +INFLUX_TABLE=WFR25 +DATA_DIR=/app/data +SCANNER_YEAR=2025 +SCANNER_BIN=hour +SCANNER_INCLUDE_COUNTS=true +SCANNER_INITIAL_CHUNK_DAYS=31 +SENSOR_WINDOW_DAYS=7 +SENSOR_LOOKBACK_DAYS=30 +SCAN_INTERVAL_SECONDS=3600 +VITE_API_BASE_URL=http://localhost:8000 +ALLOWED_ORIGINS=http://localhost:3000,http://localhost:5173 diff --git a/installer/data-downloader/README.md b/installer/data-downloader/README.md new file mode 100644 index 0000000..ca336b3 --- /dev/null +++ b/installer/data-downloader/README.md @@ -0,0 +1,80 @@ +# Data Downloader Webapp + +This project packages the DAQ data-downloader experience into a small stack: + +- **React frontend** (`frontend/`) for browsing historic runs, triggering scans, and annotating runs. +- **FastAPI backend** (`backend/`) that reads/writes JSON state, exposes REST endpoints, and can launch scans on demand. +- **Scanner worker** (separate Docker service) that periodically runs the InfluxDB availability scan plus the unique sensor collector and exports the results to `data/runs.json` and `data/sensors.json`. + +Both JSON files are shared through the `./data` directory so every service (frontend, API, scanner) sees the latest state. Notes added in the UI are stored in the same JSON payload next to the run entry. + +## Getting started + +1. Duplicate the sample env file and fill in the InfluxDB credentials: + ```bash + cp .env.example .env + ``` +2. Build + launch everything: + ```bash + docker compose up --build + ``` +3. Open http://localhost:3000 to access the web UI, and keep the API running on http://localhost:8000 if you want to call it directly. + +## Runtime behaviour +```mermaid +sequenceDiagram + participant Worker as periodic_worker.py + participant Service as DataDownloaderService + participant Scanner as server_scanner.py + participant Slicks as slicks library + participant InfluxDB as InfluxDB3 + participant Storage as JSON Storage + + Worker->>Service: run_full_scan(source="periodic") + Service->>Service: Sort seasons by year (newest first) + + loop For each season (WFR25, WFR26) + Service->>Scanner: scan_runs(ScannerConfig{
database: season.database,
year: season.year}) + Scanner->>Slicks: connect_influxdb3(url, token, db) + Scanner->>Slicks: scan_data_availability(start, end, table, bin_size) + + loop Adaptive scanning (inside slicks) + Slicks->>InfluxDB: Try query_grouped_bins()
(DATE_BIN + COUNT(*)) + alt Success + InfluxDB-->>Slicks: Return bins with counts + else Failure (timeout/size) + Slicks->>Slicks: Binary subdivision + Slicks->>InfluxDB: query_exists_per_bin()
(SELECT 1 LIMIT 1 per bin) + InfluxDB-->>Slicks: Return existence flags + end + end + + Slicks-->>Scanner: ScanResult (windows) + Scanner-->>Service: List[dict] (formatted runs) + + Service->>Service: fetch_unique_sensors(season.database) + Service->>Storage: runs_repos[season.name].merge_scanned_runs(runs) + Storage-->>Storage: Atomic write to runs_WFR25.json + Service->>Storage: sensors_repos[season.name].write_sensors(sensors) + Storage-->>Storage: Atomic write to sensors_WFR25.json + + alt Season scan failed + Service->>Service: Log error, continue to next season + end + end + + Service->>Storage: status_repo.mark_finish(success) + Storage-->>Storage: Update scanner_status.json +``` + +- `frontend` serves the compiled React bundle via nginx and now proxies `/api` requests (including `/api/scan` and `/api/scanner-status`) directly to the FastAPI container. When the UI is loaded from anything other than `localhost`, the client automatically falls back to relative `/api/...` calls so a single origin on a VPS still reaches the backend. Override `VITE_API_BASE_URL` if you want the UI to talk to a different host (for example when running `npm run dev` locally) and keep that host in `ALLOWED_ORIGINS`. +- `api` runs `uvicorn backend.app:app`, exposing + - `GET /api/runs` and `GET /api/sensors` + - `POST /api/runs/{key}/note` to persist notes per run + - `POST /api/scan` to fire an on-demand scan that refreshes both JSON files in the background + - `POST /api/data/query` to request a timeseries slice for a given `signalName` between two timestamps; the response echoes the exact SQL (matching `sql.py`) so the frontend can display the query being executed. +- `scanner` reuses the same backend image but runs `python -m backend.periodic_worker` so the scan + unique sensor collection happens at the interval defined by `SCAN_INTERVAL_SECONDS`. + +Set `INFLUX_SCHEMA`/`INFLUX_TABLE` to the same values used in the legacy scripts (e.g. `iox` + `WFR25`) so the SQL sent from `backend/server_scanner.py` and `backend/sql.py` matches the proven queries. + +All services mount `./data` inside the container and the FastAPI layer manages file I/O with atomic writes to keep data consistent between the worker and UI actions. If the rolling lookback produces no sensors, the collector now falls back to the oldest/newest run windows discovered by the date scanner, so no manual date tuning is required. diff --git a/installer/data-downloader/backend/Dockerfile b/installer/data-downloader/backend/Dockerfile new file mode 100644 index 0000000..7aa3cbf --- /dev/null +++ b/installer/data-downloader/backend/Dockerfile @@ -0,0 +1,15 @@ +FROM python:3.11-slim AS base + +ENV PYTHONUNBUFFERED=1 \ + PYTHONDONTWRITEBYTECODE=1 + +WORKDIR /app + +COPY backend/requirements.txt /tmp/requirements.txt +RUN pip install --no-cache-dir -r /tmp/requirements.txt + +COPY backend /app/backend + +EXPOSE 8000 + +CMD ["uvicorn", "backend.app:app", "--host", "0.0.0.0", "--port", "8000"] diff --git a/installer/data-downloader/backend/__init__.py b/installer/data-downloader/backend/__init__.py new file mode 100644 index 0000000..048223d --- /dev/null +++ b/installer/data-downloader/backend/__init__.py @@ -0,0 +1 @@ +# Makes backend a package. 
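A rough sketch of driving the data-downloader API described above from a script. Endpoint paths match the routes in `backend/app.py`; the `localhost:8000` base URL and the exact fields of the scanner-status payload are assumptions for illustration.

```python
import time

import requests

API = "http://localhost:8000"  # FastAPI backend as exposed in the data-downloader compose setup

# Kick off an on-demand scan; the backend schedules it as a background task.
requests.post(f"{API}/api/scan", timeout=10).raise_for_status()

# Poll the scanner status until the background task reports it is no longer running.
while True:
    status = requests.get(f"{API}/api/scanner-status", timeout=10).json()
    print("scanner status:", status)
    if status.get("status") != "running":   # exact status values are an assumption
        break
    time.sleep(10)

# Fetch the refreshed run list for the default season.
runs = requests.get(f"{API}/api/runs", timeout=10).json()
print(f"{len(runs.get('runs', []))} runs known")
```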
diff --git a/installer/data-downloader/backend/app.py b/installer/data-downloader/backend/app.py new file mode 100644 index 0000000..b4b4dea --- /dev/null +++ b/installer/data-downloader/backend/app.py @@ -0,0 +1,151 @@ +from __future__ import annotations + +from datetime import datetime + +from fastapi import BackgroundTasks, FastAPI, HTTPException +from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import HTMLResponse +from pydantic import BaseModel + +from backend.config import get_settings +from backend.services import DataDownloaderService + + +class NotePayload(BaseModel): + note: str + + +class DataQueryPayload(BaseModel): + signal: str + start: datetime + end: datetime + limit: int | None = 2000 + no_limit: bool = False + + +settings = get_settings() +service = DataDownloaderService(settings) + +app = FastAPI(title="DAQ Data Downloader API") +app.add_middleware( + CORSMiddleware, + allow_origins=settings.allowed_origins, + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + + +@app.get("/api/health") +def healthcheck() -> dict: + return {"status": "ok"} + + +@app.get("/api/seasons") +def list_seasons() -> List[dict]: + return service.get_seasons() + + +@app.get("/api/runs") +def list_runs(season: str | None = None) -> dict: + return service.get_runs(season=season) + + +@app.get("/api/sensors") +def list_sensors(season: str | None = None) -> dict: + return service.get_sensors(season=season) + + +@app.get("/api/scanner-status") +def scanner_status() -> dict: + return service.get_scanner_status() + + +@app.post("/api/runs/{key}/note") +def save_note(key: str, payload: NotePayload, season: str | None = None) -> dict: + run = service.update_note(key, payload.note.strip(), season=season) + if not run: + raise HTTPException(status_code=404, detail=f"Run {key} not found (season={season})") + return run + + +@app.post("/api/scan") +def trigger_scan(background_tasks: BackgroundTasks) -> dict: + background_tasks.add_task(service.run_full_scan, "manual") + return {"status": "scheduled"} + + +@app.post("/api/query") +def query_signal(payload: DataQueryPayload, season: str | None = None) -> dict: + limit = None if payload.no_limit else (payload.limit or 2000) + return service.query_signal_series( + payload.signal, + payload.start, + payload.end, + limit, + season=season + ) + + +@app.get("/", response_class=HTMLResponse) +def index(): + """Simple status page for debugging.""" + influx_status = "Unknown" + influx_color = "gray" + try: + service._log_influx_connectivity() + influx_status = "Connected" + influx_color = "green" + except Exception as e: + influx_status = f"Error: {e}" + influx_color = "red" + + # Default to first season for overview + runs = service.get_runs() + sensors = service.get_sensors() + scanner_status = service.get_scanner_status() + seasons_list = service.get_seasons() + seasons_html = ", ".join([f"{s['name']} ({s['year']})" for s in seasons_list]) + + html = f""" + + + + DAQ Data Downloader Status + + + +

+        <h1>DAQ Data Downloader Status</h1>
+
+        <h2>System Status</h2>
+        <p>InfluxDB Connection: <span style="color: {influx_color}">{influx_status}</span></p>
+        <p>Scanner Status: {scanner_status.get('status', 'Unknown')} (Last run: {scanner_status.get('last_run', 'Never')})</p>
+        <p>API Version: 1.1.0 (Multi-Season Support)</p>
+
+        <h2>Active Config</h2>
+        <p>Seasons Configured: {seasons_html}</p>
+
+        <h2>Default Season Stats ({seasons_list[0]['name'] if seasons_list else 'None'})</h2>
+        <ul>
+            <li>Runs Found: {len(runs.get('runs', []))}</li>
+            <li>Sensors Found: {len(sensors.get('sensors', []))}</li>
+        </ul>
+
+        <p>API Docs | JSON Seasons List | Frontend</p>

+ + + """ + return HTMLResponse(content=html) diff --git a/installer/data-downloader/backend/config.py b/installer/data-downloader/backend/config.py new file mode 100644 index 0000000..653f70e --- /dev/null +++ b/installer/data-downloader/backend/config.py @@ -0,0 +1,92 @@ +from __future__ import annotations + +from functools import lru_cache +import os +from typing import List +from pydantic import BaseModel, Field + + +def _parse_origins(raw: str | None) -> List[str]: + if not raw or raw.strip() == "*": + return ["*"] + return [origin.strip() for origin in raw.split(",") if origin.strip()] + + +class SeasonConfig(BaseModel): + name: str # e.g. "WFR25" + year: int # e.g. 2025 + database: str # e.g. "WFR25" + color: str | None = None # e.g. "222 76 153" + + +def _parse_seasons(raw: str | None) -> List[SeasonConfig]: + """Parse SEASONS env var: "WFR25:2025:222 76 153,WFR26:2026:...".""" + if not raw: + # Default fallback if not set + return [SeasonConfig(name="WFR25", year=2025, database="WFR25", color="#DE4C99")] + + seasons = [] + for part in raw.split(","): + part = part.strip() + if not part: + continue + try: + # Split into at most 3 parts: Name, Year, Color + parts = part.split(":", 2) + name = parts[0] + + if len(parts) >= 2: + year = int(parts[1]) + else: + # Malformed or simple format not supported purely by regex? + # Actually if just "WFR25", split gives ['WFR25'] + # require at least year + continue + + color = parts[2] if len(parts) > 2 else None + + # Assume DB name matches Season Name + seasons.append(SeasonConfig(name=name, year=year, database=name, color=color)) + except ValueError: + continue + + if not seasons: + return [SeasonConfig(name="WFR25", year=2025, database="WFR25")] + + # Sort by year descending (newest first) + seasons.sort(key=lambda s: s.year, reverse=True) + return seasons + + +class Settings(BaseModel): + """Centralised configuration pulled from environment variables.""" + + data_dir: str = Field(default_factory=lambda: os.getenv("DATA_DIR", "./data")) + + influx_host: str = Field(default_factory=lambda: os.getenv("INFLUX_HOST", "http://localhost:9000")) + influx_token: str = Field(default_factory=lambda: os.getenv("INFLUX_TOKEN", "")) + + # Global/Default Influx settings (used for connectivity check or default fallback) + influx_schema: str = Field(default_factory=lambda: os.getenv("INFLUX_SCHEMA", "iox")) + influx_table: str = Field(default_factory=lambda: os.getenv("INFLUX_TABLE", "WFR25")) + + seasons: List[SeasonConfig] = Field(default_factory=lambda: _parse_seasons(os.getenv("SEASONS"))) + + # Scanner settings common to all seasons (unless we want per-season granularity later) + scanner_bin: str = Field(default_factory=lambda: os.getenv("SCANNER_BIN", "hour")) + scanner_include_counts: bool = Field(default_factory=lambda: os.getenv("SCANNER_INCLUDE_COUNTS", "true").lower() == "true") + scanner_initial_chunk_days: int = Field(default_factory=lambda: int(os.getenv("SCANNER_INITIAL_CHUNK_DAYS", "31"))) + + sensor_window_days: int = Field(default_factory=lambda: int(os.getenv("SENSOR_WINDOW_DAYS", "7"))) + sensor_lookback_days: int = Field(default_factory=lambda: int(os.getenv("SENSOR_LOOKBACK_DAYS", "30"))) + + periodic_interval_seconds: int = Field(default_factory=lambda: int(os.getenv("SCAN_INTERVAL_SECONDS", "3600"))) + scan_daily_time: str | None = Field(default_factory=lambda: os.getenv("SCAN_DAILY_TIME")) + + allowed_origins: List[str] = Field(default_factory=lambda: _parse_origins(os.getenv("ALLOWED_ORIGINS", "*"))) + + +@lru_cache(maxsize=1) 
+def get_settings() -> Settings: + """Cache settings so the same instance is reused across the app.""" + return Settings() diff --git a/installer/data-downloader/backend/influx_queries.py b/installer/data-downloader/backend/influx_queries.py new file mode 100644 index 0000000..95a4369 --- /dev/null +++ b/installer/data-downloader/backend/influx_queries.py @@ -0,0 +1,91 @@ +from __future__ import annotations + +from datetime import datetime, timezone + +from influxdb_client_3 import InfluxDBClient3 + +from backend.config import Settings +from backend.table_utils import quote_literal, quote_table + + +def _normalize(dt: datetime) -> datetime: + if dt.tzinfo is None: + dt = dt.replace(tzinfo=timezone.utc) + return dt.astimezone(timezone.utc) + + +def fetch_signal_series( + settings: Settings, + signal: str, + start: datetime, + end: datetime, + limit: int | None, + database: str | None = None +) -> dict: + start_dt = _normalize(start) + end_dt = _normalize(end) + if start_dt >= end_dt: + raise ValueError("start must be before end") + limit_clause = "" + if limit is not None: + limit = max(10, min(limit, 20000)) + limit_clause = f" LIMIT {limit}" + + table_ref = quote_table(f"{settings.influx_schema}.{settings.influx_table}") + signal_literal = quote_literal(signal) + + sql = f""" + SELECT time, "sensorReading" + FROM {table_ref} + WHERE "signalName" = {signal_literal} + AND time >= TIMESTAMP '{start_dt.isoformat()}' + AND time <= TIMESTAMP '{end_dt.isoformat()}' + ORDER BY time{limit_clause} + """ + + # Use provided database or fallback to default setting + target_db = database if database else settings.influx_database + + with InfluxDBClient3(host=settings.influx_host, token=settings.influx_token, database=target_db) as client: + tbl = client.query(sql) + points = [] + for idx in range(tbl.num_rows): + ts_scalar = tbl.column("time")[idx] + value_scalar = tbl.column("sensorReading")[idx] + ts = _timestamp_scalar_to_datetime(ts_scalar) + value = value_scalar.as_py() + points.append( + { + "time": ts.isoformat(), + "value": float(value), + } + ) + + return { + "signal": signal, + "start": start_dt.isoformat(), + "end": end_dt.isoformat(), + "limit": limit, + "database": target_db, + "row_count": len(points), + "points": points, + "sql": " ".join(line.strip() for line in sql.strip().splitlines()), + } + + +def _timestamp_scalar_to_datetime(scalar) -> datetime: + """Convert PyArrow TimestampScalar to timezone-aware datetime.""" + try: + ts = scalar.as_py() + if ts.tzinfo is None: + ts = ts.replace(tzinfo=timezone.utc) + else: + ts = ts.astimezone(timezone.utc) + return ts + except ValueError: + # Fallback for nanosecond precision timestamps that can't fit in datetime micros + ts_ns = getattr(scalar, "value", None) + if ts_ns is None: + raise + ts = datetime.fromtimestamp(ts_ns / 1_000_000_000, tz=timezone.utc) + return ts diff --git a/installer/data-downloader/backend/periodic_worker.py b/installer/data-downloader/backend/periodic_worker.py new file mode 100644 index 0000000..b2e18a5 --- /dev/null +++ b/installer/data-downloader/backend/periodic_worker.py @@ -0,0 +1,53 @@ +from __future__ import annotations + +import asyncio +import logging +from datetime import datetime, timedelta + +from backend.config import get_settings +from backend.services import DataDownloaderService + +logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s") + + +async def run_worker(): + settings = get_settings() + service = DataDownloaderService(settings) + + interval = max(30, 
settings.periodic_interval_seconds) + daily_time = settings.scan_daily_time + + if daily_time: + logging.info(f"Starting periodic scanner loop (daily at {daily_time})") + else: + logging.info(f"Starting periodic scanner loop (interval={interval}s)") + + while True: + try: + logging.info("Running scheduled scan...") + service.run_full_scan(source="periodic") + logging.info("Finished scheduled scan.") + + if daily_time: + # Calculate seconds until next occurrence of daily_time + now = datetime.now() + target_hour, target_minute = map(int, daily_time.split(":")) + target = now.replace(hour=target_hour, minute=target_minute, second=0, microsecond=0) + + if target <= now: + # If target time has passed today, schedule for tomorrow + target += timedelta(days=1) + + sleep_seconds = (target - now).total_seconds() + logging.info(f"Next scan scheduled for {target} (in {sleep_seconds:.0f}s)") + await asyncio.sleep(sleep_seconds) + else: + await asyncio.sleep(interval) + + except Exception: + logging.exception("Scheduled scan failed. Retrying in 60s...") + await asyncio.sleep(60) + + +if __name__ == "__main__": + asyncio.run(run_worker()) diff --git a/installer/data-downloader/backend/requirements.txt b/installer/data-downloader/backend/requirements.txt new file mode 100644 index 0000000..6f2deab --- /dev/null +++ b/installer/data-downloader/backend/requirements.txt @@ -0,0 +1,5 @@ +fastapi==0.115.4 +uvicorn[standard]==0.23.2 +influxdb3-python==0.16.0 +pydantic==2.9.2 +slicks>=0.1.5 diff --git a/installer/data-downloader/backend/server_scanner.py b/installer/data-downloader/backend/server_scanner.py new file mode 100644 index 0000000..4326027 --- /dev/null +++ b/installer/data-downloader/backend/server_scanner.py @@ -0,0 +1,122 @@ +"""Thin wrapper that delegates run scanning to the *slicks* package. + +The public API (``ScannerConfig`` + ``scan_runs``) is unchanged so the +rest of the backend continues to work without modification. +""" + +from __future__ import annotations + +from dataclasses import dataclass +from datetime import datetime, timezone +from hashlib import md5 +from typing import List + +from zoneinfo import ZoneInfo + +import slicks +from slicks.scanner import scan_data_availability + +UTC = timezone.utc + + +@dataclass(frozen=True) +class ScannerConfig: + host: str + token: str + database: str + table: str + year: int = 2025 + bin_size: str = "hour" # hour or day + include_counts: bool = True + initial_chunk_days: int = 31 + timezone_name: str = "America/Toronto" + + @property + def tz(self) -> ZoneInfo: + return ZoneInfo(self.timezone_name) + + @property + def start(self) -> datetime: + # Season starts in August of the previous year + return datetime(self.year - 1, 8, 1, tzinfo=UTC) + + @property + def end(self) -> datetime: + # Season ends at the end of the configured year (Jan 1 of year + 1) + return datetime(self.year + 1, 1, 1, tzinfo=UTC) + + +def _build_key(start_dt_utc: datetime, end_dt_utc: datetime) -> str: + raw = f"{start_dt_utc.isoformat()}_{end_dt_utc.isoformat()}" + return md5(raw.encode()).hexdigest()[:10] + + +def scan_runs(config: ScannerConfig) -> List[dict]: + """Run the adaptive scan via *slicks* and return formatted windows.""" + + # Configure slicks to point at the same InfluxDB instance + # config.table comes in as "schema.table" from services.py + schema, table_name = "iox", config.table + if "." 
in config.table: + parts = config.table.split(".", 1) + schema, table_name = parts[0], parts[1] + + slicks.connect_influxdb3( + url=config.host, + token=config.token, + db=config.database, + schema=schema, + table=table_name, + ) + + # Determine the table string slicks expects ("schema.table") + # We pass None to use the global configured table we just set above + # Or we can just pass table_name if scan_data_availability expects a name? + # scan_data_availability expects "schema.table" or defaults to config. + # Let's rely on the global config we just set. + table = None + + result = scan_data_availability( + start=config.start, + end=config.end, + timezone=config.timezone_name, + table=table, + bin_size=config.bin_size, + include_counts=config.include_counts, + show_progress=False, + ) + + # Convert ScanResult → List[dict] matching the old format + formatted: List[dict] = [] + for _day, windows in result: + for w in windows: + entry = { + "key": _build_key(w.start_utc, w.end_utc), + "start_utc": w.start_utc.isoformat(), + "end_utc": w.end_utc.isoformat(), + "start_local": w.start_local.isoformat(), + "end_local": w.end_local.isoformat(), + "timezone": config.timezone_name, + "bins": w.bins, + } + if config.include_counts: + entry["row_count"] = w.row_count + formatted.append(entry) + + return formatted + + +if __name__ == "__main__": # pragma: no cover + import json + import os + + schema = os.getenv("INFLUX_SCHEMA", "iox") + table = os.getenv("INFLUX_TABLE", "WFR25") + + cfg = ScannerConfig( + host=os.getenv("INFLUX_HOST", "http://localhost:9000"), + token=os.getenv("INFLUX_TOKEN", ""), + database=os.getenv("INFLUX_DATABASE", "WFR25"), + table=f"{schema}.{table}", + ) + print(json.dumps(scan_runs(cfg), indent=2)) diff --git a/installer/data-downloader/backend/services.py b/installer/data-downloader/backend/services.py new file mode 100644 index 0000000..ab485cc --- /dev/null +++ b/installer/data-downloader/backend/services.py @@ -0,0 +1,212 @@ +from __future__ import annotations + +from datetime import datetime, timezone +import logging +from pathlib import Path +from typing import Dict, List, Optional + +from influxdb_client_3 import InfluxDBClient3 + +from backend.config import Settings +from backend.storage import RunsRepository, SensorsRepository, ScannerStatusRepository +from backend.influx_queries import fetch_signal_series +from backend.server_scanner import ScannerConfig, scan_runs +from backend.sql import SensorQueryConfig, fetch_unique_sensors + + +logger = logging.getLogger(__name__) + + +def _parse_iso(value: str | None) -> Optional[datetime]: + if not value: + return None + text = value.strip() + if not text: + return None + if text.endswith("Z"): + text = text[:-1] + "+00:00" + dt = datetime.fromisoformat(text) + if dt.tzinfo is None: + dt = dt.replace(tzinfo=timezone.utc) + return dt.astimezone(timezone.utc) + + +class DataDownloaderService: + def __init__(self, settings: Settings): + self.settings = settings + data_dir = Path(settings.data_dir).resolve() + data_dir.mkdir(parents=True, exist_ok=True) + + # Repositories keyed by season name (e.g. 
"WFR25") + self.runs_repos: Dict[str, RunsRepository] = {} + self.sensors_repos: Dict[str, SensorsRepository] = {} + + for season in settings.seasons: + # Suffix file with season name: runs_WFR25.json + self.runs_repos[season.name] = RunsRepository(data_dir, suffix=season.name) + self.sensors_repos[season.name] = SensorsRepository(data_dir, suffix=season.name) + + self.status_repo = ScannerStatusRepository(data_dir) + self._log_influx_connectivity() + + def get_runs(self, season: str | None = None) -> dict: + target_season = season or self._default_season() + repo = self.runs_repos.get(target_season) + if not repo: + return {"runs": [], "error": f"Season {target_season} not found"} + return repo.list_runs() + + def get_sensors(self, season: str | None = None) -> dict: + target_season = season or self._default_season() + repo = self.sensors_repos.get(target_season) + if not repo: + return {"sensors": [], "error": f"Season {target_season} not found"} + return repo.list_sensors() + + def update_note(self, key: str, note: str, season: str | None = None) -> dict | None: + target_season = season or self._default_season() + repo = self.runs_repos.get(target_season) + if not repo: + return None + return repo.update_note(key, note) + + def get_scanner_status(self) -> dict: + return self.status_repo.get_status() + + def get_seasons(self) -> List[dict]: + """Return list of available seasons.""" + return [ + {"name": s.name, "year": s.year, "database": s.database, "color": s.color} + for s in self.settings.seasons + ] + + def run_full_scan(self, source: str = "manual") -> Dict[str, dict]: + self.status_repo.mark_start(source) + results = {} + errors = [] + + try: + # Sort seasons by year descending to ensure most recent is scanned first + sorted_seasons = sorted(self.settings.seasons, key=lambda s: s.year, reverse=True) + for season in sorted_seasons: + try: + logger.info(f"Scanning season {season.name} (DB: {season.database})...") + + runs = scan_runs( + ScannerConfig( + host=self.settings.influx_host, + token=self.settings.influx_token, + database=season.database, + table=f"{self.settings.influx_schema}.{self.settings.influx_table}", + year=season.year, + bin_size=self.settings.scanner_bin, + include_counts=self.settings.scanner_include_counts, + initial_chunk_days=self.settings.scanner_initial_chunk_days, + ) + ) + + repo_runs = self.runs_repos[season.name] + runs_payload = repo_runs.merge_scanned_runs(runs) + + fallback_start, fallback_end = self._build_sensor_fallback_range(runs) + + sensors = fetch_unique_sensors( + SensorQueryConfig( + host=self.settings.influx_host, + token=self.settings.influx_token, + database=season.database, + schema=self.settings.influx_schema, + table=self.settings.influx_table, + window_days=self.settings.sensor_window_days, + lookback_days=self.settings.sensor_lookback_days, + fallback_start=fallback_start, + fallback_end=fallback_end, + ) + ) + repo_sensors = self.sensors_repos[season.name] + sensors_payload = repo_sensors.write_sensors(sensors) + + results[season.name] = { + "runs": len(runs_payload.get("runs", [])), + "sensors": len(sensors_payload.get("sensors", [])) + } + + except Exception as e: + logger.exception(f"Failed to scan season {season.name}") + errors.append(f"{season.name}: {str(e)}") + # Continue scanning other seasons even if one fails + + if errors: + self.status_repo.mark_finish(success=False, error="; ".join(errors)) + else: + self.status_repo.mark_finish(success=True) + + return results + + except Exception as exc: + 
self.status_repo.mark_finish(success=False, error=str(exc)) + raise + + def query_signal_series(self, signal: str, start: datetime, end: datetime, limit: Optional[int], season: str | None = None) -> dict: + target_season_name = season or self._default_season() + season_cfg = next((s for s in self.settings.seasons if s.name == target_season_name), None) + + if not season_cfg: + raise ValueError(f"Season {target_season_name} not configured") + + # Temporarily override settings with season database for the query + # This is a bit hacky but avoids refactoring fetch_signal_series signature deeper + # Ideally fetch_signal_series should take db name argument + + # Actually fetch_signal_series takes 'settings' object. + # We can construct a proxy or just rely on the existing signature if we modify it. + # But modify backend/influx_queries.py is safer. + # For now, let's assume fetch_signal_series uses settings.influx_database. + # We need to pass the correct DB. + + return fetch_signal_series(self.settings, signal, start, end, limit, database=season_cfg.database) + + def _default_season(self) -> str: + # Default to the first (newest) season if available + if self.settings.seasons: + return self.settings.seasons[0].name + return "WFR25" + + def _log_influx_connectivity(self) -> None: + # Check connectivity for the default season + season = self.settings.seasons[0] if self.settings.seasons else None + if not season: + return + + host = self.settings.influx_host + database = season.database + try: + logger.info("Checking InfluxDB connectivity (%s -> %s)", host, database) + with InfluxDBClient3(host=host, token=self.settings.influx_token, database=database) as client: + getattr(client, "ping", lambda: client.query("SELECT 1"))() + logger.info("InfluxDB connectivity OK") + except Exception: + logger.exception("InfluxDB connectivity check failed") + + @staticmethod + def _build_sensor_fallback_range(runs: List[dict]) -> tuple[Optional[datetime], Optional[datetime]]: + """Use the longest run discovered by the scanner for sensor fallback.""" + longest_run: Optional[dict] = None + longest_duration: Optional[float] = None + + for run in runs: + start_dt = _parse_iso(run.get("start_utc")) + end_dt = _parse_iso(run.get("end_utc")) + if start_dt is None or end_dt is None: + continue + duration = (end_dt - start_dt).total_seconds() + if longest_duration is None or duration > longest_duration: + longest_duration = duration + longest_run = run + + if longest_run is None: + return None, None + + fallback_start = _parse_iso(longest_run.get("start_utc")) + fallback_end = _parse_iso(longest_run.get("end_utc")) + return fallback_start, fallback_end diff --git a/installer/data-downloader/backend/sql.py b/installer/data-downloader/backend/sql.py new file mode 100644 index 0000000..d529e5f --- /dev/null +++ b/installer/data-downloader/backend/sql.py @@ -0,0 +1,80 @@ +"""Thin wrapper that delegates sensor discovery to the *slicks* package. + +The public API (``SensorQueryConfig`` + ``fetch_unique_sensors``) is +unchanged so ``services.py`` works without modification. 
+""" + +from __future__ import annotations + +from dataclasses import dataclass +from datetime import datetime, timedelta, timezone +from typing import List + +import slicks +from slicks.discovery import discover_sensors + + +UTC = timezone.utc + + +@dataclass(frozen=True) +class SensorQueryConfig: + host: str + token: str + database: str + schema: str + table: str + window_days: int = 7 + lookback_days: int = 30 + fallback_start: datetime | None = None + fallback_end: datetime | None = None + + +def fetch_unique_sensors(config: SensorQueryConfig) -> List[str]: + """Collect distinct signal names by scanning recent history via *slicks*.""" + + # Configure slicks to point at the same InfluxDB instance + slicks.connect_influxdb3( + url=config.host, + token=config.token, + db=config.database, + schema=config.schema, + table=config.table, + ) + + end = datetime.now(UTC) + start = end - timedelta(days=config.lookback_days) + + # slicks 0.1.3 generates invalid SQL for InfluxDB 3 if passed timezone-aware datetimes + # because it appends 'Z' to an ISO string that already has an offset. + # Passing naive UTC datetimes works around this. + sensors = discover_sensors( + start_time=start.replace(tzinfo=None), + end_time=end.replace(tzinfo=None), + chunk_size_days=config.window_days, + show_progress=False, + ) + + if not sensors and config.fallback_start and config.fallback_end: + sensors = discover_sensors( + start_time=config.fallback_start.replace(tzinfo=None), + end_time=config.fallback_end.replace(tzinfo=None), + chunk_size_days=config.window_days, + show_progress=False, + ) + + return sensors + + +if __name__ == "__main__": # pragma: no cover + import json + import os + + cfg = SensorQueryConfig( + host=os.getenv("INFLUX_HOST", "http://localhost:9000"), + token=os.getenv("INFLUX_TOKEN", ""), + database=os.getenv("INFLUX_DATABASE", "WFR25"), + schema=os.getenv("INFLUX_SCHEMA", "iox"), + table=os.getenv("INFLUX_TABLE", "WFR25"), + ) + print(json.dumps(fetch_unique_sensors(cfg), indent=2)) diff --git a/installer/data-downloader/backend/storage.py b/installer/data-downloader/backend/storage.py new file mode 100644 index 0000000..1a103c9 --- /dev/null +++ b/installer/data-downloader/backend/storage.py @@ -0,0 +1,196 @@ +from __future__ import annotations + +import json +from pathlib import Path +from tempfile import NamedTemporaryFile +from threading import Lock +from typing import Dict, List, Optional +from datetime import datetime, timezone + + +def now_iso() -> str: + return datetime.now(timezone.utc).isoformat() + + +class JSONStore: + """Lightweight helper around json files with atomic writes.""" + + def __init__(self, path: Path, default_payload: dict): + self.path = path + self.default_payload = default_payload + self._lock = Lock() + self.path.parent.mkdir(parents=True, exist_ok=True) + if not self.path.exists(): + self._write_file(self.default_payload) + + def read(self) -> dict: + with self._lock: + with self.path.open("r", encoding="utf-8") as fh: + return json.load(fh) + + def write(self, payload: dict) -> None: + payload["updated_at"] = payload.get("updated_at") or now_iso() + with self._lock: + self._write_file(payload) + + def _write_file(self, payload: dict) -> None: + with NamedTemporaryFile("w", delete=False, dir=str(self.path.parent), encoding="utf-8") as tmp: + json.dump(payload, tmp, indent=2, ensure_ascii=True) + tmp.flush() + tmp_path = Path(tmp.name) + tmp_path.chmod(0o664) + tmp_path.replace(self.path) + + +class RunsRepository: + def __init__(self, data_dir: Path, suffix: str 
= ""): + filename = f"runs_{suffix}.json" if suffix else "runs.json" + default = {"updated_at": None, "runs": []} + self.store = JSONStore(data_dir / filename, default) + + def list_runs(self) -> dict: + return self.store.read() + + def merge_scanned_runs(self, scanned: List[dict]) -> dict: + current = self.store.read() + current_updated_at = current.get("updated_at") + existing: Dict[str, dict] = {r["key"]: r for r in current.get("runs", [])} + merged: Dict[str, dict] = {} + + for run in scanned: + key = run["key"] + note = existing.get(key, {}).get("note", "") + note_ts = existing.get(key, {}).get("note_updated_at") + merged[key] = { + **run, + "note": note, + "note_updated_at": note_ts, + } + + # Keep runs that vanished but still have notes to preserve manual metadata + for key, run in existing.items(): + if key not in merged: + merged[key] = run + + runs_list = sorted( + merged.values(), + key=lambda r: r.get("start_utc", ""), + reverse=True, + ) + payload = { + "updated_at": now_iso(), + "runs": runs_list, + } + payload = self._preserve_concurrent_note_updates(payload, current_updated_at) + self.store.write(payload) + return payload + + def update_note(self, key: str, note: str) -> Optional[dict]: + payload = self.store.read() + updated_run: Optional[dict] = None + for run in payload.get("runs", []): + if run["key"] == key: + run["note"] = note + run["note_updated_at"] = now_iso() + updated_run = run + break + if updated_run is not None: + payload["updated_at"] = now_iso() + self.store.write(payload) + return updated_run + + def _preserve_concurrent_note_updates(self, payload: dict, baseline_updated_at: Optional[str]) -> dict: + """Re-read the store to keep newer notes written while a scan was running.""" + latest = self.store.read() + latest_updated_at = latest.get("updated_at") + if not latest_updated_at or latest_updated_at == baseline_updated_at: + return payload + + latest_runs = {r["key"]: r for r in latest.get("runs", [])} + for run in payload.get("runs", []): + latest_run = latest_runs.get(run["key"]) + if latest_run and self._note_is_newer(latest_run, run): + run["note"] = latest_run.get("note", "") + run["note_updated_at"] = latest_run.get("note_updated_at") + return payload + + @staticmethod + def _note_is_newer(candidate: dict, current: dict) -> bool: + candidate_ts = RunsRepository._parse_timestamp(candidate.get("note_updated_at")) + current_ts = RunsRepository._parse_timestamp(current.get("note_updated_at")) + return candidate_ts > current_ts + + @staticmethod + def _parse_timestamp(value: Optional[str]) -> datetime: + if not value: + return datetime.min.replace(tzinfo=timezone.utc) + try: + return datetime.fromisoformat(value) + except ValueError: + return datetime.min.replace(tzinfo=timezone.utc) + + +class SensorsRepository: + def __init__(self, data_dir: Path, suffix: str = ""): + filename = f"sensors_{suffix}.json" if suffix else "sensors.json" + default = {"updated_at": None, "sensors": []} + self.store = JSONStore(data_dir / filename, default) + + def list_sensors(self) -> dict: + return self.store.read() + + def write_sensors(self, sensors: List[str]) -> dict: + payload = { + "updated_at": now_iso(), + "sensors": sorted(sensors), + } + self.store.write(payload) + return payload + + +class ScannerStatusRepository: + def __init__(self, data_dir: Path): + default = { + "updated_at": None, + "scanning": False, + "started_at": None, + "finished_at": None, + "source": None, + "last_result": None, + "error": None, + } + self.store = JSONStore(data_dir / 
"scanner_status.json", default) + + def get_status(self) -> dict: + return self.store.read() + + def mark_start(self, source: str) -> dict: + payload = self.store.read() + payload.update( + { + "scanning": True, + "source": source, + "started_at": now_iso(), + } + ) + payload.pop("error", None) + payload["updated_at"] = now_iso() + self.store.write(payload) + return payload + + def mark_finish(self, success: bool, error: str | None = None) -> dict: + payload = self.store.read() + payload.update( + { + "scanning": False, + "finished_at": now_iso(), + "last_result": "success" if success else "error", + } + ) + if success: + payload.pop("error", None) + else: + payload["error"] = error or "scan failed" + payload["updated_at"] = now_iso() + self.store.write(payload) + return payload diff --git a/installer/data-downloader/backend/table_utils.py b/installer/data-downloader/backend/table_utils.py new file mode 100644 index 0000000..73ffd03 --- /dev/null +++ b/installer/data-downloader/backend/table_utils.py @@ -0,0 +1,20 @@ +from __future__ import annotations + + +def quote_identifier(identifier: str) -> str: + trimmed = identifier.strip() + if trimmed.startswith('"') and trimmed.endswith('"'): + trimmed = trimmed[1:-1] + return f'"{trimmed}"' + + +def quote_table(identifier: str) -> str: + parts = [part for part in identifier.split(".") if part.strip()] + if not parts: + raise ValueError("Empty identifier") + return ".".join(quote_identifier(part) for part in parts) + + +def quote_literal(value: str) -> str: + escaped = value.replace("'", "''") + return f"'{escaped}'" diff --git a/installer/data-downloader/docker-compose.yml b/installer/data-downloader/docker-compose.yml new file mode 100644 index 0000000..670f099 --- /dev/null +++ b/installer/data-downloader/docker-compose.yml @@ -0,0 +1,44 @@ +services: + api: + build: + context: . + dockerfile: backend/Dockerfile + env_file: .env + ports: + - "8000:8000" + volumes: + - ./data:/app/data + restart: unless-stopped + networks: + - datalink + + scanner: + build: + context: . + dockerfile: backend/Dockerfile + env_file: .env + command: ["python", "-m", "backend.periodic_worker"] + depends_on: + - api + volumes: + - ./data:/app/data + restart: unless-stopped + networks: + - datalink + + frontend: + build: + context: . 
+ dockerfile: frontend/Dockerfile + args: + VITE_API_BASE_URL: ${VITE_API_BASE_URL:-http://localhost:8000} + ports: + - "3000:80" + depends_on: + - api + networks: + - datalink + +networks: + datalink: + external: true diff --git a/installer/data-downloader/frontend/Dockerfile b/installer/data-downloader/frontend/Dockerfile new file mode 100644 index 0000000..08e0926 --- /dev/null +++ b/installer/data-downloader/frontend/Dockerfile @@ -0,0 +1,20 @@ +FROM node:18-alpine AS build + +ARG VITE_API_BASE_URL=http://localhost:8000 +ENV VITE_API_BASE_URL=${VITE_API_BASE_URL} + +WORKDIR /app + +COPY frontend/package.json /app/package.json +COPY frontend/tsconfig.json /app/tsconfig.json +COPY frontend/tsconfig.node.json /app/tsconfig.node.json +COPY frontend/vite.config.ts /app/vite.config.ts +COPY frontend/index.html /app/index.html +COPY frontend/src /app/src + +RUN npm install && npm run build + +FROM nginx:1.25-alpine +COPY --from=build /app/dist /usr/share/nginx/html +COPY frontend/nginx.conf /etc/nginx/conf.d/default.conf +EXPOSE 80 diff --git a/installer/data-downloader/frontend/index.html b/installer/data-downloader/frontend/index.html new file mode 100644 index 0000000..06ccb1c --- /dev/null +++ b/installer/data-downloader/frontend/index.html @@ -0,0 +1,13 @@ + + + + + + + DAQ Data Downloader + + +
+ + + diff --git a/installer/data-downloader/frontend/nginx.conf b/installer/data-downloader/frontend/nginx.conf new file mode 100644 index 0000000..d9acfba --- /dev/null +++ b/installer/data-downloader/frontend/nginx.conf @@ -0,0 +1,27 @@ +server { + listen 80; + server_name _; + + root /usr/share/nginx/html; + index index.html; + + # Proxy API + scanner-triggering endpoints to the FastAPI service + location /api/ { + proxy_pass http://data-downloader-api:8000; + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_buffering off; + proxy_read_timeout 300; + } + + location = /api { + return 301 /api/; + } + + location / { + try_files $uri /index.html; + } +} diff --git a/installer/data-downloader/frontend/package.json b/installer/data-downloader/frontend/package.json new file mode 100644 index 0000000..011057a --- /dev/null +++ b/installer/data-downloader/frontend/package.json @@ -0,0 +1,27 @@ +{ + "name": "data-downloader-webapp", + "version": "0.1.0", + "private": true, + "type": "module", + "scripts": { + "dev": "vite", + "build": "vite build", + "preview": "vite preview" + }, + "dependencies": { + "luxon": "^3.5.0", + "lucide-react": "^0.446.0", + "papaparse": "^5.4.1", + "react": "^18.3.1", + "react-dom": "^18.3.1", + "plotly.js-basic-dist": "^2.35.3", + "react-plotly.js": "^2.6.0" + }, + "devDependencies": { + "@types/react": "^18.2.45", + "@types/react-dom": "^18.2.18", + "@vitejs/plugin-react": "^4.3.2", + "typescript": "^5.3.3", + "vite": "^5.1.0" + } +} diff --git a/installer/data-downloader/frontend/src/App.tsx b/installer/data-downloader/frontend/src/App.tsx new file mode 100644 index 0000000..8a54cb9 --- /dev/null +++ b/installer/data-downloader/frontend/src/App.tsx @@ -0,0 +1,342 @@ +import { useCallback, useEffect, useMemo, useRef, useState } from "react"; +import { fetchRuns, fetchSensors, fetchScannerStatus, triggerScan, updateNote, fetchSeasons } from "./api"; +import { RunRecord, RunsResponse, ScannerStatus, SensorsResponse, Season } from "./types"; +import { RunTable } from "./components/RunTable"; +import { DataDownload } from "./components/data-download"; + +type ScanState = "idle" | "running" | "success" | "error"; + +interface DownloaderSelection { + runKey?: string; + startUtc?: string; + endUtc?: string; + sensor?: string; + version: number; +} + +export default function App() { + const [seasons, setSeasons] = useState([]); + const [selectedSeason, setSelectedSeason] = useState(""); // season name + const [runs, setRuns] = useState(null); + const [sensors, setSensors] = useState(null); + const [loading, setLoading] = useState(true); + const [error, setError] = useState(null); + const [noteDrafts, setNoteDrafts] = useState>({}); + const [savingKey, setSavingKey] = useState(null); + const [scanState, setScanState] = useState("idle"); + const [downloaderSelection, setDownloaderSelection] = useState(null); + const [scannerStatus, setScannerStatus] = useState(null); + const sensorsSectionRef = useRef(null); + const downloaderSectionRef = useRef(null); + const statusFinishedRef = useRef(null); + + const loadData = useCallback(async () => { + try { + setLoading(true); + + let currentSeason = selectedSeason; + + // Initial load: fetch seasons if we don't have them + if (seasons.length === 0) { + const seasonsList = await fetchSeasons(); + setSeasons(seasonsList); + if (seasonsList.length > 0 && !currentSeason) { 
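// Auto-select the first season returned by the backend; the API returns
// seasons sorted newest-first, so this defaults to the most recent season.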
+ currentSeason = seasonsList[0].name; + setSelectedSeason(currentSeason); + } + } + + // If we still don't have a season (e.g. no seasons configured), fetch with default (undefined) + const seasonArg = currentSeason || undefined; + + const [runsData, sensorsData] = await Promise.all([ + fetchRuns(seasonArg), + fetchSensors(seasonArg) + ]); + setRuns(runsData); + setSensors(sensorsData); + setError(null); + } catch (err) { + console.error(err); + setError(err instanceof Error ? err.message : "Failed to fetch data"); + } finally { + setLoading(false); + } + }, [selectedSeason, seasons.length]); + + const loadStatus = useCallback( + async (syncOnFinishChange: boolean) => { + try { + const status = await fetchScannerStatus(); + setScannerStatus(status); + const finished = status.finished_at ?? null; + const prevFinished = statusFinishedRef.current; + statusFinishedRef.current = finished; + if ( + syncOnFinishChange && + !status.scanning && + finished && + finished !== prevFinished + ) { + await loadData(); + } + } catch (err) { + console.error("Failed to load scanner status", err); + } + }, + [loadData] + ); + + useEffect(() => { + void loadData(); + void loadStatus(false); + }, [loadData, loadStatus]); + + useEffect(() => { + if (typeof window === "undefined") { + return; + } + const id = window.setInterval(() => { + void loadStatus(true); + }, 5000); + return () => window.clearInterval(id); + }, [loadStatus]); + + const handleScan = async () => { + setScanState("running"); + setScannerStatus((prev) => ({ + scanning: true, + started_at: new Date().toISOString(), + finished_at: prev?.finished_at ?? null, + source: "manual", + last_result: prev?.last_result ?? null, + error: null, + updated_at: new Date().toISOString() + })); + try { + await triggerScan(); + setScanState("success"); + if (typeof window !== "undefined") { + window.setTimeout(() => { + void loadStatus(false); + }, 1500); + } else { + void loadStatus(false); + } + } catch (err) { + console.error(err); + setScanState("error"); + setError(err instanceof Error ? err.message : "Failed to start scan"); + const message = err instanceof Error ? err.message : "Scan failed"; + setScannerStatus((prev) => + prev + ? { ...prev, scanning: false, last_result: "error", error: message } + : { + scanning: false, + started_at: null, + finished_at: null, + source: null, + last_result: "error", + error: message, + updated_at: new Date().toISOString() + } + ); + } finally { + setTimeout(() => setScanState("idle"), 5000); + } + }; + + const handleRefreshClick = async () => { + await loadData(); + await loadStatus(false); + }; + + const handleNoteChange = (key: string, value: string) => { + setNoteDrafts((prev) => ({ ...prev, [key]: value })); + }; + + const handleSaveNote = async (key: string) => { + const nextNote = noteDrafts[key] ?? runs?.runs.find((r) => r.key === key)?.note ?? ""; + setSavingKey(key); + try { + const updated = await updateNote(key, nextNote, selectedSeason); + setRuns((prev) => { + if (!prev) return prev; + const updatedRuns = prev.runs.map((run) => (run.key === key ? updated : run)); + return { ...prev, runs: updatedRuns, updated_at: updated.note_updated_at ?? prev.updated_at }; + }); + setNoteDrafts((prev) => { + const clone = { ...prev }; + delete clone[key]; + return clone; + }); + } catch (err) { + setError(err instanceof Error ? 
err.message : "Failed to save note"); + } finally { + setSavingKey(null); + } + }; + + const handleRunPick = (run: RunRecord) => { + setDownloaderSelection((prev) => ({ + runKey: run.key, + startUtc: run.start_utc, + endUtc: run.end_utc, + sensor: prev?.sensor, + version: (prev?.version ?? 0) + 1 + })); + sensorsSectionRef.current?.scrollIntoView({ behavior: "smooth", block: "start" }); + }; + + const handleSensorPick = (sensor: string) => { + setDownloaderSelection((prev) => ({ + runKey: prev?.runKey, + sensor, + version: (prev?.version ?? 0) + 1 + })); + downloaderSectionRef.current?.scrollIntoView({ behavior: "smooth", block: "start" }); + }; + + const sensorsPreview = useMemo(() => sensors?.sensors ?? [], [sensors]); + const scanningActive = scannerStatus?.scanning ?? false; + const scanButtonDisabled = scanningActive || scanState === "running"; + const scanButtonLabel = + scanState === "running" ? "Scanning..." : scanningActive ? "Scan Running..." : "Trigger Scan"; + + const lastRunsRefresh = runs?.updated_at + ? new Date(runs.updated_at).toLocaleString() + : "never"; + const lastSensorRefresh = sensors?.updated_at + ? new Date(sensors.updated_at).toLocaleString() + : "never"; + + const selectedSeasonColor = useMemo(() => { + return seasons.find(s => s.name === selectedSeason)?.color || "#0bf"; // Default blue + }, [seasons, selectedSeason]); + + return ( +
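// Rendered sections: a header with the season selector and "Trigger Scan"
// button, scan-status and error banners, the Past Runs table, the Unique
// Sensors list, and the DataDownload panel.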
+
+
+
+

DAQ Data Downloader

+

+ Inspect historical scans, refresh availability, and capture run notes. +

+
+ + {seasons.length > 0 && ( +
+ + +
+ )} +
+
+ + {scanningActive && ( +
+ Scanning database. Do not click again. +
+ )} + +
+ + + {scanState !== "idle" && ( + + {scanState === "running" && "Scan in progress..."} + {scanState === "success" && "Scan queued and data refreshed"} + {scanState === "error" && "Scan failed"} + + )} +
+ + {error && ( +
+ Heads up: {error} +
+ )} + +
+

Past Runs

+

Last refresh: {lastRunsRefresh}

+ {loading && !runs ? ( +

Loading runs...

+ ) : runs ? ( + + ) : ( +

No data yet.

+ )} +
+ +
+

Unique Sensors

+

Last refresh: {lastSensorRefresh}

+ {loading && !sensors ? ( +

Loading sensors...

+ ) : ( +
+ {sensorsPreview.length === 0 &&

No sensors captured.

} + {sensorsPreview.map((sensor) => ( + + ))} +
+ )} +
+ +
+ +
+
+ ); +} diff --git a/installer/data-downloader/frontend/src/api.ts b/installer/data-downloader/frontend/src/api.ts new file mode 100644 index 0000000..bb585cd --- /dev/null +++ b/installer/data-downloader/frontend/src/api.ts @@ -0,0 +1,81 @@ +import { + RunRecord, + RunsResponse, + ScannerStatus, + Season, + SensorDataResponse, + SensorsResponse +} from "./types"; + +const RAW_API_BASE = import.meta.env.VITE_API_BASE_URL?.trim() ?? ""; +const SANITIZED_API_BASE = RAW_API_BASE.replace(/\/$/, ""); +const LOCAL_BASE_PATTERN = /:\/\/(localhost|127\.0\.0\.1|\[?::1]?)/i; +const LOCAL_HOSTS = new Set(["localhost", "127.0.0.1", "::1"]); +const runningOnLocalhost = typeof window !== "undefined" && LOCAL_HOSTS.has(window.location.hostname); +const preferRelativeBase = + SANITIZED_API_BASE === "" || (!runningOnLocalhost && LOCAL_BASE_PATTERN.test(SANITIZED_API_BASE)); +const API_BASE = preferRelativeBase ? "" : SANITIZED_API_BASE; + +async function request(path: string, init?: RequestInit): Promise { + const response = await fetch(`${API_BASE}${path}`, { + headers: { + "Content-Type": "application/json", + ...(init?.headers || {}) + }, + ...init + }); + if (!response.ok) { + const message = await response.text(); + throw new Error(message || `Request failed (${response.status})`); + } + if (response.status === 204) { + return {} as T; + } + return (await response.json()) as T; +} + +export function fetchSeasons(): Promise { + return request("/api/seasons"); +} + +export function fetchRuns(season?: string): Promise { + const query = season ? `?season=${encodeURIComponent(season)}` : ""; + return request(`/api/runs${query}`); +} + +export function fetchSensors(season?: string): Promise { + const query = season ? `?season=${encodeURIComponent(season)}` : ""; + return request(`/api/sensors${query}`); +} + +export function fetchScannerStatus(): Promise { + return request("/api/scanner-status"); +} + +export function triggerScan(): Promise<{ status: string }> { + return request("/api/scan", { method: "POST" }); +} + +export function updateNote(key: string, note: string, season?: string): Promise { + const query = season ? `?season=${encodeURIComponent(season)}` : ""; + return request(`/api/runs/${encodeURIComponent(key)}/note${query}`, { + method: "POST", + body: JSON.stringify({ note }) + }); +} + +export interface DataQueryPayload { + signal: string; + start: string; + end: string; + limit?: number; + no_limit?: boolean; +} + +export function querySensorData(payload: DataQueryPayload, season?: string): Promise { + const query = season ? 
`?season=${encodeURIComponent(season)}` : ""; + return request(`/api/query${query}`, { + method: "POST", + body: JSON.stringify(payload) + }); +} diff --git a/installer/data-downloader/frontend/src/components/RunTable.tsx b/installer/data-downloader/frontend/src/components/RunTable.tsx new file mode 100644 index 0000000..34a59a1 --- /dev/null +++ b/installer/data-downloader/frontend/src/components/RunTable.tsx @@ -0,0 +1,98 @@ +import { RunRecord } from "../types"; + +interface Props { + runs: RunRecord[]; + drafts: Record; + onChange: (key: string, value: string) => void; + onSave: (key: string) => void; + savingKey: string | null; + onPickRun?: (run: RunRecord) => void; +} + +const formatLocalDateTime = (iso: string) => + new Date(iso).toLocaleString(undefined, { + hour12: false + }); + +const formatUtcDateTime = (iso: string) => + new Date(iso).toLocaleString(undefined, { + hour12: false, + timeZone: "UTC" + }); + +export function RunTable({ runs, drafts, onChange, onSave, savingKey, onPickRun }: Props) { + if (runs.length === 0) { + return

No runs found yet.

; + } + + return ( +
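// One row per scanned run window; clicking a row anywhere outside the note
// textarea/button controls passes the run to onPickRun for downloading.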
+ + + + + + + + + + + + + {runs.map((run) => { + const draft = drafts[run.key] ?? run.note ?? ""; + return ( + { + if (!onPickRun) return; + const target = event.target as HTMLElement; + if (target.closest("textarea, button")) return; + onPickRun(run); + }} + > + + + + +
Window (local)UTC StartBinsRowsNote
+
{formatLocalDateTime(run.start_local)}
+
+ {formatLocalDateTime(run.end_local)} ({run.timezone ?? "local"}) +
+
+
{formatUtcDateTime(run.start_utc)}
+
UTC
+
+ {run.bins} + {run.row_count ?? "—"} +