diff --git a/.github/actions/aws_s3_helper/action.yml b/.github/actions/aws_s3_helper/action.yml new file mode 100644 index 000000000..561c1642b --- /dev/null +++ b/.github/actions/aws_s3_helper/action.yml @@ -0,0 +1,108 @@ +# name: AWS S3 Helper +description: Upload and download files from AWS S3 + +inputs: + s3_bucket: + description: S3 Bucket Name + required: true + local_file: + description: Local file paths + required: false + default: ../artifacts/file_list.txt + download_file: + description: Download file paths + required: false + default: '' + download_location: + description: File download location + required: false + default: . + mode: + description: Mode of operation (upload/download) + required: true + default: single-upload + upload_location: + description: Upload location + required: true + +outputs: + presigned_url: + description: Pre-signed URL for the uploaded file + value: ${{ steps.sync-data.outputs.presigned_url }} + s3_location: + description: Upload location + value: ${{ inputs.upload_location }} + +runs: + using: "composite" + steps: + - name: Sync Data + id: sync-data + shell: bash + env: + UPLOAD_LOCATION: ${{ inputs.upload_location }} + run: | + echo "::group::Uploading files to S3" + case "${{ inputs.mode }}" in + multi-upload) + if [ ! -s "${{ inputs.local_file }}" ]; then + echo "❌ File list is empty. No files to upload." + exit 1 + fi + + echo "📄 Contents of file list:" + cat "${{ inputs.local_file }}" + + first_line=true + manifest="${{ github.workspace }}/presigned_urls.json" + echo "{" > "${manifest}" + + while IFS= read -r file; do + resolved_file=$(readlink -f "$file") + if [ -f "$resolved_file" ]; then + filename=$(basename "$resolved_file") + echo "📤 Uploading $filename..." 
+              aws s3 cp "$resolved_file" "s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}/$filename"
+              presigned_url=$(aws s3 presign "s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}/$filename" --expires-in 259200)
+
+              if [ "$first_line" = true ]; then
+                first_line=false
+              else
+                echo "," >> "${manifest}"
+              fi
+
+              # Key = filename, Value = presigned_url
+              echo "  \"${filename}\": \"${presigned_url}\"" >> "${manifest}"
+              echo "✅ Pre-signed URL for $filename: $presigned_url"
+            else
+              echo "⚠️ Skipping: $file is not a regular file or not accessible."
+            fi
+          done < "${{ inputs.local_file }}"
+
+          echo "}" >> "${manifest}"
+          ;;
+        single-upload)
+          resolved_file=$(readlink -f "${{ inputs.local_file }}")
+          filename=$(basename "$resolved_file")
+          aws s3 cp "$resolved_file" "s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}/$filename"
+          presigned_url=$(aws s3 presign "s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}/$filename" --expires-in 259200)
+          echo "presigned_url=${presigned_url}" >> "$GITHUB_OUTPUT"
+          ;;
+        download)
+          download_dir=$(realpath "${{ inputs.download_location }}")
+          aws s3 cp "s3://${{ inputs.s3_bucket }}/${{ inputs.download_file }}" "$download_dir"
+          ;;
+        *)
+          echo "Invalid mode. Use 'single-upload', 'multi-upload', or 'download'."
+          exit 1
+          ;;
+      esac
+      echo "::endgroup::"
+
+  - name: Upload presigned URL manifest
+    if: ${{ inputs.mode == 'multi-upload' }}
+    uses: actions/upload-artifact@v4
+    with:
+      name: presigned_urls.json
+      path: ${{ github.workspace }}/presigned_urls.json
+      retention-days: 3
\ No newline at end of file
diff --git a/.github/actions/build/action.yml b/.github/actions/build/action.yml
new file mode 100644
index 000000000..0ddbc84f8
--- /dev/null
+++ b/.github/actions/build/action.yml
@@ -0,0 +1,42 @@
+name: Build Workspace
+description: |
+  Builds kernel and video-driver using a Docker image.
+ +inputs: + docker_image: + description: Docker image to use + required: true + workspace_path: + description: Path to workspace directory + required: true + +runs: + using: "composite" + steps: + - name: Build kernel + shell: bash + run: | + docker run --rm \ + -v "${{ inputs.workspace_path }}:${{ inputs.workspace_path }}" \ + -w "${{ inputs.workspace_path }}/kernel" \ + --user $(id -u):$(id -g) \ + ${{ inputs.docker_image }} \ + bash -c " + make O=../kobj ARCH=arm64 defconfig && + make O=../kobj -j\$(nproc) && + make O=../kobj -j\$(nproc) dir-pkg INSTALL_MOD_STRIP=1 + " + + - name: Build video-driver + shell: bash + run: | + docker run --rm \ + -v "${{ inputs.workspace_path }}:${{ inputs.workspace_path }}" \ + -w "${{ inputs.workspace_path }}/video-driver" \ + --user $(id -u):$(id -g) \ + ${{ inputs.docker_image }} \ + bash -c " + make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC=aarch64-linux-gnu-gcc-13 \ + -C ${{ inputs.workspace_path }}/kobj \ + M=\$(pwd) VIDEO_KERNEL_ROOT=\$(pwd) modules + " diff --git a/.github/actions/lava_job_render/action.yml b/.github/actions/lava_job_render/action.yml new file mode 100644 index 000000000..af88e6967 --- /dev/null +++ b/.github/actions/lava_job_render/action.yml @@ -0,0 +1,196 @@ +name: LAVA Job Render +inputs: + docker_image: + description: Docker image + required: true + default: kmake-image:ver.1.0 + +runs: + using: "composite" + steps: + - name: Process presigned_urls.json + id: process_urls + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const p = require('path'); + + const filePath = p.join(process.env.GITHUB_WORKSPACE, 'presigned_urls.json'); + if (!fs.existsSync(filePath)) { + core.setFailed(`File not found: ${filePath}`); + } + + // Read JSON mapping of uploaded file paths -> presigned URLs + const data = JSON.parse(fs.readFileSync(filePath, 'utf-8')); + + function findUrlByFilename(filename) { + for (const [path, url] of Object.entries(data)) { + if (path.endsWith(filename)) 
return url; + } + return null; + } + + const modulesTarUrl = findUrlByFilename('modules.tar.xz'); + const imageUrl = findUrlByFilename('Image'); + const mergedRamdiskUrl = findUrlByFilename('video-merged.cpio.gz'); + const vmlinuxUrl = findUrlByFilename('vmlinux'); + + // DTB is expected to be ".dtb" + const dtbFilename = `${process.env.MACHINE}.dtb`; + const dtbUrl = findUrlByFilename(dtbFilename); + + core.setOutput('modules_url', modulesTarUrl || ''); + core.setOutput('image_url', imageUrl || ''); + core.setOutput('vmlinux_url', vmlinuxUrl || ''); + core.setOutput('dtb_url', dtbUrl || ''); + core.setOutput('merged_ramdisk_url', mergedRamdiskUrl || ''); + + console.log(`Modules URL: ${modulesTarUrl}`); + console.log(`Image URL: ${imageUrl}`); + console.log(`Vmlinux URL: ${vmlinuxUrl}`); + console.log(`Dtb URL: ${dtbUrl}`); + console.log(`Merged Ramdisk URL: ${mergedRamdiskUrl}`); + + - name: Create metadata.json + id: create_metadata + shell: bash + run: | + echo "Creating metadata.json from job_render templates" + cd ../job_render + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir="$PWD" \ + -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ + -e dtb_url="${{ steps.process_urls.outputs.dtb_url }}" \ + ${{ inputs.docker_image }} \ + jq '.artifacts["dtbs/qcom/${{ env.MACHINE }}.dtb"] = env.dtb_url' data/metadata.json > temp.json && mv temp.json data/metadata.json + + - name: Upload metadata.json + id: upload_metadata + uses: qualcomm-linux-stg/video-driver/.github/actions/aws_s3_helper@video.qclinux.main.stage + with: + local_file: ../job_render/data/metadata.json + s3_bucket: qli-stg-video-gh-artifacts + mode: single-upload + + - name: Create template json cloudData.json + shell: bash + run: | + echo "Populating cloudData.json with kernel, vmlinux, modules, metadata, ramdisk" + metadata_url="${{ steps.upload_metadata.outputs.presigned_url }}" + image_url="${{ steps.process_urls.outputs.image_url }}" + vmlinux_url="${{ 
steps.process_urls.outputs.vmlinux_url }}" + modules_url="${{ steps.process_urls.outputs.modules_url }}" + merged_ramdisk_url="${{ steps.process_urls.outputs.merged_ramdisk_url }}" + + cd ../job_render + + # metadata + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir="$PWD" \ + -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ + -e metadata_url="$metadata_url" \ + ${{ inputs.docker_image }} \ + jq '.artifacts.metadata = env.metadata_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json + + # kernel Image + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir="$PWD" \ + -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ + -e image_url="$image_url" \ + ${{ inputs.docker_image }} \ + jq '.artifacts.kernel = env.image_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json + + # vmlinux (set only if present) + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir="$PWD" \ + -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ + -e vmlinux_url="$vmlinux_url" \ + ${{ inputs.docker_image }} \ + sh -c 'if [ -n "$vmlinux_url" ]; then jq ".artifacts.vmlinux = env.vmlinux_url" data/cloudData.json > temp.json && mv temp.json data/cloudData.json; fi' + + # modules + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir="$PWD" \ + -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ + -e modules_url="$modules_url" \ + ${{ inputs.docker_image }} \ + jq '.artifacts.modules = env.modules_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json + + # ramdisk: use merged only here (fallback added in next step if missing) + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir="$PWD" \ + -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ + -e merged_ramdisk_url="$merged_ramdisk_url" \ + ${{ inputs.docker_image }} \ + sh -c 'if [ -n "$merged_ramdisk_url" ]; then jq ".artifacts.ramdisk = env.merged_ramdisk_url" data/cloudData.json > temp.json && mv temp.json data/cloudData.json; fi' + + - name: Update firmware 
and ramdisk
+        shell: bash
+        run: |
+          set -euo pipefail
+          cd ../job_render
+
+          # Fallback to stable kerneltest ramdisk only if merged ramdisk is not available
+          if [ -z "${{ steps.process_urls.outputs.merged_ramdisk_url }}" ]; then
+            echo "Merged ramdisk not found. Using stable kerneltest ramdisk fallback."
+            ramdisk_url="$(aws s3 presign s3://qli-stg-video-gh-artifacts/qualcomm-linux-stg/video-driver/artifacts/initramfs/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz --expires-in 7600)"
+            docker run -i --rm \
+              --user "$(id -u):$(id -g)" \
+              --workdir="$PWD" \
+              -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+              -e ramdisk_url="$ramdisk_url" \
+              ${{ inputs.docker_image }} \
+              jq '.artifacts.ramdisk = env.ramdisk_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+          else
+            echo "Ramdisk set from merged source; skipping kerneltest fallback."
+          fi
+
+          # Optional board-specific firmware initramfs
+          if [ -n "${{ env.FIRMWARE }}" ]; then
+            case "${{ env.FIRMWARE }}" in
+              sm8750-mtp)
+                FW_FILE="initramfs-firmware-dragonboard410c-image-sm8750-mtp.cpio.gz"
+                ;;
+              *)
+                FW_FILE="initramfs-firmware-${{ env.FIRMWARE }}-image-qcom-armv8a.cpio.gz"
+                ;;
+            esac
+
+            echo "Using firmware file: $FW_FILE"
+
+            firmware_url="$(aws s3 presign s3://qli-stg-video-gh-artifacts/qualcomm-linux-stg/video-driver/artifacts/initramfs/${FW_FILE} --expires-in 7600)"
+
+            docker run -i --rm \
+              --user "$(id -u):$(id -g)" \
+              --workdir="$PWD" \
+              -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+              -e firmware_url="$firmware_url" \
+              ${{ inputs.docker_image }} \
+              jq '.artifacts.firmware = env.firmware_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+          else
+            echo "No FIRMWARE provided; skipping firmware artifact update."
+ fi + + - name: Create lava_job_definition + shell: bash + run: | + cd ../job_render + mkdir -p renders + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir="$PWD" \ + -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ + -e TARGET="${{ env.LAVA_NAME }}" \ + -e TARGET_DTB="${{ env.MACHINE }}" \ + ${{ inputs.docker_image }} \ + sh -c 'export BOOT_METHOD=fastboot && \ + export TARGET=${TARGET} && \ + export TARGET_DTB=${TARGET_DTB} && \ + python3 lava_Job_definition_generator.py --localjson ./data/cloudData.json --video_pre-merge' \ No newline at end of file diff --git a/.github/actions/loading/action.yml b/.github/actions/loading/action.yml new file mode 100644 index 000000000..cd5c7e083 --- /dev/null +++ b/.github/actions/loading/action.yml @@ -0,0 +1,68 @@ +--- +name: Load Parameters +description: Load parameters for the build job + +outputs: + build_matrix: + description: Build matrix + value: ${{ steps.set-matrix.outputs.build_matrix }} + full_matrix: + description: Full matrix containing lava details + value: ${{ steps.set-matrix.outputs.full_matrix }} + +runs: + using: "composite" + steps: + - name: Set Build Matrix + id: set-matrix + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const path = require('path'); + + // 1. Define possible paths for MACHINES.json + // Path A: Workspace/video-driver/ci/MACHINES.json (Nested) + const pathNested = path.join(process.env.GITHUB_WORKSPACE, 'video-driver', 'ci', 'MACHINES.json'); + // Path B: Workspace/ci/MACHINES.json (Root) + const pathRoot = path.join(process.env.GITHUB_WORKSPACE, 'ci', 'MACHINES.json'); + + let targetsPath = ''; + + // 2. Check which path exists + if (fs.existsSync(pathNested)) { + console.log(`Found config at nested path: ${pathNested}`); + targetsPath = pathNested; + } else if (fs.existsSync(pathRoot)) { + console.log(`Found config at root path: ${pathRoot}`); + targetsPath = pathRoot; + } else { + // 3. 
Debugging: If neither exists, list files to help us see what is happening + console.log('!!! Error: MACHINES.json not found in expected locations.'); + console.log(`Checked: ${pathNested}`); + console.log(`Checked: ${pathRoot}`); + + console.log('--- Workspace Root Contents ---'); + try { + console.log(fs.readdirSync(process.env.GITHUB_WORKSPACE)); + } catch (e) { console.log(e.message); } + + core.setFailed(`MACHINES.json not found.`); + return; + } + + // 4. Parse the file + let targets; + try { + targets = JSON.parse(fs.readFileSync(targetsPath, 'utf-8')); + } catch (err) { + core.setFailed(`Failed to parse MACHINES.json: ${err.message}`); + return; + } + + // 5. Generate Outputs + const build_matrix = Object.values(targets).map(({ machine, firmware }) => ({ machine, firmware })); + core.setOutput('build_matrix', JSON.stringify(build_matrix)); + + const full_matrix = Object.values(targets).map(({ machine, firmware, lavaname }) => ({ machine, firmware, lavaname })); + core.setOutput('full_matrix', JSON.stringify(full_matrix)); \ No newline at end of file diff --git a/.github/actions/sync/action.yml b/.github/actions/sync/action.yml new file mode 100644 index 000000000..770df835b --- /dev/null +++ b/.github/actions/sync/action.yml @@ -0,0 +1,66 @@ +--- +name: Sync Action +description: Checks out the correct code depending on event and repo context. 
+ +inputs: + event_name: + description: Event type that triggered the workflow (e.g., pull_request_target, push, workflow_call) + required: true + pr_ref: + description: PR branch ref (e.g., feature/my-feature) + required: false + pr_repo: + description: PR repo full name (e.g., org/repo or user/repo) + required: false + base_ref: + description: Base branch ref (e.g., master) + required: false + caller_workflow: + description: Name of the workflow calling this actions + required: false + default: None + +runs: + using: 'composite' + steps: + - name: Checkout PR code (pull_request_target) + if: ${{ (inputs.event_name == 'pull_request_target' || inputs.event_name == 'workflow_call') && inputs.pr_ref != '' && inputs.pr_repo != '' }} + uses: actions/checkout@v5 + with: + fetch-depth: 0 + ref: ${{ inputs.pr_ref }} + repository: ${{ inputs.pr_repo }} + path: video-driver + + - name: Checkout master (push) + if: ${{ (inputs.event_name == 'push' || inputs.event_name == 'workflow_call') && inputs.base_ref != '' }} + uses: actions/checkout@v5 + with: + fetch-depth: 0 + ref: ${{ inputs.base_ref }} + path: video-driver + + - name: Checkout current repo and ref (Fallback) + if: ${{ (inputs.event_name == 'push' || inputs.event_name == 'workflow_call') && (inputs.base_ref == '' || inputs.pr_repo == '') }} + uses: actions/checkout@v5 + with: + fetch-depth: 0 + ref: ${{ github.ref }} + path: video-driver + + - name: Checkout for workflow_dispatch + if: ${{ inputs.event_name == 'workflow_dispatch' }} + uses: actions/checkout@v5 + with: + fetch-depth: 0 + ref: ${{ inputs.base_ref }} + path: video-driver + + - name: Clone kernel sources + if: ${{ inputs.caller_workflow == 'build' }} + uses: actions/checkout@v5 + with: + repository: qualcomm-linux/kernel + ref: qcom-next + path: kernel + fetch-depth: 0 \ No newline at end of file diff --git a/.github/workflows/commit-check.yml b/.github/workflows/commit-check.yml new file mode 100644 index 000000000..c815bb1db --- /dev/null +++ 
b/.github/workflows/commit-check.yml @@ -0,0 +1,19 @@ +name: Commit Msg Check Action + +on: + pull_request: + types: [opened, synchronize, reopened] + +jobs: + check-commits: + runs-on: ubuntu-latest + + steps: + - name: Run custom commit check + uses: qualcomm/commit-msg-check-action@main + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + body-char-limit: 72 + sub-char-limit: 50 + check-blank-line: true \ No newline at end of file diff --git a/.github/workflows/loading.yml b/.github/workflows/loading.yml new file mode 100644 index 000000000..85fc9d0cd --- /dev/null +++ b/.github/workflows/loading.yml @@ -0,0 +1,52 @@ +--- +name: _loading +description: Load required parameters for the subsequent jobs + +on: + workflow_call: + inputs: + target_branch: + description: "Branch to checkout (optional)" + required: false + type: string + default: "" + outputs: + build_matrix: + description: Build matrix + value: ${{ jobs.loading.outputs.build_matrix }} + full_matrix: + description: Full Matrix containing lava description + value: ${{ jobs.loading.outputs.full_matrix }} + +jobs: + loading: + runs-on: ubuntu-latest + outputs: + build_matrix: ${{ steps.loading.outputs.build_matrix }} + full_matrix: ${{ steps.loading.outputs.full_matrix }} + steps: + # SCENARIO 1: Pull Request (Pre-Merge) + # Uses your custom sync action to merge PR code with base + - name: Sync codebase (PR) + if: github.event_name == 'pull_request' + uses: qualcomm-linux-stg/video-driver/.github/actions/sync@video.qclinux.main.stage + with: + event_name: ${{ github.event_name }} + pr_ref: ${{ github.event.pull_request.head.ref }} + pr_repo: ${{ github.event.pull_request.head.repo.full_name }} + base_ref: ${{ github.ref_name }} + + # SCENARIO 2: Schedule or Manual (Post-Merge) + # Uses standard checkout because there is no PR to sync + - name: Checkout Code (Schedule) + if: github.event_name != 'pull_request' + uses: actions/checkout@v4 + with: + # Use the input branch if provided, otherwise 
default to current ref
+          ref: ${{ inputs.target_branch || github.ref_name }}
+          # Check out into 'video-driver' folder so the script finds the nested path
+          path: video-driver
+
+      - name: Load Parameters
+        id: loading
+        uses: qualcomm-linux-stg/video-driver/.github/actions/loading@video.qclinux.main.stage
\ No newline at end of file
diff --git a/.github/workflows/post_merge.yml b/.github/workflows/post_merge.yml
new file mode 100644
index 000000000..07cd48af6
--- /dev/null
+++ b/.github/workflows/post_merge.yml
@@ -0,0 +1,31 @@
+name: Post Merge Weekly
+description: |
+  Runs post-merge CI for the video-driver repository on a weekly schedule.
+  Reuses loading, build and test workflows.
+
+on:
+  schedule:
+    - cron: '0 0 * * 0'
+  workflow_dispatch:
+
+jobs:
+  loading:
+    uses: qualcomm-linux-stg/video-driver/.github/workflows/loading.yml@video.qclinux.main.stage
+    secrets: inherit
+
+  build:
+    needs: loading
+    uses: qualcomm-linux-stg/video-driver/.github/workflows/sync-and-build.yml@video.qclinux.main.stage
+    secrets: inherit
+    with:
+      docker_image: kmake-image:ver.1.0
+      build_matrix: ${{ needs.loading.outputs.build_matrix }}
+
+  lava-test:
+    needs: [loading, build]
+    uses: qualcomm-linux-stg/video-driver/.github/workflows/test.yml@video.qclinux.main.stage
+    secrets: inherit
+    with:
+      docker_image: kmake-image:ver.1.0
+      build_matrix: ${{ needs.loading.outputs.build_matrix }}
+      full_matrix: ${{ needs.loading.outputs.full_matrix }}
\ No newline at end of file
diff --git a/.github/workflows/pre_merge.yml b/.github/workflows/pre_merge.yml
new file mode 100644
index 000000000..c68211adc
--- /dev/null
+++ b/.github/workflows/pre_merge.yml
@@ -0,0 +1,36 @@
+name: pre_merge
+description: |
+  Orchestrates pre-merge CI for the video-driver repository using a matrix
+  defined in the caller workflow. Builds and tests using reusable workflows.
+ +on: + push: + branches: + - video.qclinux.main.stage + workflow_dispatch: + pull_request_target: + types: [opened, synchronize, reopened] + branches: + - video.qclinux.main.stage + +jobs: + loading: + uses: qualcomm-linux-stg/video-driver/.github/workflows/loading.yml@video.qclinux.main.stage + secrets: inherit + + build: + needs: loading + uses: qualcomm-linux-stg/video-driver/.github/workflows/sync-and-build.yml@video.qclinux.main.stage + secrets: inherit + with: + docker_image: kmake-image:ver.1.0 + build_matrix: ${{ needs.loading.outputs.build_matrix }} + + lava-test: + needs: [loading, build] + uses: qualcomm-linux-stg/video-driver/.github/workflows/test.yml@video.qclinux.main.stage + secrets: inherit + with: + docker_image: kmake-image:ver.1.0 + build_matrix: ${{ needs.loading.outputs.build_matrix }} + full_matrix: ${{ needs.loading.outputs.full_matrix }} \ No newline at end of file diff --git a/.github/workflows/qcom-preflight-checks.yml b/.github/workflows/qcom-preflight-checks.yml new file mode 100644 index 000000000..bff981323 --- /dev/null +++ b/.github/workflows/qcom-preflight-checks.yml @@ -0,0 +1,24 @@ +name: Qualcomm Preflight Checks +on: + pull_request_target: + branches: [ video.qclinux.main.stage ] + push: + branches: [ video.qclinux.main.stage ] + workflow_dispatch: + +permissions: + contents: read + security-events: write + +jobs: + qcom-preflight-checks: + uses: qualcomm/qcom-reusable-workflows/.github/workflows/qcom-preflight-checks-reusable-workflow.yml@v1.1.4 + with: + # ✅ Preflight Checkers + repolinter: true # default: true + semgrep: true # default: true + copyright-license-detector: true # default: true + pr-check-emails: true # default: true + dependency-review: true # default: true + secrets: + SEMGREP_APP_TOKEN: ${{ secrets.SEMGREP_APP_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/stale-issues.yaml b/.github/workflows/stale-issues.yaml new file mode 100644 index 000000000..29d426d1a --- /dev/null +++ 
b/.github/workflows/stale-issues.yaml @@ -0,0 +1,24 @@ +name: 'Close stale issues and pull requests with no recent activity' +on: + schedule: + - cron: "30 1 * * *" + +permissions: + issues: write + pull-requests: write + +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v9 + with: + stale-issue-message: 'This issue has been marked as stale due to 60 days of inactivity.' + stale-pr-message: 'This pull request has been marked as stale due to 60 days of inactivity.' + exempt-issue-labels: bug,enhancement + exempt-pr-labels: bug,enhancement + days-before-stale: 60 + days-before-close: -1 + remove-stale-when-updated: true + remove-issue-stale-when-updated: true + remove-pr-stale-when-updated: true \ No newline at end of file diff --git a/.github/workflows/sync-and-build.yml b/.github/workflows/sync-and-build.yml new file mode 100644 index 000000000..b82393ea9 --- /dev/null +++ b/.github/workflows/sync-and-build.yml @@ -0,0 +1,309 @@ +name: Sync and Build + +on: + workflow_dispatch: + workflow_call: + inputs: + docker_image: + description: Docker image to use for the build + required: false + type: string + default: kmake-image:ver.1.0 + build_matrix: + description: Build matrix for multi target builds + type: string + required: true + +permissions: + packages: read + +jobs: + sync-and-build: + runs-on: + group: GHA-video-Stg-SelfHosted-RG + labels: [self-hosted, video-stg-u2204-x64-large-od-ephem] + + steps: + - name: Pull Docker image + uses: qualcomm-linux/kernel-config/.github/actions/pull_docker_image@main + with: + image: ${{ inputs.docker_image }} + + # ------------------------------------------------------------------------ + # ✅ CRITICAL FIX: Explicitly checkout the driver code. + # This ensures the source code exists for Post-Merge/Scheduled runs. 
+ # ------------------------------------------------------------------------ + - name: Checkout Video Driver + uses: actions/checkout@v4 + with: + path: video-driver + fetch-depth: 0 + + - name: Sync codebase + uses: qualcomm-linux-stg/video-driver/.github/actions/sync@video.qclinux.main.stage + with: + event_name: ${{ github.event_name }} + pr_ref: ${{ github.event.pull_request.head.ref }} + pr_repo: ${{ github.event.pull_request.head.repo.full_name }} + base_ref: ${{ github.ref_name }} + caller_workflow: build + + + - name: Build workspace + uses: qualcomm-linux-stg/video-driver/.github/actions/build@video.qclinux.main.stage + with: + docker_image: kmake-image:ver.1.0 + workspace_path: ${{ github.workspace }} + + - name: Fix Workspace Ownership + if: always() + shell: bash + run: | + echo "🔧 Fixing file ownership (root -> runner user)..." + sudo chown -R $(id -u):$(id -g) ${{ github.workspace }} + + - name: Download iris_test_app from the s3 + shell: bash + run: | + set -euo pipefail + echo ${{github.workspace }} + mkdir -p "${{github.workspace }}/v4l-video-test-app/build/" + echo " syncing files from s3://qli-stg-video-gh-artifacts/qualcomm-linux-stg/video-driver/iris_test_app/" + aws s3 cp "s3://qli-stg-video-gh-artifacts/qualcomm-linux-stg/video-driver/iris_test_app/" "${{ github.workspace }}/v4l-video-test-app/build/" --recursive + echo " ✅ Download complete" + ls ${{ github.workspace }}/v4l-video-test-app/build/ + + - name: Download firmware file from S3 + shell: bash + run: | + set -euo pipefail + mkdir -p "${{ github.workspace }}/downloads" + echo "📥 Syncing files from S3 path: s3://qli-stg-video-gh-artifacts/qualcomm-linux-stg/video-driver/artifacts/" + aws s3 cp "s3://qli-stg-video-gh-artifacts/qualcomm-linux-stg/video-driver/artifacts/vpu20_1v.mbn" "${{ github.workspace }}/downloads" + echo "✅ Download complete" + [ -f "${{ github.workspace }}/downloads/vpu20_1v.mbn" ] || { echo "❌ Missing vpu20_1v.mbn"; exit 1; } + + - name: Download the video-contents 
for testing + shell: bash + run: | + set -euo pipefail + mkdir -p "${{ github.workspace }}/downloads" + echo "Downloading the video-content files" + wget -q https://github.com/qualcomm-linux/qcom-linux-testkit/releases/download/IRIS-Video-Files-v1.0/video_clips_iris.tar.gz \ + -O "${{ github.workspace }}/downloads/video_clips_iris.tar.gz" + [ -f "${{ github.workspace }}/downloads/video_clips_iris.tar.gz" ] || { echo "❌ Failed to download video_clips_iris.tar.gz"; exit 1; } + + - name: Prepare /data/vendor/iris_test_app and list contents + shell: bash + run: | + set -euo pipefail + data_dir="${{ github.workspace }}/kobj/tar-install/data/vendor/iris_test_app" + mkdir -p "$data_dir" + data_dir2="${{ github.workspace }}/kobj/tar-install/data/vendor/iris_test_app/firmware" + mkdir -p "$data_dir2" + firmware_version=$(ls kobj/tar-install/lib/modules/) + mkdir -p "kobj/tar-install/lib/modules/$firmware_version/updates" + cp video-driver/video/iris_vpu.ko kobj/tar-install/lib/modules/$firmware_version/updates/ + # Copy test app, firmware blob, and video clips tar into data/vendor/iris_test_app + cp "v4l-video-test-app/build/iris_v4l2_test" "$data_dir/" + cp "${{ github.workspace }}/downloads/vpu20_1v.mbn" "$data_dir2/" + #cp video-driver/iris_vpu.ko "$data_dir/" + cp "${{ github.workspace }}/downloads/video_clips_iris.tar.gz" "$data_dir/" + + echo "📂 Contents of $data_dir:" + ls -lh "$data_dir" + + - name: Create compressed kernel ramdisk archives + shell: bash + run: | + set -euo pipefail + cd "${{ github.workspace }}/kobj/tar-install" + find lib/modules data | cpio -o -H newc --owner=0:0 | gzip -9 > "${{ github.workspace }}/local-kernel-ramdisk.cpio.gz" + cd - > /dev/null + ls -lh "${{ github.workspace }}/local-kernel-ramdisk.cpio.gz" + + - name: Download meta-qcom stable initramfs artifacts from S3 + shell: bash + run: | + set -euo pipefail + mkdir -p "${{ github.workspace }}/downloads" + echo "🔍 Fetching initramfs files from S3 bucket: 
s3://qli-stg-video-gh-artifacts/qualcomm-linux-stg/video-driver/artifacts/initramfs/" + aws s3 cp s3://qli-stg-video-gh-artifacts/qualcomm-linux-stg/video-driver/artifacts/initramfs/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz "${{ github.workspace }}/downloads/" + echo "Initramfs files downloaded to: ${{ github.workspace }}/downloads" + + - name: Decompress ramdisk files and rename .cpio.gz files + shell: bash + run: | + set -euo pipefail + cd "${{ github.workspace }}/downloads" + echo " Decompressing and renaming .cpio.gz files..." + gunzip -c initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz > kerneltest.cpio + + - name: Merge and repackage initramfs + shell: bash + run: | + set -euo pipefail + echo "🔧 Starting repackaging process" + + workspace="${{ github.workspace }}" + mkdir -p "$workspace/combineramdisk" + cp "$workspace/local-kernel-ramdisk.cpio.gz" "$workspace/combineramdisk/" + cd "$workspace/combineramdisk" + + # Decompress local-kernel-ramdisk + mv local-kernel-ramdisk.cpio.gz local-kernel-ramdisk.cpio.gz.bak + gunzip -c local-kernel-ramdisk.cpio.gz.bak > local-kernel-ramdisk.cpio + + # Copy kerneltest from downloads + cp "$workspace/downloads/kerneltest.cpio" . + + # Merge kerneltest and local-kernel-ramdisk + cat kerneltest.cpio local-kernel-ramdisk.cpio > video-merged.cpio + gzip -9 video-merged.cpio + + # Create temp workspace to clean up archive + mkdir -p temp_merge + cd temp_merge + cpio -id --no-absolute-filenames < ../kerneltest.cpio + cpio -id --no-absolute-filenames < ../local-kernel-ramdisk.cpio + cd .. + + # Remove old merged archive + rm -f video-merged.cpio.gz + + # Repackage clean archive + cd temp_merge + find . | cpio -o -H newc --owner=0:0 > ../video-merged.cpio + cd .. 
+ gzip -9 video-merged.cpio + + # Cleanup + rm -rf temp_merge kerneltest.cpio local-kernel-ramdisk.cpio + echo "Final archive: $workspace/combineramdisk/video-merged.cpio.gz" + ls -lh "$workspace/combineramdisk/video-merged.cpio.gz" + + - name: Validate build_matrix and jq + shell: bash + run: | + set -euo pipefail + machines_json='${{ inputs.build_matrix }}' + if ! command -v jq >/dev/null 2>&1; then + echo "❌ jq is not installed on this runner. Please install jq." + exit 1 + fi + echo "$machines_json" | jq -e . >/dev/null + [ "$(echo "$machines_json" | jq length)" -gt 0 ] || { echo "❌ build_matrix is empty"; exit 1; } + echo "✅ build_matrix is valid JSON" + + - name: Append artifacts to S3 upload list + shell: bash + run: | + set -euo pipefail + workspace="${{ github.workspace }}" + file_list="$workspace/artifacts/file_list.txt" + mkdir -p "$workspace/artifacts" + + # Fresh file_list + : > "$file_list" + + # Package lib/modules (xz-compressed) — exclude risky symlinks + mod_root="$workspace/kobj/tar-install/lib/modules" + [ -d "$mod_root" ] || { echo "❌ Missing directory: $mod_root"; exit 1; } + tar -C "$workspace/kobj/tar-install" \ + --exclude='lib/modules/*/build' \ + --exclude='lib/modules/*/source' \ + --numeric-owner --owner=0 --group=0 \ + -cJf "$workspace/modules.tar.xz" lib/modules + + # Safety checks on the tar + if tar -Jtvf "$workspace/modules.tar.xz" | grep -q ' -> '; then + echo "❌ Symlinks found in modules archive (should be none)"; exit 1 + fi + if tar -Jtf "$workspace/modules.tar.xz" | grep -Eq '^/|(^|/)\.\.(/|$)'; then + echo "❌ Unsafe paths found in modules archive"; exit 1 + fi + + echo "$workspace/modules.tar.xz" >> "$file_list" + echo "✅ Queued for upload: $workspace/modules.tar.xz" + + # Kernel Image + merged video ramdisk (no local ramdisk) + IMAGE_PATH="$workspace/kobj/arch/arm64/boot/Image" + VMLINUX_PATH="$workspace/kobj/vmlinux" + MERGED_PATH="$workspace/combineramdisk/video-merged.cpio.gz" + + [ -f "$IMAGE_PATH" ] || { echo "❌ Missing 
expected file: $IMAGE_PATH"; exit 1; }
+        [ -f "$VMLINUX_PATH" ] || { echo "❌ Missing expected file: $VMLINUX_PATH"; exit 1; }
+        [ -f "$MERGED_PATH" ] || { echo "❌ Missing merged cpio: $MERGED_PATH"; exit 1; }
+
+        echo "$IMAGE_PATH" >> "$file_list"
+        echo "✅ Queued for upload: $IMAGE_PATH"
+        echo "$VMLINUX_PATH" >> "$file_list"
+        echo "✅ Queued for upload: $VMLINUX_PATH"
+        echo "$MERGED_PATH" >> "$file_list"
+        echo "✅ Queued for upload: $MERGED_PATH"
+
+        # Loop through all machines from the build_matrix input and add DTBs
+        machines='${{ inputs.build_matrix }}'
+        for machine in $(echo "$machines" | jq -r '.[].machine'); do
+          dtb="$workspace/kobj/arch/arm64/boot/dts/qcom/${machine}.dtb"
+          if [ -f "$dtb" ]; then
+            echo "$dtb" >> "$file_list"
+            echo "✅ Queued for upload: $dtb"
+          else
+            echo "❌ Missing DTB: $dtb"
+            exit 1
+          fi
+        done
+
+        echo "----- Files queued for S3 upload -----"
+        cat "$file_list"
+
+    - name: Upload all artifacts to S3
+      uses: qualcomm-linux-stg/video-driver/.github/actions/aws_s3_helper@video.qclinux.main.stage
+      with:
+        s3_bucket: qli-stg-video-gh-artifacts
+        local_file: ${{ github.workspace }}/artifacts/file_list.txt
+        mode: multi-upload
+        upload_location: ${{ github.repository_owner }}/${{ github.event.repository.name }}/${{ github.run_id }}-${{ github.run_attempt }}
+
+
+    - name: Clean up
+      if: always()
+      shell: bash
+      run: |
+        set -euo pipefail
+        ws="${{ github.workspace }}"
+        rm -rf "$ws/artifacts" || true
+        rm -rf "$ws/combineramdisk" || true
+        rm -rf "$ws/downloads" || true
+        rm -rf "$ws/kobj" || true
+        rm -f "$ws/modules.tar.xz" || true
+        rm -f "$ws/local-kernel-ramdisk.cpio.gz" || true
+
+
+    - name: Update summary
+      if: success() || failure()
+      shell: bash
+      run: |
+        status="${{ job.status }}"
+        if [ "$status" = "success" ]; then
+          summary=":heavy_check_mark: Build Success"
+        else
+          summary=":x: Build Failed"
+        fi
+
+        ws="${{ github.workspace }}"
+        file_list="$ws/artifacts/file_list.txt"
+
+        {
+          echo "
Build Summary" + echo "$summary" + if [ -f "$file_list" ]; then + echo "" + echo "Artifacts queued for upload:" + while IFS= read -r line; do + echo "- $line" + done < "$file_list" + fi + echo "
" + } >> "$GITHUB_STEP_SUMMARY" \ No newline at end of file diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 000000000..e65696362 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,345 @@ +name: _test +description: Run tests on LAVA and generate summary + +on: + workflow_call: + inputs: + docker_image: + description: Docker image + type: string + required: true + default: kmake-image:ver.1.0 + + build_matrix: + description: Build matrix for multi target builds (stringified JSON) + type: string + required: true + + full_matrix: + description: Full matrix containing lava description (stringified JSON) + type: string + required: true + +jobs: + test: + runs-on: + group: GHA-video-Stg-SelfHosted-RG + labels: [ self-hosted, video-stg-u2204-x64-large-od-ephem ] + strategy: + fail-fast: false + matrix: + build_matrix: ${{ fromJson(inputs.build_matrix) }} + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ github.ref }} + fetch-depth: 0 + + - name: Pull docker image + uses: qualcomm-linux/kernel-config/.github/actions/pull_docker_image@main + with: + image: ${{ inputs.docker_image }} + + - name: Download URLs list (presigned_urls.json) + uses: actions/download-artifact@v4 + with: + name: presigned_urls.json + merge-multiple: true + path: ${{ github.workspace }} + + - name: Clone lava job render scripts + run: cd .. 
&& git clone https://github.com/qualcomm-linux/job_render + + - name: Extract the LAVA machine name + id: get_lavaname + uses: actions/github-script@v7 + with: + script: | + const fullMatrix = JSON.parse(`${{ inputs.full_matrix }}`); + const currentMachine = `${{ matrix.build_matrix.machine }}`; + + const entry = fullMatrix.find(item => item.machine === currentMachine); + if (!entry) { + core.setFailed(`No entry found in full matrix for machine: ${currentMachine}`); + return; + } + + const lavaname = entry.lavaname; + console.log(`Lavaname for ${currentMachine} is ${lavaname}`); + core.setOutput("LAVANAME", lavaname); + + - name: Create lava job definition + id: create_job_definition + uses: qualcomm-linux-stg/video-driver/.github/actions/lava_job_render@video.qclinux.main.stage + with: + docker_image: ${{ inputs.docker_image }} + env: + FIRMWARE: ${{ matrix.build_matrix.firmware }} + MACHINE: ${{ matrix.build_matrix.machine }} + LAVA_NAME: ${{ steps.get_lavaname.outputs.LAVANAME }} + + - name: Submit lava job + id: submit_job + run: | + cd ../job_render + job_id=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{ secrets.LAVA_OSS_TOKEN }} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{ secrets.LAVA_OSS_USER }} production && lavacli -i production jobs submit ./renders/lava_job_definition.yaml") + job_url="https://lava-oss.qualcomm.com/scheduler/job/$job_id" + echo "job_id=$job_id" >> $GITHUB_OUTPUT + echo "job_url=$job_url" >> $GITHUB_OUTPUT + echo "Lava Job: $job_url" + echo "JOB_ID=$job_id" >> $GITHUB_ENV + + # ------------------------------------------------------------------------ + # NEW STEP: Save Job ID to JSON for the Summary Job + # ------------------------------------------------------------------------ + - name: Save Job ID for Reporting + if: always() + run: | + # Save ID and Machine Name to a JSON file + echo "{\"id\": \"$JOB_ID\", \"machine\": \"${{ 
matrix.build_matrix.machine }}\"}" > lava-job-${{ matrix.build_matrix.machine }}.json + + - name: Upload Job ID Artifact + if: always() + uses: actions/upload-artifact@v4 + with: + name: lava-job-data-${{ matrix.build_matrix.machine }} + path: lava-job-${{ matrix.build_matrix.machine }}.json + + - name: Check lava job results + id: check_job + run: | + STATE="" + START_TIME=$(date +%s) + + # Wait for job to finish + while [ "$STATE" != "Finished" ]; do + state=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" \ + ${{ inputs.docker_image }} sh -c \ + "lavacli identities add --token ${{ secrets.LAVA_OSS_TOKEN }} \ + --uri https://lava-oss.qualcomm.com/RPC2 \ + --username ${{ secrets.LAVA_OSS_USER }} production && \ + lavacli -i production jobs show $JOB_ID" | grep state) + + STATE=$(echo "$state" | cut -d':' -f2 | sed 's/^ *//;s/ *$//') + echo "Current status: $STATE" + + CURRENT_TIME=$(date +%s) + ELAPSED_TIME=$(((CURRENT_TIME - START_TIME)/3600)) + if [ $ELAPSED_TIME -ge 2 ]; then + echo "Timeout: 2 hours exceeded." + summary=":x: Lava job exceeded time limit." + echo "summary=$summary" >> $GITHUB_OUTPUT + exit 1 + fi + sleep 30 + done + + # Check job health + health=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" \ + ${{ inputs.docker_image }} sh -c \ + "lavacli identities add --token ${{ secrets.LAVA_OSS_TOKEN }} \ + --uri https://lava-oss.qualcomm.com/RPC2 \ + --username ${{ secrets.LAVA_OSS_USER }} production && \ + lavacli -i production jobs show $JOB_ID" | grep Health) + + HEALTH=$(echo "$health" | cut -d':' -f2 | sed 's/^ *//;s/ *$//') + echo "Health: $HEALTH" + + if [[ "$HEALTH" != "Complete" ]]; then + echo "Lava job health is not Complete." + summary=":x: Lava job failed (Health: $HEALTH)." 
+ echo "summary=$summary" >> $GITHUB_OUTPUT + exit 1 + fi + + # Fetch detailed results once + docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" \ + ${{ inputs.docker_image }} sh -c \ + "lavacli identities add --token ${{ secrets.LAVA_OSS_TOKEN }} \ + --uri https://lava-oss.qualcomm.com/RPC2 \ + --username ${{ secrets.LAVA_OSS_USER }} production && \ + lavacli -i production results $JOB_ID" > lava_results.txt + + echo "=== LAVA RESULTS (first 200 lines) ===" + head -200 lava_results.txt || true + + # Decide pass/fail based ONLY on 0_video_pre-merge-tests / Video_V4L2_Runner + if awk ' + # track whether we are inside definition: 0_video_pre-merge-tests + /^definition: 0_video_pre-merge-tests$/ { in_def=1; next } + /^definition:/ && $2 != "0_video_pre-merge-tests" { in_def=0 } + + # inside that definition, track Video_V4L2_Runner case and its result + in_def && /^case: Video_V4L2_Runner$/ { in_case=1; next } + in_def && in_case && /^result:/ { + if ($2 == "fail") { + print "Found FAIL for 0_video_pre-merge-tests / Video_V4L2_Runner"; + exit 1; + } + in_case=0; # reset for next block + } + END { exit 0 } + ' lava_results.txt; then + echo "Lava job passed (0_video_pre-merge-tests base + overlay)." + summary=":heavy_check_mark: Lava job passed." + echo "summary=$summary" >> $GITHUB_OUTPUT + exit 0 + else + echo "Lava job failed in 0_video_pre-merge-tests / Video_V4L2_Runner." + summary=":x: Lava job failed." 
+ echo "summary=$summary" >> $GITHUB_OUTPUT + exit 1 + fi + + # ------------------------------------------------------------------------ + # NEW JOB: Generate Summary + # Aggregates results from all matrix runs and publishes a table + # ------------------------------------------------------------------------ + generate-summary: + needs: test + if: always() # Run even if one of the tests failed + runs-on: ubuntu-latest + steps: + - name: Download all Job IDs + uses: actions/download-artifact@v4 + with: + pattern: lava-job-data-* + merge-multiple: true + path: artifacts + + - name: Install dependencies + run: sudo apt-get update && sudo apt-get install -y jq curl + + - name: Generate LAVA Test Job Summary + id: generate + shell: bash + env: + # Configuration for your specific LAVA instance + LAVA_URL: "https://lava-oss.qualcomm.com" + LAVA_TOKEN: ${{ secrets.LAVA_OSS_TOKEN }} + SUMMARY_FILE: step-summary.md + run: | + # 1. FETCH DATA AND BUILD JSON STRUCTURE + # This loop queries the API for each Job ID found in artifacts + INPUT=$(for TESTJOB in $(find artifacts -name "lava-job-*.json") + do + JOB_ID=$(cat "${TESTJOB}" | jq -r ".id") + AUTH_HEADER="Authorization: Token $LAVA_TOKEN" + + # Fetch Job Details + JOB_DETAILS=$(curl -s -H "$AUTH_HEADER" "$LAVA_URL/api/v0.2/jobs/$JOB_ID/") + JOB_STATE=$(echo "$JOB_DETAILS" | jq -r ".state // empty") + JOB_DEVICE_TYPE=$(echo "$JOB_DETAILS" | jq -r ".requested_device_type // empty") + + if [ -z "$JOB_DEVICE_TYPE" ] || [ "$JOB_DEVICE_TYPE" = "null" ]; then + JOB_DEVICE_TYPE="unknown-$JOB_ID" + fi + + TEST_RESULTS="{}" + + if [ "${JOB_STATE}" = "Finished" ]; then + # Fetch Suites + JOB_SUITES=$(curl -s -H "$AUTH_HEADER" "$LAVA_URL/api/v0.2/jobs/$JOB_ID/suites/") + + # Iterate through suites to find test cases + TEST_RESULTS=$(for SUITE in $(echo "$JOB_SUITES" | jq -r -c ".results[]?") + do + SUITE_NAME=$(echo "$SUITE" | jq -r ".name") + SUITE_ID=$(echo "$SUITE" | jq -r ".id") + SUITE_TESTS=$(curl -s -H "$AUTH_HEADER" 
"$LAVA_URL/api/v0.2/jobs/$JOB_ID/suites/$SUITE_ID/tests/") + + if [ "$SUITE_NAME" != "lava" ]; then + # Normal Test Suites + # FIX: Loop through each result item individually to handle multiple tests per suite (e.g., base + overlay) + echo "$SUITE_TESTS" | jq -c ".results[]" | while read -r TEST_ITEM; do + T_NAME=$(echo "$TEST_ITEM" | jq -r ".name") + T_RESULT=$(echo "$TEST_ITEM" | jq -r ".result") + TEST_URL="$LAVA_URL/results/$JOB_ID/$SUITE_ID" + + # Construct JSON Object + TEST_RESULT_OBJ=$(jq --arg url "$TEST_URL" --arg result "$T_RESULT" -n -c '$ARGS.named') + if [ -n "$T_NAME" ]; then + jq -n -c --arg key "$T_NAME" --argjson value "$TEST_RESULT_OBJ" '$ARGS.named' + fi + done + else + # "lava" suite usually implies the boot/provisioning process + TEST_NAME="boot" + TEST_URL="$LAVA_URL/results/$JOB_ID" + TEST_RESULT_OBJ=$(jq --arg url "$TEST_URL" --arg result "pass" -n -c '$ARGS.named') + jq -n -c --arg key "$TEST_NAME" --argjson value "$TEST_RESULT_OBJ" '$ARGS.named' + fi + done | jq -s -c --sort-keys 'from_entries') + fi + + echo "{\"key\": \"$JOB_DEVICE_TYPE\", \"value\": $TEST_RESULTS}" | jq -c . + done | jq -s -c 'reduce .[] as $i ({}; .[$i.key] = ((.[$i.key] // {}) + $i.value))') + + # 2. GENERATE MARKDOWN TABLE + + DEVICES=$(echo "$INPUT" | jq -r 'keys[]' | sort) + RESULTS=$(echo "$INPUT" | jq -r '.[] | keys[]?' 
| sort -u) + + echo "### LAVA Test Summary" > $SUMMARY_FILE + echo "" >> $SUMMARY_FILE + + # Table Header + printf "| Test Case |" >> $SUMMARY_FILE + for D in $DEVICES; do printf " %s |" "$D" >> $SUMMARY_FILE; done + echo "" >> $SUMMARY_FILE + + # Table Separator + printf "| :--- |" >> $SUMMARY_FILE + for _ in $DEVICES; do printf " :---: |" >> $SUMMARY_FILE; done + echo "" >> $SUMMARY_FILE + + # Table Body + for R in $RESULTS; do + printf "| **%s** |" "$R" >> $SUMMARY_FILE + for D in $DEVICES; do + VALUE=$(echo "$INPUT" | jq -r --arg d "$D" --arg r "$R" '.[$d][$r].result // ""') + URL=$(echo "$INPUT" | jq -r --arg d "$D" --arg r "$R" '.[$d][$r].url // ""') + + ICON=":no_entry_sign:" + if [ "${VALUE}" = "pass" ]; then ICON=":white_check_mark:"; fi + if [ "${VALUE}" = "fail" ]; then ICON=":x:"; fi + if [ "${VALUE}" = "skip" ]; then ICON=":warning:"; fi + + if [ -n "$URL" ] && [ "$URL" != "null" ]; then + printf " [%s](%s) |" "$ICON" "$URL" >> $SUMMARY_FILE + else + printf " %s |" "$ICON" >> $SUMMARY_FILE + fi + done + echo "" >> $SUMMARY_FILE + done + + # 3. JOB DETAILS LIST + echo "" >> $SUMMARY_FILE + echo "#### Job Details" >> $SUMMARY_FILE + echo "| Job ID | Device | State | Health | Link |" >> $SUMMARY_FILE + echo "| :--- | :--- | :--- | :--- | :--- |" >> $SUMMARY_FILE + + for TESTJOB in $(find artifacts -name "lava-job-*.json"); do + JOB_ID=$(cat "${TESTJOB}" | jq -r ".id") + AUTH_HEADER="Authorization: Token $LAVA_TOKEN" + JOB_DETAILS=$(curl -s -H "$AUTH_HEADER" "$LAVA_URL/api/v0.2/jobs/$JOB_ID/") + + HEALTH=$(echo "$JOB_DETAILS" | jq -r ".health") + STATE=$(echo "$JOB_DETAILS" | jq -r ".state") + DEVICE=$(echo "$JOB_DETAILS" | jq -r ".requested_device_type") + URL="$LAVA_URL/results/$JOB_ID" + + echo "| $JOB_ID | $DEVICE | $STATE | $HEALTH | [View]($URL) |" >> $SUMMARY_FILE + done + + # 4. 
PUBLISH TO GITHUB SUMMARY + cat $SUMMARY_FILE >> $GITHUB_STEP_SUMMARY + + - name: Upload Summary Artifact + uses: actions/upload-artifact@v4 + with: + name: lava-summary-report + path: step-summary.md \ No newline at end of file diff --git a/.github/workflows/uapi-check.yml b/.github/workflows/uapi-check.yml new file mode 100644 index 000000000..b7bf2270b --- /dev/null +++ b/.github/workflows/uapi-check.yml @@ -0,0 +1,30 @@ +name: UAPI and Driver Checks + +on: + pull_request: + # Optional: limit to main branch + branches: video.qclinux.main.stage + +jobs: + uapi-check: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository with full history + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Determine base and head SHAs + id: shas + run: | + # PR base commit + echo "base_sha=${{ github.event.pull_request.base.sha }}" >> "$GITHUB_OUTPUT" + # PR head (current) commit + echo "head_sha=${{ github.event.pull_request.head.sha }}" >> "$GITHUB_OUTPUT" + + - name: Run UAPI + driver checks + run: | + export BASE_SHA="${{ steps.shas.outputs.base_sha }}" + export HEAD_SHA="${{ steps.shas.outputs.head_sha }}" + ./ci/check-uapi-and-driver.sh diff --git a/ci/MACHINES.json b/ci/MACHINES.json new file mode 100644 index 000000000..fb996c270 --- /dev/null +++ b/ci/MACHINES.json @@ -0,0 +1,42 @@ +{ + "qcs6490-rb3gen2": { + "machine": "qcs6490-rb3gen2", + "firmware": "rb3gen2", + "lavaname": "qcs6490-rb3gen2", + "target": "qcs6490-rb3gen2", + "buildid": "QCM6490.LE.1.0-00376-STD.PROD-1", + "firmwareid": "rb3gen2" + }, + "qcs9100-ride-r3": { + "machine": "qcs9100-ride-r3", + "firmware": "sa8775p-ride", + "lavaname": "qcs9100-ride", + "target": "qcs9100-ride-r3", + "buildid": "QCS9100.LE.1.0-00243-STD.PROD-1", + "firmwareid": "sa8775p-ride" + }, + "qcs8300-ride": { + "machine": "qcs8300-ride", + "firmware": "qcs8300-ride", + "lavaname": "qcs8300-ride", + "target": "qcs8300-ride", + "buildid": "QCS8300.LE.1.0-00137-STD.PROD-1", + "firmwareid": "qcs8300-ride" + }, + "qcs615-ride": { + 
"machine": "qcs615-ride", + "firmware": "qcs615-ride", + "lavaname": "qcs615-ride", + "target": "qcs615-ride", + "buildid": "QCS615.LE.1.0-00016-STD.PROD-1", + "firmwareid": "qcs615-ride" + }, + "sm8750-mtp": { + "machine": "sm8750-mtp", + "firmware": "sm8750-mtp", + "lavaname": "sm8750-mtp", + "target": "sm8750-mtp", + "buildid": "YOUR_BUILD_ID", + "firmwareid": "sm8750-mtp" +} +} \ No newline at end of file