108 changes: 108 additions & 0 deletions .github/actions/aws_s3_helper/action.yml
@@ -0,0 +1,108 @@
name: AWS S3 Helper
description: Upload and download files from AWS S3

inputs:
  s3_bucket:
    description: S3 Bucket Name
    required: true
  local_file:
    description: Local file path (single-upload) or path to a file list, one path per line (multi-upload)
    required: false
    default: ../artifacts/file_list.txt
  download_file:
    description: S3 key of the file to download
    required: false
    default: ''
  download_location:
    description: File download location
    required: false
    default: .
  mode:
    description: Mode of operation (single-upload/multi-upload/download)
    required: true
    default: single-upload
  upload_location:
    description: Upload location
    required: true

outputs:
  presigned_url:
    description: Pre-signed URL for the uploaded file
    value: ${{ steps.sync-data.outputs.presigned_url }}
  s3_location:
    description: Upload location
    value: ${{ inputs.upload_location }}

runs:
  using: "composite"
  steps:
    - name: Sync Data
      id: sync-data
      shell: bash
      env:
        UPLOAD_LOCATION: ${{ inputs.upload_location }}
      run: |
        echo "::group::Syncing files with S3"
        case "${{ inputs.mode }}" in
          multi-upload)
            if [ ! -s "${{ inputs.local_file }}" ]; then
              echo "❌ File list is empty. No files to upload."
              exit 1
            fi

            echo "📄 Contents of file list:"
            cat "${{ inputs.local_file }}"

            first_line=true
            manifest="${{ github.workspace }}/presigned_urls.json"
            echo "{" > "${manifest}"

            while IFS= read -r file; do
              resolved_file=$(readlink -f "$file")
              if [ -f "$resolved_file" ]; then
                filename=$(basename "$resolved_file")
                echo "📤 Uploading $filename..."
                aws s3 cp "$resolved_file" "s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}/$filename"
                presigned_url=$(aws s3 presign "s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}/$filename" --expires-in 259200)

                if [ "$first_line" = true ]; then
                  first_line=false
                else
                  echo "," >> "${manifest}"
                fi

                # Key = filename, Value = presigned_url
                echo " \"${filename}\": \"${presigned_url}\"" >> "${manifest}"
                echo "✅ Pre-signed URL for $filename: $presigned_url"
              else
                echo "⚠️ Skipping: $file is not a regular file or not accessible."
              fi
            done < "${{ inputs.local_file }}"

            echo "}" >> "${manifest}"
            ;;
          single-upload)
            resolved_file=$(readlink -f "${{ inputs.local_file }}")
            filename=$(basename "$resolved_file")
            aws s3 cp "$resolved_file" "s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}/$filename"
            presigned_url=$(aws s3 presign "s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}/$filename" --expires-in 259200)
            echo "presigned_url=${presigned_url}" >> "$GITHUB_OUTPUT"
            ;;
          download)
            download_dir=$(realpath "${{ inputs.download_location }}")
            aws s3 cp "s3://${{ inputs.s3_bucket }}/${{ inputs.download_file }}" "$download_dir"
            ;;
          *)
            echo "Invalid mode. Use 'single-upload', 'multi-upload', or 'download'."
            exit 1
            ;;
        esac
        echo "::endgroup::"

    - name: Upload presigned URL manifest
      if: ${{ inputs.mode == 'multi-upload' }}
      uses: actions/upload-artifact@v4
      with:
        name: presigned_urls.json
        path: ${{ github.workspace }}/presigned_urls.json
        retention-days: 3
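
As a usage sketch, a workflow job might call this action like so (the upload location is an illustrative assumption; the bucket and file-list path mirror values used elsewhere in this change):

  - name: Upload build artifacts
    uses: ./.github/actions/aws_s3_helper
    with:
      s3_bucket: qli-stg-video-gh-artifacts
      mode: multi-upload
      local_file: ../artifacts/file_list.txt
      upload_location: qualcomm-linux-stg/video-driver/artifacts/${{ github.run_id }}

Note that the presigned_url output is only populated in single-upload mode; multi-upload publishes its URLs through the presigned_urls.json artifact instead.
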
42 changes: 42 additions & 0 deletions .github/actions/build/action.yml
@@ -0,0 +1,42 @@
name: Build Workspace
description: |
  Builds kernel and video-driver using a Docker image.

inputs:
  docker_image:
    description: Docker image to use
    required: true
  workspace_path:
    description: Path to workspace directory
    required: true

runs:
  using: "composite"
  steps:
    - name: Build kernel
      shell: bash
      run: |
        docker run --rm \
          -v "${{ inputs.workspace_path }}:${{ inputs.workspace_path }}" \
          -w "${{ inputs.workspace_path }}/kernel" \
          --user $(id -u):$(id -g) \
          ${{ inputs.docker_image }} \
          bash -c "
            make O=../kobj ARCH=arm64 defconfig &&
            make O=../kobj -j\$(nproc) &&
            make O=../kobj -j\$(nproc) dir-pkg INSTALL_MOD_STRIP=1
          "

    - name: Build video-driver
      shell: bash
      run: |
        docker run --rm \
          -v "${{ inputs.workspace_path }}:${{ inputs.workspace_path }}" \
          -w "${{ inputs.workspace_path }}/video-driver" \
          --user $(id -u):$(id -g) \
          ${{ inputs.docker_image }} \
          bash -c "
            make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- CC=aarch64-linux-gnu-gcc-13 \
              -C ${{ inputs.workspace_path }}/kobj \
              M=\$(pwd) VIDEO_KERNEL_ROOT=\$(pwd) modules
          "
196 changes: 196 additions & 0 deletions .github/actions/lava_job_render/action.yml
@@ -0,0 +1,196 @@
name: LAVA Job Render
inputs:
  docker_image:
    description: Docker image
    required: true
    default: kmake-image:ver.1.0

runs:
  using: "composite"
  steps:
    - name: Process presigned_urls.json
      id: process_urls
      uses: actions/github-script@v7
      with:
        script: |
          const fs = require('fs');
          const p = require('path');

          const filePath = p.join(process.env.GITHUB_WORKSPACE, 'presigned_urls.json');
          if (!fs.existsSync(filePath)) {
            core.setFailed(`File not found: ${filePath}`);
            return;
          }

          // Read JSON mapping of uploaded file paths -> presigned URLs
          const data = JSON.parse(fs.readFileSync(filePath, 'utf-8'));

          function findUrlByFilename(filename) {
            for (const [path, url] of Object.entries(data)) {
              if (path.endsWith(filename)) return url;
            }
            return null;
          }

          const modulesTarUrl = findUrlByFilename('modules.tar.xz');
          const imageUrl = findUrlByFilename('Image');
          const mergedRamdiskUrl = findUrlByFilename('video-merged.cpio.gz');
          const vmlinuxUrl = findUrlByFilename('vmlinux');

          // DTB is expected to be "<MACHINE>.dtb"
          const dtbFilename = `${process.env.MACHINE}.dtb`;
          const dtbUrl = findUrlByFilename(dtbFilename);

          core.setOutput('modules_url', modulesTarUrl || '');
          core.setOutput('image_url', imageUrl || '');
          core.setOutput('vmlinux_url', vmlinuxUrl || '');
          core.setOutput('dtb_url', dtbUrl || '');
          core.setOutput('merged_ramdisk_url', mergedRamdiskUrl || '');

          console.log(`Modules URL: ${modulesTarUrl}`);
          console.log(`Image URL: ${imageUrl}`);
          console.log(`Vmlinux URL: ${vmlinuxUrl}`);
          console.log(`Dtb URL: ${dtbUrl}`);
          console.log(`Merged Ramdisk URL: ${mergedRamdiskUrl}`);

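    # For reference, a minimal sketch of the presigned_urls.json consumed above
    # (the filenames come from the upload steps; the URLs and <MACHINE> value
    # are illustrative assumptions, not values produced by this change):
    #
    #   {
    #     "Image": "https://qli-stg-video-gh-artifacts.s3.amazonaws.com/.../Image?X-Amz-Expires=259200...",
    #     "vmlinux": "https://qli-stg-video-gh-artifacts.s3.amazonaws.com/.../vmlinux?...",
    #     "modules.tar.xz": "https://qli-stg-video-gh-artifacts.s3.amazonaws.com/.../modules.tar.xz?...",
    #     "video-merged.cpio.gz": "https://qli-stg-video-gh-artifacts.s3.amazonaws.com/.../video-merged.cpio.gz?...",
    #     "<MACHINE>.dtb": "https://qli-stg-video-gh-artifacts.s3.amazonaws.com/.../<MACHINE>.dtb?..."
    #   }
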
    - name: Create metadata.json
      id: create_metadata
      shell: bash
      run: |
        echo "Creating metadata.json from job_render templates"
        cd ../job_render
        docker run -i --rm \
          --user "$(id -u):$(id -g)" \
          --workdir="$PWD" \
          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
          -e dtb_url="${{ steps.process_urls.outputs.dtb_url }}" \
          ${{ inputs.docker_image }} \
          jq '.artifacts["dtbs/qcom/${{ env.MACHINE }}.dtb"] = env.dtb_url' data/metadata.json > temp.json && mv temp.json data/metadata.json

    - name: Upload metadata.json
      id: upload_metadata
      uses: qualcomm-linux-stg/video-driver/.github/actions/aws_s3_helper@video.qclinux.main.stage
      with:
        local_file: ../job_render/data/metadata.json
        s3_bucket: qli-stg-video-gh-artifacts
        mode: single-upload

    - name: Create template json cloudData.json
      shell: bash
      run: |
        echo "Populating cloudData.json with kernel, vmlinux, modules, metadata, ramdisk"
        metadata_url="${{ steps.upload_metadata.outputs.presigned_url }}"
        image_url="${{ steps.process_urls.outputs.image_url }}"
        vmlinux_url="${{ steps.process_urls.outputs.vmlinux_url }}"
        modules_url="${{ steps.process_urls.outputs.modules_url }}"
        merged_ramdisk_url="${{ steps.process_urls.outputs.merged_ramdisk_url }}"

        cd ../job_render

        # metadata
        docker run -i --rm \
          --user "$(id -u):$(id -g)" \
          --workdir="$PWD" \
          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
          -e metadata_url="$metadata_url" \
          ${{ inputs.docker_image }} \
          jq '.artifacts.metadata = env.metadata_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json

        # kernel Image
        docker run -i --rm \
          --user "$(id -u):$(id -g)" \
          --workdir="$PWD" \
          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
          -e image_url="$image_url" \
          ${{ inputs.docker_image }} \
          jq '.artifacts.kernel = env.image_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json

        # vmlinux (set only if present)
        docker run -i --rm \
          --user "$(id -u):$(id -g)" \
          --workdir="$PWD" \
          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
          -e vmlinux_url="$vmlinux_url" \
          ${{ inputs.docker_image }} \
          sh -c 'if [ -n "$vmlinux_url" ]; then jq ".artifacts.vmlinux = env.vmlinux_url" data/cloudData.json > temp.json && mv temp.json data/cloudData.json; fi'

        # modules
        docker run -i --rm \
          --user "$(id -u):$(id -g)" \
          --workdir="$PWD" \
          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
          -e modules_url="$modules_url" \
          ${{ inputs.docker_image }} \
          jq '.artifacts.modules = env.modules_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json

        # ramdisk: use merged only here (fallback added in next step if missing)
        docker run -i --rm \
          --user "$(id -u):$(id -g)" \
          --workdir="$PWD" \
          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
          -e merged_ramdisk_url="$merged_ramdisk_url" \
          ${{ inputs.docker_image }} \
          sh -c 'if [ -n "$merged_ramdisk_url" ]; then jq ".artifacts.ramdisk = env.merged_ramdisk_url" data/cloudData.json > temp.json && mv temp.json data/cloudData.json; fi'
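
        # The unconditional jq passes above could be collapsed into a single
        # container invocation, e.g. (a sketch, not part of this change):
        #   jq --arg metadata "$metadata_url" --arg kernel "$image_url" --arg modules "$modules_url" \
        #     '.artifacts.metadata = $metadata | .artifacts.kernel = $kernel | .artifacts.modules = $modules' \
        #     data/cloudData.json > temp.json && mv temp.json data/cloudData.json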

    - name: Update firmware and ramdisk
      shell: bash
      run: |
        set -euo pipefail
        cd ../job_render

        # Fallback to stable kerneltest ramdisk only if merged ramdisk is not available
        if [ -z "${{ steps.process_urls.outputs.merged_ramdisk_url }}" ]; then
          echo "Merged ramdisk not found. Using stable kerneltest ramdisk fallback."
          ramdisk_url="$(aws s3 presign s3://qli-stg-video-gh-artifacts/qualcomm-linux-stg/video-driver/artifacts/initramfs/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz --expires-in 7600)"
          docker run -i --rm \
            --user "$(id -u):$(id -g)" \
            --workdir="$PWD" \
            -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
            -e ramdisk_url="$ramdisk_url" \
            ${{ inputs.docker_image }} \
            jq '.artifacts.ramdisk = env.ramdisk_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
        else
          echo "Ramdisk set from merged source; skipping kerneltest fallback."
        fi

        # Optional board-specific firmware initramfs
        if [ -n "${{ env.FIRMWARE }}" ]; then
          case "${{ env.FIRMWARE }}" in
            sm8750-mtp)
              FW_FILE="initramfs-firmware-dragonboard410c-image-sm8750-mtp.cpio.gz"
              ;;
            *)
              FW_FILE="initramfs-firmware-${{ env.FIRMWARE }}-image-qcom-armv8a.cpio.gz"
              ;;
          esac

          echo "Using firmware file: $FW_FILE"

          firmware_url="$(aws s3 presign s3://qli-stg-video-gh-artifacts/qualcomm-linux-stg/video-driver/artifacts/initramfs/${FW_FILE} --expires-in 7600)"

          docker run -i --rm \
            --user "$(id -u):$(id -g)" \
            --workdir="$PWD" \
            -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
            -e firmware_url="$firmware_url" \
            ${{ inputs.docker_image }} \
            jq '.artifacts.firmware = env.firmware_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
        else
          echo "No FIRMWARE provided; skipping firmware artifact update."
        fi

    - name: Create lava_job_definition
      shell: bash
      run: |
        cd ../job_render
        mkdir -p renders
        docker run -i --rm \
          --user "$(id -u):$(id -g)" \
          --workdir="$PWD" \
          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
          -e TARGET="${{ env.LAVA_NAME }}" \
          -e TARGET_DTB="${{ env.MACHINE }}" \
          ${{ inputs.docker_image }} \
          sh -c 'export BOOT_METHOD=fastboot && \
            python3 lava_Job_definition_generator.py --localjson ./data/cloudData.json --video_pre-merge'
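
For local debugging, the render can be reproduced outside the runner with something like this (a sketch; it assumes a job_render checkout providing the generator script and a populated data/cloudData.json, with illustrative TARGET values):

  cd job_render
  export BOOT_METHOD=fastboot
  export TARGET=<lava-device-type>   # normally the value of LAVA_NAME
  export TARGET_DTB=<machine>        # normally the value of MACHINE
  python3 lava_Job_definition_generator.py --localjson ./data/cloudData.json --video_pre-merge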