-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathllm-cpu-example-values.yaml
More file actions
43 lines (35 loc) · 1.46 KB
/
llm-cpu-example-values.yaml
File metadata and controls
43 lines (35 loc) · 1.46 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
---
# Example parameter values for LLM CPU Serving - HR Assistant template
# Based on rh-ai-quickstart/llm-cpu-serving repository
# Use these as a reference when filling out the template form

# Application identity
name: "hr-assistant"
owner: "hr-team"
description: "AI-powered HR Assistant on CPU using TinyLlama"

# OpenShift Configuration
openshiftCluster: "https://api.prod-cluster.example.com:6443"
namespace: "hr-assistant"
# Quoted deliberately: the trailing "+" must remain part of a string value
openshiftAIVersion: "2.16.2+"

# Model Configuration
llmModel: "TinyLlama-1.1B-Chat-v1.0" # Only supported model for CPU serving
vllmImage: "quay.io/rh-aiservices-bu/vllm-cpu-openshift-ubi9:latest"

# Resource Configuration
# Kubernetes-style quantities are kept quoted so they stay strings,
# not bare integers, when this file is parsed.
cpuRequest: "4" # Options: "2" (min), "4" (recommended), "8" (optimal)
memoryRequest: "8Gi" # Options: "4Gi" (min), "8Gi" (recommended)
cpuArchitecture: "x86_64-avx512" # Use AVX-512 for best performance

# AnythingLLM Configuration
enableAnythingLLM: true
anythingLLMWorkspace: "Assistant to the HR Representative"

# Prerequisites Check (REQUIRED!)
hasServiceMesh: true # OpenShift Service Mesh must be installed
hasServerless: true # OpenShift Serverless must be installed

# Repository
createGitRepo: true
# NOTE(review): value has no URL scheme (https://) — confirm the consuming
# template expects a bare host/path rather than a full URL.
repoUrl: "github.com/your-org/hr-assistant"

# ---
# Deployment will create these pods:
# - anythingllm (Chat UI workbench)
# - anythingllm-seed (Workspace setup)
# - tinyllama-1b-cpu-predictor (vLLM inference server)
#
# Access via OpenShift AI Dashboard → hr-assistant project → AnythingLLM workbench
#
# Estimated deployment time: 5-10 minutes