diff --git a/installation-scripts-onm/MASTER_START_SCRIPT.sh b/installation-scripts-onm/MASTER_START_SCRIPT.sh
index 87cd7ff..767616d 100644
--- a/installation-scripts-onm/MASTER_START_SCRIPT.sh
+++ b/installation-scripts-onm/MASTER_START_SCRIPT.sh
@@ -12,6 +12,9 @@ echo "NEBULOUS_SCRIPTS_BRANCH is set to: $NEBULOUS_SCRIPTS_BRANCH"
 if [[ "$CONTAINERIZATION_FLAVOR" == "k3s" ]]; then
   export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
   echo "KUBECONFIG=${KUBECONFIG}" | sudo tee -a /etc/environment
+else
+  export KUBECONFIG=/home/ubuntu/.kube/config
+  echo "KUBECONFIG=${KUBECONFIG}" | sudo tee -a /etc/environment
 fi

 while true; do
@@ -38,38 +41,170 @@ then
 else
   echo "User Ubuntu is not found"
 fi
-#$dau kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml;
-$dau bash -c 'helm repo add cilium https://helm.cilium.io/ && helm repo update'
-$dau bash -c 'helm install cilium cilium/cilium --namespace kube-system --set encryption.enabled=true --set encryption.type=wireguard'
+$dau bash -c 'kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml';
+#$dau bash -c 'helm repo add cilium https://helm.cilium.io/ && helm repo update'
+#$dau bash -c 'helm install cilium cilium/cilium --namespace kube-system --set encryption.enabled=true --set encryption.type=wireguard'

 echo "Installing Vela CLI"
 $dau bash -c 'curl -fsSl https://kubevela.io/script/install.sh | bash'
 echo "Configuration complete."
+cat > /home/ubuntu/kubevela-values.yaml << EOF
+nodeSelector:
+  "node-role.kubernetes.io/control-plane": ""
+tolerations:
+  - key: "node-role.kubernetes.io/control-plane"
+    operator: "Exists"
+    effect: "NoSchedule"
+EOF
+
+$dau bash -c 'helm repo add kubevela https://kubevela.github.io/chart && helm repo update'
+
+cat > /home/ubuntu/patch-pin-to-control-plane.yaml << EOF
+apiVersion: apps/v1
+kind: Deployment
+spec:
+  template:
+    spec:
+      nodeSelector:
+        "node-role.kubernetes.io/control-plane": ""
+      tolerations:
+        - key: "node-role.kubernetes.io/control-plane"
+          operator: "Exists"
+          effect: "NoSchedule"
+EOF
+
 echo "Setting KubeVela..."
+# Delete the flag file if it exists
+$dau bash -c 'rm -f /tmp/vela_ready.flag'

 # Function to check for worker nodes and install KubeVela
 cat > /home/ubuntu/install_kubevela.sh << 'EOF'
 #!/bin/bash
+echo "Start install_kubevela.sh"
+echo "-----${KUBECONFIG}---------"
+sudo cat ${KUBECONFIG}
+echo "--------------"
+# Retry vela install with a 10-second delay between attempts
+attempt=1
+until sudo -H -E -u ubuntu bash -c 'helm upgrade --install --create-namespace -n vela-system kubevela kubevela/vela-core --version 1.9.11 --values /home/ubuntu/kubevela-values.yaml --wait'; do
+  echo "Vela install failed. Retrying in 10 seconds... (attempt $attempt)"
+  attempt=$((attempt+1))
+  sleep 10
+done
+echo "Vela installation done."
+if [ "$SERVERLESS_ENABLED" == "yes" ]; then
+  echo "Serverless installation."
+
+  # Install Cosign
+  export COSIGN_VERSION=$(curl -s https://api.github.com/repos/sigstore/cosign/releases/latest | jq -r '.tag_name')
+  sudo curl -LO "https://github.com/sigstore/cosign/releases/download/${COSIGN_VERSION}/cosign-linux-amd64"
+  sudo mv cosign-linux-amd64 /usr/local/bin/cosign
+  sudo chmod +x /usr/local/bin/cosign
+
+  # Update system and install jq
+  sudo apt update
+  sudo apt install -y jq
+
+  # Apply Knative Serving CRDs and core components
+  kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.12.4/serving-crds.yaml
+  kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.12.4/serving-core.yaml
+  kubectl patch deployment -n knative-serving activator --patch "$(cat /home/ubuntu/patch-pin-to-control-plane.yaml)"
+  kubectl patch deployment -n knative-serving autoscaler --patch "$(cat /home/ubuntu/patch-pin-to-control-plane.yaml)"
+  kubectl patch deployment -n knative-serving controller --patch "$(cat /home/ubuntu/patch-pin-to-control-plane.yaml)"
+  kubectl patch deployment -n knative-serving webhook --patch "$(cat /home/ubuntu/patch-pin-to-control-plane.yaml)"
+
+  # Download and apply Kourier
+  sudo wget https://raw.githubusercontent.com/eu-nebulous/sal-scripts/$NEBULOUS_SCRIPTS_BRANCH/serverless/kourier.yaml
+  kubectl apply -f kourier.yaml
+
+  sudo wget https://raw.githubusercontent.com/eu-nebulous/sal-scripts/$NEBULOUS_SCRIPTS_BRANCH/serverless/serverless-platform-definition.yaml
+  kubectl apply -f serverless-platform-definition.yaml
+
+  sudo wget https://raw.githubusercontent.com/eu-nebulous/sal-scripts/$NEBULOUS_SCRIPTS_BRANCH/serverless/knative-serving-definition.yaml
+  kubectl apply -f knative-serving-definition.yaml
+
+  sudo wget https://raw.githubusercontent.com/eu-nebulous/sal-scripts/$NEBULOUS_SCRIPTS_BRANCH/serverless/config-features.yaml
+  kubectl apply -f config-features.yaml
+
+  # Determine the public IP of the master node
+  MASTER_IP=$(curl -s ifconfig.me)
+
+  # Patch config-domain with MASTER_IP
+  kubectl patch configmap/config-domain \
+    --namespace knative-serving \
+    --type merge \
+    --patch "{\"data\":{\"${MASTER_IP}.sslip.io\":\"\"}}"
+
+  # Patch config-network to use Kourier ingress
+  kubectl patch configmap/config-network \
+    --namespace knative-serving \
+    --type merge \
+    --patch '{"data":{"ingress-class":"kourier.ingress.networking.knative.dev"}}'
+
+  # Apply default domain configuration
+  kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.12.4/serving-default-domain.yaml
+
+  if [ -n "$LOCAL_SERVERLESS_SERVICES" ]; then
+    echo "LOCAL_SERVERLESS_SERVICES is set to: $LOCAL_SERVERLESS_SERVICES"
+
+    sudo wget -q -O /usr/local/bin/label-serverless-services.sh \
+      https://raw.githubusercontent.com/eu-nebulous/sal-scripts/$NEBULOUS_SCRIPTS_BRANCH/serverless/label-serverless-services.sh
+
+    sudo chmod +x /usr/local/bin/label-serverless-services.sh
+
+    sudo touch /var/log/label-serverless-services.log
+    sudo chown ubuntu:ubuntu /var/log/label-serverless-services.log
+
+    nohup /usr/local/bin/label-serverless-services.sh \
+      >> /var/log/label-serverless-services.log 2>&1 &
+  fi
+fi
+echo "End install_kubevela.sh"
+EOF
+
+chmod +x /home/ubuntu/install_kubevela.sh
+
+cat > /home/ubuntu/kubevela_installer_service.sh << 'EOF'
+#!/bin/bash
+
+is_vela_installed() {
+  if vela ls &>/dev/null; then
+    return 0
+  else
+    return 1
+  fi
+}

 # Wait for at least one worker node to be ready
 while true; do
   WORKER_NODES=$(sudo -H -E -u ubuntu kubectl get nodes --selector='!node-role.kubernetes.io/control-plane' -o json | jq '.items | length')
   if [ "$WORKER_NODES" -gt 0 ]; then
     echo "$(date '+%Y-%m-%d %H:%M:%S') - Found $WORKER_NODES worker node(s), proceeding with KubeVela installation..." >> /home/ubuntu/vela.txt
-    sudo -H -E -u ubuntu bash -c 'nohup vela install --version 1.9.11 >> /home/ubuntu/vela.txt 2>&1'
-    # Disable the service after successful installation
-    sudo systemctl disable kubevela-installer.service
-    exit 0
+    /home/ubuntu/install_kubevela.sh >> /home/ubuntu/vela.txt 2>&1
+    if is_vela_installed; then
+      echo "Vela installation successful" >> /home/ubuntu/vela.txt
+      # Disable the service after successful installation
+      echo "Disabling kubevela-installer.service" >> /home/ubuntu/vela.txt
+      sudo systemctl disable kubevela-installer.service
+      # Create a flag file to indicate that vela is ready. This flag will be read by the script that runs `vela up -f ...`.
+      # It is needed to prevent the vela up command from failing if the vela installation has not completed yet.
+      echo "touching /tmp/vela_ready.flag" >> /home/ubuntu/vela.txt
+      touch /tmp/vela_ready.flag
+      exit 0
+    else
+      echo "'vela ls' returned an error. Trying again in 30 seconds..." >> /home/ubuntu/vela.txt
+      sleep 30
+    fi
   fi
   echo "$(date '+%Y-%m-%d %H:%M:%S') - Waiting for worker nodes to be ready..." >> /home/ubuntu/vela.txt
   sleep 10
 done
 EOF
-
-chmod +x /home/ubuntu/install_kubevela.sh
+chmod +x /home/ubuntu/kubevela_installer_service.sh

 # Create systemd service file
-cat << 'EOF' | sudo tee /etc/systemd/system/kubevela-installer.service
+cat << EOF | sudo tee /etc/systemd/system/kubevela-installer.service
 [Unit]
 Description=KubeVela One-time Installer Service
 After=network.target
@@ -77,8 +212,13 @@ After=network.target
 [Service]
 Type=simple
 User=ubuntu
-ExecStart=/home/ubuntu/install_kubevela.sh
+ExecStart=/home/ubuntu/kubevela_installer_service.sh
 Restart=no
+Environment="LOCAL_SERVERLESS_SERVICES=${LOCAL_SERVERLESS_SERVICES}"
+Environment="SERVERLESS_ENABLED=${SERVERLESS_ENABLED}"
+Environment="APPLICATION_ID=${APPLICATION_ID}"
+Environment="NEBULOUS_SCRIPTS_BRANCH=${NEBULOUS_SCRIPTS_BRANCH}"
+Environment="KUBECONFIG=${KUBECONFIG}"

 [Install]
 WantedBy=multi-user.target
@@ -106,7 +246,8 @@ $dau bash -c 'helm install ems nebulous/ems-server \
   --set tolerations[0].effect="NoSchedule" \
   --set app_uuid=$APPLICATION_ID \
   --set broker_address=$BROKER_ADDRESS \
-  --set image.tag="latest" \
+  --set image.tag=$NEBULOUS_SCRIPTS_BRANCH \
+  --set client.image.tag="ems-client-$NEBULOUS_SCRIPTS_BRANCH" \
   --set broker_port=$BROKER_PORT'

@@ -126,104 +267,11 @@ $dau bash -c 'helm install solver nebulous/nebulous-optimiser-solver \

 echo "Add volumes provisioner"
 $dau bash -c "kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.27/deploy/local-path-storage.yaml"

-if [ "$SERVERLESS_ENABLED" == "yes" ]; then
-  echo "Serverless installation."
-
-  # Install Cosign
-  export COSIGN_VERSION=$(curl -s https://api.github.com/repos/sigstore/cosign/releases/latest | jq -r '.tag_name')
-  curl -LO "https://github.com/sigstore/cosign/releases/download/${COSIGN_VERSION}/cosign-linux-amd64"
-  sudo mv cosign-linux-amd64 /usr/local/bin/cosign
-  sudo chmod +x /usr/local/bin/cosign
-
-  # Update system and install jq
-  sudo apt update
-  sudo apt install -y jq
-
-  # Apply Knative Serving CRDs and core components
-  kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.12.4/serving-crds.yaml
-  kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.12.4/serving-core.yaml
-
-  # Download and apply Kourier
-  wget https://raw.githubusercontent.com/eu-nebulous/sal-scripts/$NEBULOUS_SCRIPTS_BRANCH/serverless/kourier.yaml
-  kubectl apply -f kourier.yaml
-
-  wget https://raw.githubusercontent.com/eu-nebulous/sal-scripts/$NEBULOUS_SCRIPTS_BRANCH/serverless/serverless-platform-definition.yaml
-  kubectl apply -f serverless-platform-definition.yaml
-
-  wget https://raw.githubusercontent.com/eu-nebulous/sal-scripts/$NEBULOUS_SCRIPTS_BRANCH/serverless/config-features.yaml
-  kubectl apply -f config-features.yaml
-
-  # Patch config-domain with PUBLIC_IP
-  MASTER_IP=$(curl -s ifconfig.me)
-
-  # Patch config-domain with MASTER_IP
-  kubectl patch configmap/config-domain \
-    --namespace knative-serving \
-    --type merge \
-    --patch "{\"data\":{\"${MASTER_IP}.sslip.io\":\"\"}}"
-
-  # Patch config-network to use Kourier ingress
-  kubectl patch configmap/config-network \
-    --namespace knative-serving \
-    --type merge \
-    --patch '{"data":{"ingress-class":"kourier.ingress.networking.knative.dev"}}'
-
-  # Apply default domain configuration
-  kubectl apply -f https://github.com/knative/serving/releases/download/knative-v1.12.4/serving-default-domain.yaml
-
-  kubectl apply -f https://raw.githubusercontent.com/kubevela/samples/master/06.Knative_App/componentdefinition-knative-serving.yaml
-
-  if [ -n "$LOCAL_SERVERLESS_SERVICES" ]; then
-    echo "LOCAL_SERVERLESS_SERVICES is set to: $LOCAL_SERVERLESS_SERVICES"
-
-    sudo wget -q -O /usr/local/bin/label-serverless-services.sh \
-      https://raw.githubusercontent.com/eu-nebulous/sal-scripts/$NEBULOUS_SCRIPTS_BRANCH/serverless/label-serverless-services.sh
-
-    sudo chmod +x /usr/local/bin/label-serverless-services.sh
-
-    sudo touch /var/log/label-serverless-services.log
-    sudo chown ubuntu:ubuntu /var/log/label-serverless-services.log
-
-    nohup /usr/local/bin/label-serverless-services.sh \
-      >> /var/log/label-serverless-services.log 2>&1 &
-  fi
-fi

 if [ "$WORKFLOW_ENABLED" == "yes" ]; then
   echo "Workflow installation.";
-  $dau bash -c 'helm install argo-workflows argo-workflows \
-    --repo https://argoproj.github.io/argo-helm \
-    --namespace argo \
-    --create-namespace \
-    --set crds.install=true \
-    --set crds.keep=false \
-    --set workflow.serviceAccount.create=true \
-    --set workflow.serviceAccount.name="argo" \
-    --set workflow.rbac.create=true \
-    --set "controller.workflowNamespaces={argo}" \
-    --set controller.metricsConfig.enabled=true \
-    --set controller.telemetryConfig.enabled=true \
-    --set controller.serviceMonitor.enabled=true \
-    --set "server.authModes={server}" \
-    --set "controller.tolerations[0].effect=NoSchedule" \
-    --set "controller.tolerations[0].key=node.kubernetes.io/unschedulable" \
-    --set "controller.tolerations[0].operator=Exists" \
-    --set "controller.tolerations[1].effect=NoSchedule" \
-    --set "controller.tolerations[1].operator=Exists" \
-    --set "controller.priorityClassName=system-node-critical" \
-    --set controller.nodeSelector.node-role\\.kubernetes\\.io/control-plane="" \
-    --set "server.tolerations[0].effect=NoSchedule" \
-    --set "server.tolerations[0].key=node.kubernetes.io/unschedulable" \
-    --set "server.tolerations[0].operator=Exists" \
-    --set "server.tolerations[1].effect=NoSchedule" \
-    --set "server.tolerations[1].operator=Exists" \
-    --set "server.priorityClassName=system-node-critical" \
-    --set server.nodeSelector.node-role\\.kubernetes\\.io/control-plane=""'
-
-  sudo -H -E -u ubuntu bash -c 'kubectl -n argo create rolebinding argo-workflows-server --role=argo-workflows-workflow --serviceaccount=argo:argo-workflows-server'
-  sudo -H -E -u ubuntu bash -c 'kubectl -n argo create rolebinding argo-workflows-workflow-controller --role=argo-workflows-workflow --serviceaccount=argo:argo-workflows-workflow-controller'
-  sudo -H -E -u ubuntu bash -c 'kubectl -n argo create rolebinding default --role=argo-workflows-workflow --serviceaccount=argo:default'
+  $dau bash -c 'helm install -n argo nebulous-workflow-executor nebulous/nebulous-workflow-executor --create-namespace'

   $dau bash -c "kubectl -n argo create secret docker-registry regcred --docker-server=$PRIVATE_DOCKER_REGISTRY_SERVER --docker-username=$PRIVATE_DOCKER_REGISTRY_USERNAME --docker-password=$PRIVATE_DOCKER_REGISTRY_PASSWORD --docker-email=$PRIVATE_DOCKER_REGISTRY_EMAIL"
   $dau bash -c 'kubectl -n argo patch serviceaccount default -p "{\"imagePullSecrets\": [{\"name\": \"regcred\"}]}"'
@@ -231,3 +279,15 @@ if [ "$WORKFLOW_ENABLED" == "yes" ]; then
   echo "Workflow installation completed.";
 fi

+echo "Installing OPA Gatekeeper..."
+wget https://raw.githubusercontent.com/eu-nebulous/security-manager/dev/OPA-GATEKEEPER-INSTALL.sh
+chmod +x OPA-GATEKEEPER-INSTALL.sh
+./OPA-GATEKEEPER-INSTALL.sh
+
+echo "Installing Security Manager..."
+$dau bash -c 'helm install security-manager nebulous/nebulous-security-manager \
+  --set-file configMap.k3sConfig="$KUBECONFIG" \
+  --set tolerations[0].key="node-role.kubernetes.io/control-plane" \
+  --set tolerations[0].operator="Exists" \
+  --set tolerations[0].effect="NoSchedule"'
+
diff --git a/k8s/install-kube-u22-wg.sh b/k8s/install-kube-u22-wg.sh
index 0483d71..dd50ae6 100644
--- a/k8s/install-kube-u22-wg.sh
+++ b/k8s/install-kube-u22-wg.sh
@@ -146,7 +146,7 @@ if [ `grep Swap /proc/meminfo | grep SwapTotal: | cut -d" " -f14` == "0" ];
 then
   log_print INFO "The swap memory is Off"
 else
-  sudo swapoff –a || { log_print ERROR "swap memory can't be turned off "; exit $EXITCODE; }
+  sudo swapoff -a || { log_print ERROR "swap memory can't be turned off "; exit $EXITCODE; }
 fi

diff --git a/k8s/install-kube-u22.sh b/k8s/install-kube-u22.sh
index 3c745b7..c014fd4 100644
--- a/k8s/install-kube-u22.sh
+++ b/k8s/install-kube-u22.sh
@@ -123,7 +123,7 @@ if [ `grep Swap /proc/meminfo | grep SwapTotal: | cut -d" " -f14` == "0" ];
 then
   log_print INFO "The swap memory is Off"
 else
-  sudo swapoff –a || { log_print ERROR "swap memory can't be turned off "; exit $EXITCODE; }
+  sudo swapoff -a || { log_print ERROR "swap memory can't be turned off "; exit $EXITCODE; }
 fi

diff --git a/serverless/config-features.yaml b/serverless/config-features.yaml
index f131259..7c7118a 100644
--- a/serverless/config-features.yaml
+++ b/serverless/config-features.yaml
@@ -50,13 +50,13 @@ data:
   #
   # WARNING: Cannot safely be disabled once enabled.
   # See: https://knative.dev/docs/serving/feature-flags/#kubernetes-node-selector
-  kubernetes.podspec-nodeselector: "disabled"
+  kubernetes.podspec-nodeselector: "enabled"

   # Indicates whether Kubernetes tolerations support is enabled
   #
   # WARNING: Cannot safely be disabled once enabled
   # See: https://knative.dev/docs/serving/feature-flags/#kubernetes-toleration
-  kubernetes.podspec-tolerations: "disabled"
+  kubernetes.podspec-tolerations: "enabled"

   # Indicates whether Kubernetes FieldRef support is enabled
   #
diff --git a/serverless/knative-serving-definition.yaml b/serverless/knative-serving-definition.yaml
new file mode 100644
index 0000000..195d919
--- /dev/null
+++ b/serverless/knative-serving-definition.yaml
@@ -0,0 +1,75 @@
+apiVersion: core.oam.dev/v1beta1
+kind: ComponentDefinition
+metadata:
+  name: knative-serving
+  annotations:
+    definition.oam.dev/description: "Knative serving."
+spec:
+  workload:
+    definition:
+      apiVersion: serving.knative.dev/v1
+      kind: Service
+  schematic:
+    cue:
+      template: |
+        output: {
+          apiVersion: "serving.knative.dev/v1"
+          kind: "Service"
+          metadata: {
+            name: context.name
+            labels: {
+              "app.oam.dev/component": context.name
+            }
+          }
+          spec: {
+            template:
+              spec:
+                containers: [{
+                  name: context.name
+
+                  image: parameter.image
+
+                  if parameter.imagePullPolicy != _|_ {
+                    imagePullPolicy: parameter.imagePullPolicy
+                  }
+
+                  if parameter.env != _|_ {
+                    env: parameter.env
+                  }
+
+                  if parameter.resources != _|_ {
+                    resources: parameter.resources
+                  }
+                }]
+          }
+        }
+        parameter: {
+          image: string
+          imagePullPolicy?: string
+          env?: [...{
+            // +usage=Environment variable name
+            name: string
+            // +usage=The value of the environment variable
+            value?: string
+            // +usage=Specifies a source the value of this var should come from
+            valueFrom?: {
+              // +usage=Selects a key of a secret in the pod's namespace
+              secretKeyRef: {
+                // +usage=The name of the secret in the pod's namespace to select from
+                name: string
+                // +usage=The key of the secret to select from. Must be a valid secret key
+                key: string
+              }
+            }
+          }]
+          resources?: {
+            limits?: {
+              cpu?: string
+              memory?: string
+            }
+            requests?: {
+              cpu?: string
+              memory?: string
+            }
+          }
+        }
diff --git a/serverless/kourier.yaml b/serverless/kourier.yaml
index ea146fd..38597e3 100644
--- a/serverless/kourier.yaml
+++ b/serverless/kourier.yaml
@@ -399,6 +399,12 @@ spec:
             memory: 500Mi
       restartPolicy: Always
       serviceAccountName: net-kourier
+      nodeSelector:
+        "node-role.kubernetes.io/control-plane": ""
+      tolerations:
+        - key: "node-role.kubernetes.io/control-plane"
+          operator: "Exists"
+          effect: "NoSchedule"
 ---
 apiVersion: v1
 kind: Service
@@ -543,6 +549,12 @@ spec:
         configMap:
           name: kourier-bootstrap
       restartPolicy: Always
+      nodeSelector:
+        "node-role.kubernetes.io/control-plane": ""
+      tolerations:
+        - key: "node-role.kubernetes.io/control-plane"
+          operator: "Exists"
+          effect: "NoSchedule"
 ---
 apiVersion: v1
 kind: Service