diff --git a/.gitignore b/.gitignore index 17cb070..6a8de38 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,5 @@ .vagrant* +*qcow +*box +*disks* +*iso* diff --git a/vagrant-qcow2-harvester/README.md b/vagrant-qcow2-harvester/README.md new file mode 100644 index 0000000..725bd0c --- /dev/null +++ b/vagrant-qcow2-harvester/README.md @@ -0,0 +1,41 @@ +Harvester Boot Using Qcow2 images and Vagrant Libvirt +========================================= + +Introduction +------------ + +Utilizing [Vagrant][vagrant], [KVM][kvm], and [Ansible][ansible] to create a +ready-to-play virtual Harvester environment for evaluation and testing +purposes. A single-node Harvester will be created by default. + +Prerequisites +------------- + +- Ansible \>= 2.9.0. This environment was tested with Ansible 2.9.5. +- Vagrant \>= 2.0.3. +- vagrant-libvirt plugin \>= 0.0.43. +- KVM (i.e. qemu-kvm), preferably the latest and greatest. This + environment was tested with qemu-kvm 2.11. +- Host with at least 16 CPU, 64GB RAM, and 500GB free disk space. +- cloud-localds to package iso files for injecting cloud-init. + +Quick Start +----------- + +1. Edit `settings.yml` to make sure the configuration satisfies your + needs. The options are self-documented. +2. Run `setup_harvester.sh`. This may take a while (i.e. 30 minutes + depending on configuration). +3. If successful, run `vagrant status` to see the status of the Vagrant + boxes. +4. Point your browser to `https://<harvester_vip>:30443` to + access the Harvester UI. Just ignore the scary SSL warnings for now + as it is using self-signed certificates for demo purposes. + *NOTE*: by default `harvester_vip` is `192.168.0.131`. However, it is + configurable in `settings.yml`. 
+ +Acknowledgements +---------------- +[ansible]: https://www.ansible.com +[kvm]: https://www.linux-kvm.org +[vagrant]: https://www.vagrantup.com diff --git a/vagrant-qcow2-harvester/Vagrantfile b/vagrant-qcow2-harvester/Vagrantfile new file mode 100644 index 0000000..87d33f7 --- /dev/null +++ b/vagrant-qcow2-harvester/Vagrantfile @@ -0,0 +1,65 @@ +# vi: set ft=ruby ts=2 : + +require 'yaml' + +VAGRANTFILE_API_VERSION = "2" + +# check for required plugins +_required_plugins_list = %w{vagrant-libvirt} +exit(1) unless _required_plugins_list.all? do |plugin| + Vagrant.has_plugin?(plugin) || ( + STDERR.puts "Required plugin '#{plugin}' is missing; please install using:" + STDERR.puts " % vagrant plugin install #{plugin}" + false + ) +end + +# ensure libvirt is the default provider in case the vagrant box config +# doesn't specify it +ENV['VAGRANT_DEFAULT_PROVIDER'] = "libvirt" + +@root_dir = File.dirname(File.expand_path(__FILE__)) +@settings = YAML.load_file(File.join(@root_dir, "settings.yml")) + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + + # containerd is taking more than 60 seconds to shut down on SUSE platforms + # so increase the timeout to 120 seconds + config.vm.graceful_halt_timeout = 120 + config.vm.box = "harvester" + config.ssh.username = "rancher" + config.vm.synced_folder ".", "/vagrant", disabled: true + + cluster_node_index = @settings['harvester_cluster_nodes'] - 1 + (0..cluster_node_index).each do |node_number| + vm_name = "harvester-node-#{node_number}" + iso = File.join(@root_dir, "/disks/create.iso") + if node_number > 0 + iso = File.join(@root_dir, "/disks/join-#{node_number}.iso") + end + config.vm.define vm_name, autostart: false do |harvester_node| + harvester_node.vm.hostname = "harvester-node-#{node_number}" + harvester_node.vm.guest = "linux" + harvester_node.vm.network 'private_network', + libvirt__network_name: "harvester", + mac: @settings['harvester_network_config']['cluster'][node_number]['mac'], + ip: 
@settings['harvester_network_config']['cluster'][node_number]['ip'], + auto_config: false, + libvirt__network_name: "harvester", + libvirt__ip_address: @settings['harvester_network_config']['dhcp_server']['subnet'], + libvirt__network_address: @settings['harvester_network_config']['dhcp_server']['subnet'], + libvirt__dhcp_start: @settings['harvester_network_config']['dhcp_server']['dhcp_start'], + libvirt__dhcp_stop: @settings['harvester_network_config']['dhcp_server']['dhcp_stop'], + libvirt__netmask: @settings['harvester_network_config']['dhcp_server']['netmask'], + libvirt__guest_ipv6: "no" + harvester_node.vm.provider :libvirt do |libvirt| + libvirt.cpu_mode = 'host-passthrough' + libvirt.memory = @settings['harvester_network_config']['cluster'][node_number].key?('memory') ? @settings['harvester_network_config']['cluster'][node_number]['memory'] : @settings['harvester_node_config']['memory'] + libvirt.cpus = @settings['harvester_network_config']['cluster'][node_number].key?('cpu') ? 
@settings['harvester_network_config']['cluster'][node_number]['cpu'] : @settings['harvester_node_config']['cpu'] + libvirt.boot 'hd' + libvirt.nic_model_type = 'e1000' + libvirt.storage :file, :device => :cdrom, :bus => :ide, :type => :raw, :path => iso + end + end + end +end diff --git a/vagrant-qcow2-harvester/ansible.cfg b/vagrant-qcow2-harvester/ansible.cfg new file mode 100644 index 0000000..d2b29ee --- /dev/null +++ b/vagrant-qcow2-harvester/ansible.cfg @@ -0,0 +1,3 @@ +[defaults] +stdout_callback = yaml +interpreter_python = auto_silent diff --git a/vagrant-qcow2-harvester/ansible/_create.yaml.j2 b/vagrant-qcow2-harvester/ansible/_create.yaml.j2 new file mode 100644 index 0000000..24c5589 --- /dev/null +++ b/vagrant-qcow2-harvester/ansible/_create.yaml.j2 @@ -0,0 +1,36 @@ +#cloud-config +token: {{ harvester_config['token'] }} +os: + hostname: harvester-node-0 + ssh_authorized_keys: +{% for ssh_key in harvester_config['ssh_authorized_keys'] %} + - {{ ssh_key }} +{% endfor %} + password: {{ harvester_config['password'] }} + ntp_servers: +{% for ntp_server in harvester_config['ntp_servers'] %} + - {{ ntp_server }} +{% endfor %} + dns_nameservers: +{% for dns_server in harvester_config['dns_servers'] %} + - {{ dns_server }} +{% endfor %} +install: + automatic: true + mode: create + networks: + harvester-mgmt: + interfaces: + - name: {{ harvester_network_config['cluster'][0]['mgmt_interface'] }} + method: static + ip: {{ harvester_network_config['cluster'][0]['ip'] }} + gateway: {{ harvester_network_config['dhcp_server']['gateway'] }} + subnetMask: 255.255.255.0 + bond0: + interfaces: + - name: {{ harvester_network_config['cluster'][0]['vagrant_interface'] }} + method: dhcp + tty: ttyS0 + vip: {{ harvester_network_config['vip']['ip'] }} + vip_mode: {{ harvester_network_config['vip']['mode'] }} + vip_hw_addr: {{ harvester_network_config['vip']['mac'] }} diff --git a/vagrant-qcow2-harvester/ansible/_download_media.yml 
b/vagrant-qcow2-harvester/ansible/_download_media.yml new file mode 100644 index 0000000..f98794d --- /dev/null +++ b/vagrant-qcow2-harvester/ansible/_download_media.yml @@ -0,0 +1,31 @@ +--- +- name: download Harvester media + get_url: + url: "{{ harvester_media_url }}" + dest: "../disks/box.img" + +- name: copy files for box packaging + copy: + src: metadata.json + dest: ../disks + +- name: copy files for box packaging + copy: + src: Vagrantfile + dest: ../disks + +- name: create harvester box + shell: tar -czf ../disks/harvester.box -C ../disks box.img metadata.json Vagrantfile + +- name: add harvester box + shell: vagrant box add harvester ../disks/harvester.box --force + +- name: remove old image file + file: + path: "../disks/box.img" + state: absent + +- name: remove box + file: + path: "../disks/harvester.box" + state: absent diff --git a/vagrant-qcow2-harvester/ansible/_join.yaml.j2 b/vagrant-qcow2-harvester/ansible/_join.yaml.j2 new file mode 100644 index 0000000..83a974f --- /dev/null +++ b/vagrant-qcow2-harvester/ansible/_join.yaml.j2 @@ -0,0 +1,35 @@ +# example from https://github.com/harvester/ipxe-examples/blob/main/general/config-join.yaml + +server_url: https://{{ harvester_network_config['vip']['ip'] }}:443 +token: {{ harvester_config['token'] }} +os: + hostname: harvester-node-{{ node_number }} + ssh_authorized_keys: +{% for ssh_key in harvester_config['ssh_authorized_keys'] %} + - {{ ssh_key }} +{% endfor %} + password: {{ harvester_config['password'] }} + ntp_servers: +{% for ntp_server in harvester_config['ntp_servers'] %} + - {{ ntp_server }} +{% endfor %} + dns_nameservers: +{% for dns_server in harvester_config['dns_servers'] %} + - {{ dns_server }} +{% endfor %} +install: + mode: join + automatic: true + networks: + harvester-mgmt: + interfaces: + - name: {{ harvester_network_config['cluster'][node_number | int]['mgmt_interface'] }} # The management interface name + method: static + ip: {{ harvester_network_config['cluster'][node_number | 
int]['ip'] }} + gateway: {{ harvester_network_config['dhcp_server']['gateway'] }} + subnetMask: 255.255.255.0 + bond0: + interfaces: + - name: {{ harvester_network_config['cluster'][node_number | int]['vagrant_interface'] }} + method: dhcp + tty: ttyS0 diff --git a/vagrant-qcow2-harvester/ansible/boot_harvester_node.yml b/vagrant-qcow2-harvester/ansible/boot_harvester_node.yml new file mode 100644 index 0000000..5878b84 --- /dev/null +++ b/vagrant-qcow2-harvester/ansible/boot_harvester_node.yml @@ -0,0 +1,29 @@ +--- +- name: create "Booting Node {{ node_number}}" message + shell: > + figlet "Booting Node {{ node_number }}" 2>/dev/null || echo "Booting Node {{ node_number }}" + register: figlet_result + +- name: print "Booting Node {{ node_number }}" + debug: + msg: "{{ figlet_result.stdout }}" + +- name: set Harvester Node IP fact + set_fact: + harvester_node_ip: "{{ harvester_network_config['cluster'][node_number | int]['ip'] }}" + +- name: boot Harvester Node {{ node_number }} + shell: > + vagrant up harvester-node-{{ node_number }} + register: harvester_node_boot_result + +- name: wait for Harvester Node {{ harvester_node_ip }} to get ready + uri: + url: "https://{{ harvester_node_ip }}" + validate_certs: no + status_code: 200 + timeout: 120 + register: auth_modes_lookup_result + until: auth_modes_lookup_result.status == 200 + retries: 20 + delay: 120 diff --git a/vagrant-qcow2-harvester/ansible/files/Vagrantfile b/vagrant-qcow2-harvester/ansible/files/Vagrantfile new file mode 100644 index 0000000..95920ec --- /dev/null +++ b/vagrant-qcow2-harvester/ansible/files/Vagrantfile @@ -0,0 +1,63 @@ +# frozen_string_literal: true + +# -*- mode: ruby -*- +# vi: set ft=ruby : + +Vagrant.configure("2") do |config| + + # Example configuration of new VM.. + # + #config.vm.define :test_vm do |test_vm| + # Box name + # + #test_vm.vm.box = "centos64" + + # Domain Specific Options + # + # See README for more info. 
+ # + #test_vm.vm.provider :libvirt do |domain| + # domain.memory = 2048 + # domain.cpus = 2 + #end + + # Interfaces for VM + # + # Networking features in the form of `config.vm.network` + # + #test_vm.vm.network :private_network, :ip => '10.20.30.40' + #test_vm.vm.network :public_network, :ip => '10.20.30.41' + #end + + # Options for Libvirt Vagrant provider. + config.vm.provider :libvirt do |libvirt| + + # A hypervisor name to access. Different drivers can be specified, but + # this version of provider creates KVM machines only. Some examples of + # drivers are KVM (QEMU hardware accelerated), QEMU (QEMU emulated), + # Xen (Xen hypervisor), lxc (Linux Containers), + # esx (VMware ESX), vmwarews (VMware Workstation) and more. Refer to + # documentation for available drivers (http://libvirt.org/drivers.html). + libvirt.driver = "kvm" + + # The name of the server, where Libvirtd is running. + # libvirt.host = "localhost" + + # If use ssh tunnel to connect to Libvirt. + libvirt.connect_via_ssh = false + + # The username and password to access Libvirt. Password is not used when + # connecting via ssh. + libvirt.username = "root" + #libvirt.password = "secret" + + # Libvirt storage pool name, where box image and instance snapshots will + # be stored. + libvirt.storage_pool_name = "default" + + # Set a prefix for the machines that's different than the project dir name. 
+ #libvirt.default_prefix = '' + end + end + + \ No newline at end of file diff --git a/vagrant-qcow2-harvester/ansible/files/metadata.json b/vagrant-qcow2-harvester/ansible/files/metadata.json new file mode 100644 index 0000000..de7a05b --- /dev/null +++ b/vagrant-qcow2-harvester/ansible/files/metadata.json @@ -0,0 +1,6 @@ +{ + "provider" : "libvirt", + "format" : "qcow2", + "virtual_size" : 150 + } + \ No newline at end of file diff --git a/vagrant-qcow2-harvester/ansible/reinstall_harvester_node.yml b/vagrant-qcow2-harvester/ansible/reinstall_harvester_node.yml new file mode 100644 index 0000000..73847d5 --- /dev/null +++ b/vagrant-qcow2-harvester/ansible/reinstall_harvester_node.yml @@ -0,0 +1,27 @@ +--- +- name: Reinstall Harvester Node + hosts: localhost + connection: local + gather_facts: false + + tasks: + - name: create "Reinstalling Harvester Node" message + shell: > + figlet "Reinstalling Harvester Node {{ node_number }}" 2>/dev/null || echo "Reinstalling Harvester Node {{ node_number }}" + register: figlet_result + + - name: print "Reinstalling Harvester Node" message + debug: + msg: "{{ figlet_result.stdout }}" + + - name: boot Harvester nodes + include: boot_harvester_node.yml + + - name: create "Installation Completed" message + shell: > + figlet "Installation Completed" 2>/dev/null || echo "Installation Completed" + register: figlet_result + + - name: print "Installation Completed" + debug: + msg: "{{ figlet_result.stdout }}" diff --git a/vagrant-qcow2-harvester/ansible/setup_harvester.yml b/vagrant-qcow2-harvester/ansible/setup_harvester.yml new file mode 100644 index 0000000..7c6af05 --- /dev/null +++ b/vagrant-qcow2-harvester/ansible/setup_harvester.yml @@ -0,0 +1,61 @@ +--- +- name: Setup Harvester + hosts: localhost + connection: local + gather_facts: false + + tasks: + - name: copy config-create.yaml + template: + src: "_create.yaml.j2" + dest: "/tmp/create.yaml" + + - name: set node sequence fact + set_fact: + end_sequence: "{{ 
harvester_cluster_nodes - 1 if harvester_cluster_nodes > 1 else 1 }}" + + - name: copy config-join.yaml + template: + src: "_join.yaml.j2" + dest: "/tmp/join-{{ item }}.yaml" + vars: + node_number: "{{ item }}" + with_sequence: "start=1 end={{ end_sequence }}" + + - name: create directory for disks + file: + path: ../disks + mode: '0755' + state: directory + + - name: generate create iso + shell: cloud-localds -v ../disks/create.iso /tmp/create.yaml + + - name: generate join iso + shell: "cloud-localds -v ../disks/join-{{ item }}.iso /tmp/join-{{ item }}.yaml" + vars: + node_number: "{{ item }}" + with_sequence: "start=1 end={{ end_sequence }}" + + - name: setup harvester images + include: _download_media.yml + vars: + harvester_media_url: "{{ harvester_qcow_url }}" + tags: + - packagebox + + - name: boot Harvester nodes + include: boot_harvester_node.yml + vars: + node_number: "{{ item }}" + with_sequence: 0-{{ harvester_cluster_nodes - 1 if harvester_cluster_nodes > 1 else 0 }} + + - name: create "Installation Completed" message + shell: > + figlet "Installation Completed" 2>/dev/null || echo "Installation Completed" + register: figlet_result + + - name: print "Installation Completed" + debug: + msg: "{{ figlet_result.stdout }}" + diff --git a/vagrant-qcow2-harvester/reinstall_harvester_node.sh b/vagrant-qcow2-harvester/reinstall_harvester_node.sh new file mode 100755 index 0000000..cf1f952 --- /dev/null +++ b/vagrant-qcow2-harvester/reinstall_harvester_node.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +MYNAME=$0 +ROOTDIR=$(dirname $(readlink -e $MYNAME)) + +USAGE="${0}: <node_number> + +Where: + + <node_number>: node to re-install. Node number starts with zero (0). For + example, if you want to re-install the 3rd node, the node + number given should be 2. 
+" + +if [ $# -ne 1 ] ; then + echo "$USAGE" + exit 1 +fi + +NODE_NUMBER=$1 +NODE_NAME="harvester-node-${NODE_NUMBER}" + +# check to make sure the node has not been created +NOT_CREATED=`vagrant status ${NODE_NAME} | grep "^${NODE_NAME}" | grep "not created" || true` + +if [ "${NOT_CREATED}" == "" ] ; then + echo "Harvester node ${NODE_NAME} already created." + exit 1 +fi + +pushd $ROOTDIR +ansible-playbook ansible/reinstall_harvester_node.yml --extra-vars "@settings.yml" --extra-vars "node_number=${NODE_NUMBER}" +popd diff --git a/vagrant-qcow2-harvester/settings.yml b/vagrant-qcow2-harvester/settings.yml new file mode 100644 index 0000000..7e6e81f --- /dev/null +++ b/vagrant-qcow2-harvester/settings.yml @@ -0,0 +1,128 @@ +--- +########################################################################## +# NOTE: this is a YAML file so please pay close attention to the leading # +# spaces as they are significant. # +########################################################################## + +# +# harvester_iso_url +# harvester_kernel_url +# harvester_initrd_url +# +# Harvester media to install. The URL scheme can be either 'http', 'https', or +# 'file'. If the URL scheme is 'file', the given media will be copied from the +# local file system instead of downloading from a remote location. 
+harvester_iso_url: https://releases.rancher.com/harvester/master/harvester-master-amd64.iso +harvester_kernel_url: https://releases.rancher.com/harvester/master/harvester-master-vmlinuz-amd64 +harvester_ramdisk_url: https://releases.rancher.com/harvester/master/harvester-master-initrd-amd64 +harvester_rootfs_url: https://releases.rancher.com/harvester/master/harvester-master-rootfs-amd64.squashfs +harvester_qcow_url: https://releases.rancher.com/harvester/master/harvester-master-amd64.qcow2 + +# +# harvester_cluster_nodes +# +# NOTE: keep in mind that you need at least 3 nodes to make a cluster +# +harvester_cluster_nodes: 2 + +# +# network_config +# +# Harvester network configurations. Make sure the cluster IPs are on the same +# subnet as the DHCP server. Pre-assign the IPs and MACs for the Harvester +# nodes. +# +# NOTE: Random MAC addresses are generated with the following command: +# printf '02:00:00:%02X:%02X:%02X\n' $((RANDOM%256)) $((RANDOM%256)) $((RANDOM%256)) +# Thanks to https://stackoverflow.com/questions/8484877/mac-address-generator-in-python +# If any of the generated MAC addresses is in conflict with an existing one in +# your environment, please use the above command to regenerate and replace +# the conflicting one. +# +harvester_network_config: + # Run as an airgapped environment that only has internet connectivity through an HTTP proxy. + # The HTTP proxy runs on DHCP server using port 3128 + offline: false + + dhcp_server: + ip: 192.168.0.254 + subnet: 192.168.0.0 + netmask: 255.255.255.0 + dhcp_start: 192.168.0.50 + dhcp_stop: 192.168.0.130 + gateway: 192.168.0.1 + https: false + # Reserve these IPs for the Harvester cluster. 
Make sure these are outside + # the range of DHCP so they don't get served out by the DHCP server + vip: + ip: 192.168.0.131 + mode: static + mac: 02:00:00:03:3D:61 + cluster: + - ip: 192.168.0.30 + mac: 02:00:00:0D:62:E2 + cpu: 4 + memory: 16384 + disk_size: 150G + vagrant_interface: ens5 + mgmt_interface: ens6 + - ip: 192.168.0.31 + mac: 02:00:00:35:86:92 + cpu: 4 + memory: 8192 + disk_size: 150G + vagrant_interface: ens5 + mgmt_interface: ens6 + - ip: 192.168.0.32 + mac: 02:00:00:2F:F2:2A + cpu: 6 + memory: 16384 + disk_size: 150G + vagrant_interface: ens5 + mgmt_interface: ens6 + - ip: 192.168.0.33 + mac: 02:00:00:A7:E6:FF + cpu: 4 + memory: 8192 + disk_size: 150G + vagrant_interface: ens5 + mgmt_interface: ens6 + +# +# harvester_config +# +# Harvester system configurations. +# +harvester_config: + # static token for cluster authentication + token: token + + # Public keys to add to authorized_keys of each node. + ssh_authorized_keys: + # Vagrant default unsecured SSH public key + - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key + + # password for the `rancher` user to log in to the Harvester nodes + password: p@ssword + + # NTP servers + ntp_servers: + - 0.suse.pool.ntp.org + - 1.suse.pool.ntp.org + + dns_servers: + - 8.8.8.8 +# +# harvester_node_config +# +# Harvester node-specific configurations. 
+# +harvester_node_config: + # number of CPUs assigned to each node + cpu: 4 + + # memory size for each node, in MBytes + memory: 8192 + + # disk size for each node + disk_size: 150G diff --git a/vagrant-qcow2-harvester/setup_harvester.sh b/vagrant-qcow2-harvester/setup_harvester.sh new file mode 100755 index 0000000..da27fc5 --- /dev/null +++ b/vagrant-qcow2-harvester/setup_harvester.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +MYNAME=$0 +ROOTDIR=$(dirname $(readlink -e $MYNAME)) + +pushd $ROOTDIR +ansible-playbook ansible/setup_harvester.yml --extra-vars "@settings.yml" +ANSIBLE_PLAYBOOK_RESULT=$? +popd +exit $ANSIBLE_PLAYBOOK_RESULT