Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1 +1,5 @@
# Vagrant runtime state.
.vagrant*
# Generated Harvester media and box-packaging artifacts (qcow2 image,
# packaged .box, cloud-init ISOs, the disks/ working directory).
# NOTE(review): these patterns are broad — e.g. `*box` matches ANY path
# ending in "box" (such as a "sandbox" file); consider anchored forms like
# `*.qcow2`, `*.box`, `*.iso`, `disks/` — confirm against intended layout.
*qcow
*box
*disks*
*iso*
41 changes: 41 additions & 0 deletions vagrant-qcow2-harvester/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
Harvester Boot Using Qcow2 images and Vagrant Libvirt
=========================================

Introduction
------------

Utilizing [Vagrant][vagrant], [KVM][kvm], and [Ansible][ansible] to create a
ready-to-play virtual Harvester environment for evaluation and testing
purposes. A single-node Harvester will be created by default.

Prerequisites
-------------

- Ansible \>= 2.9.0. This environment was tested with Ansible 2.9.5.
- Vagrant \>= 2.0.3.
- vagrant-libvirt plugin \>= 0.0.43.
- KVM (i.e. qemu-kvm), preferably the latest and greatest. This
environment was tested with qemu-kvm 2.11.
- Host with at least 16 CPU cores, 64 GB RAM, and 500 GB of free disk space.
- cloud-localds to package iso files for injecting cloud-init.

Quick Start
-----------

1. Edit `settings.yml` to make sure the configuration satisfies your
needs. The options are self-documented.
2. Run `setup_harvester.sh`. This may take a while (e.g. 30 minutes,
   depending on configuration).
3. If successful, run `vagrant status` to see the status of the Vagrant
boxes.
4. Point your browser to `https://<harvester_vip>:30443` to
access the Harvester UI. Just ignore the scary SSL warnings for now
as it is using self-signed certificates for demo purposes.
   *NOTE*: by default `harvester_vip` is `192.168.0.131`. However, it is
   configurable in `settings.yml`.

Acknowledgements
----------------
[ansible]: https://www.ansible.com
[kvm]: https://www.linux-kvm.org
[vagrant]: https://www.vagrantup.com
65 changes: 65 additions & 0 deletions vagrant-qcow2-harvester/Vagrantfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
# vi: set ft=ruby ts=2 :
#
# Defines the Harvester cluster VMs (harvester-node-0..N-1) on top of the
# "harvester" box, driven by the cluster layout declared in settings.yml.

require 'yaml'

VAGRANTFILE_API_VERSION = "2"

# Abort early with an installation hint if a required plugin is missing.
_required_plugins_list = %w{vagrant-libvirt}
exit(1) unless _required_plugins_list.all? do |plugin|
  Vagrant.has_plugin?(plugin) || (
    STDERR.puts "Required plugin '#{plugin}' is missing; please install using:"
    STDERR.puts " % vagrant plugin install #{plugin}"
    false
  )
end

# ensure libvirt is the default provider in case the vagrant box config
# doesn't specify it
ENV['VAGRANT_DEFAULT_PROVIDER'] = "libvirt"

@root_dir = File.dirname(File.expand_path(__FILE__))
@settings = YAML.load_file(File.join(@root_dir, "settings.yml"))

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|

  # containerd is taking more than 60 seconds to shutdown in SUSE platforms
  # so increase the timeout to 120 seconds
  config.vm.graceful_halt_timeout = 120
  config.vm.box = "harvester"
  config.ssh.username = "rancher"
  config.vm.synced_folder ".", "/vagrant", disabled: true

  cluster_node_index = @settings['harvester_cluster_nodes'] - 1
  (0..cluster_node_index).each do |node_number|
    vm_name = "harvester-node-#{node_number}"

    # Node 0 boots the "create" cloud-init ISO; every other node gets its
    # own "join" ISO so it attaches to the existing cluster.
    iso = if node_number.zero?
            File.join(@root_dir, "/disks/create.iso")
          else
            File.join(@root_dir, "/disks/join-#{node_number}.iso")
          end

    config.vm.define vm_name, autostart: false do |harvester_node|
      node_cfg = @settings['harvester_network_config']['cluster'][node_number]
      dhcp_cfg = @settings['harvester_network_config']['dhcp_server']

      harvester_node.vm.hostname = vm_name
      harvester_node.vm.guest = "linux"
      # BUGFIX: the original hash passed `libvirt__network_name:` twice;
      # Ruby warns and keeps only the last duplicate key, so a single
      # occurrence is the equivalent (and correct) form.
      harvester_node.vm.network 'private_network',
        libvirt__network_name: "harvester",
        mac: node_cfg['mac'],
        ip: node_cfg['ip'],
        auto_config: false,
        libvirt__ip_address: dhcp_cfg['subnet'],
        libvirt__network_address: dhcp_cfg['subnet'],
        libvirt__dhcp_start: dhcp_cfg['dhcp_start'],
        libvirt__dhcp_stop: dhcp_cfg['dhcp_stop'],
        libvirt__netmask: dhcp_cfg['netmask'],
        libvirt__guest_ipv6: "no"

      harvester_node.vm.provider :libvirt do |libvirt|
        libvirt.cpu_mode = 'host-passthrough'
        # Per-node overrides in the cluster list win over the global
        # harvester_node_config defaults (fetch == the key?/ternary pattern).
        libvirt.memory = node_cfg.fetch('memory', @settings['harvester_node_config']['memory'])
        libvirt.cpus = node_cfg.fetch('cpu', @settings['harvester_node_config']['cpu'])
        libvirt.boot 'hd'
        libvirt.nic_model_type = 'e1000'
        # Attach the cloud-init ISO as a raw CD-ROM on the IDE bus.
        libvirt.storage :file, :device => :cdrom, :bus => :ide, :type => :raw, :path => iso
      end
    end
  end
end
3 changes: 3 additions & 0 deletions vagrant-qcow2-harvester/ansible.cfg
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
[defaults]
# Render task results as YAML instead of the default JSON for readability.
stdout_callback = yaml
# Auto-detect the remote Python interpreter without emitting warnings.
interpreter_python = auto_silent
36 changes: 36 additions & 0 deletions vagrant-qcow2-harvester/ansible/_create.yaml.j2
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
#cloud-config
# Rendered by Ansible into disks/create.iso: Harvester installer config for
# the first cluster node (mode: create). Variables come from settings.yml.
token: {{ harvester_config['token'] }}
os:
  hostname: harvester-node-0
  ssh_authorized_keys:
{% for ssh_key in harvester_config['ssh_authorized_keys'] %}
    - {{ ssh_key }}
{% endfor %}
  password: {{ harvester_config['password'] }}
  ntp_servers:
{% for ntp_server in harvester_config['ntp_servers'] %}
    - {{ ntp_server }}
{% endfor %}
  dns_nameservers:
{% for dns_server in harvester_config['dns_servers'] %}
    - {{ dns_server }}
{% endfor %}
install:
  automatic: true
  mode: create
  networks:
    # Static management network for the cluster interface.
    harvester-mgmt:
      interfaces:
        - name: {{ harvester_network_config['cluster'][0]['mgmt_interface'] }}
      method: static
      ip: {{ harvester_network_config['cluster'][0]['ip'] }}
      gateway: {{ harvester_network_config['dhcp_server']['gateway'] }}
      subnetMask: 255.255.255.0
    # DHCP-configured bond over the vagrant-管理 interface.
    bond0:
      interfaces:
        - name: {{ harvester_network_config['cluster'][0]['vagrant_interface'] }}
      method: dhcp
  tty: ttyS0
  # Cluster virtual IP used to reach the Harvester UI/API.
  vip: {{ harvester_network_config['vip']['ip'] }}
  vip_mode: {{ harvester_network_config['vip']['mode'] }}
  vip_hw_addr: {{ harvester_network_config['vip']['mac'] }}
31 changes: 31 additions & 0 deletions vagrant-qcow2-harvester/ansible/_download_media.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
---
# Downloads the Harvester qcow2 image and repackages it as a vagrant-libvirt
# box named "harvester", then removes the intermediate artifacts.

- name: download Harvester media
  get_url:
    url: "{{ harvester_media_url }}"
    dest: "../disks/box.img"

# box.img + metadata.json + Vagrantfile is the standard vagrant-libvirt box
# layout. (The two copy tasks below previously shared an identical name,
# which made play output ambiguous — they are now named distinctly.)
- name: copy box metadata.json for box packaging
  copy:
    src: metadata.json
    dest: ../disks

- name: copy embedded Vagrantfile for box packaging
  copy:
    src: Vagrantfile
    dest: ../disks

- name: create harvester box
  shell: tar -czf ../disks/harvester.box -C ../disks box.img metadata.json Vagrantfile

- name: add harvester box
  shell: vagrant box add harvester ../disks/harvester.box --force

- name: remove old image file
  file:
    path: "../disks/box.img"
    state: absent

- name: remove box
  file:
    path: "../disks/harvester.box"
    state: absent
35 changes: 35 additions & 0 deletions vagrant-qcow2-harvester/ansible/_join.yaml.j2
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
# example from https://github.com/harvester/ipxe-examples/blob/main/general/config-join.yaml
# Rendered by Ansible into disks/join-<N>.iso: installer config for nodes
# that join an already-created cluster via the VIP. `node_number` is passed
# in by the rendering task.

server_url: https://{{ harvester_network_config['vip']['ip'] }}:443
token: {{ harvester_config['token'] }}
os:
  hostname: harvester-node-{{ node_number }}
  ssh_authorized_keys:
{% for ssh_key in harvester_config['ssh_authorized_keys'] %}
    - {{ ssh_key }}
{% endfor %}
  password: {{ harvester_config['password'] }}
  ntp_servers:
{% for ntp_server in harvester_config['ntp_servers'] %}
    - {{ ntp_server }}
{% endfor %}
  dns_nameservers:
{% for dns_server in harvester_config['dns_servers'] %}
    - {{ dns_server }}
{% endfor %}
install:
  mode: join
  automatic: true
  networks:
    harvester-mgmt:
      interfaces:
        - name: {{ harvester_network_config['cluster'][node_number | int]['mgmt_interface'] }} # The management interface name
      method: static
      ip: {{ harvester_network_config['cluster'][node_number | int]['ip'] }}
      gateway: {{ harvester_network_config['dhcp_server']['gateway'] }}
      subnetMask: 255.255.255.0
    bond0:
      interfaces:
        - name: {{ harvester_network_config['cluster'][node_number | int]['vagrant_interface'] }}
      method: dhcp
  tty: ttyS0
29 changes: 29 additions & 0 deletions vagrant-qcow2-harvester/ansible/boot_harvester_node.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
---
# Task file: boot one Harvester node VM via vagrant and block until its web
# endpoint answers. Expects `node_number` and `harvester_network_config` to
# be supplied by the including play.

- name: create "Booting Node {{ node_number}}" message
  shell: >
    figlet "Booting Node {{ node_number }}" 2>/dev/null || echo "Booting Node {{ node_number }}"
  register: figlet_result

- name: print "Booting Node {{ node_number }}"
  debug:
    msg: "{{ figlet_result.stdout }}"

- name: set Harvester Node IP fact
  set_fact:
    harvester_node_ip: "{{ harvester_network_config['cluster'][node_number | int]['ip'] }}"

- name: boot Harvester Node {{ node_number }}
  shell: >
    vagrant up harvester-node-{{ node_number }}
  register: harvester_node_boot_result

# Poll the node over HTTPS (self-signed cert, hence validate_certs: no)
# until it returns 200: up to 20 retries x 120s delay (~40 minutes).
- name: wait for Harvester Node {{ harvester_node_ip }} to get ready
  uri:
    url: "https://{{ harvester_node_ip }}"
    validate_certs: no
    status_code: 200
    timeout: 120
  register: auth_modes_lookup_result
  until: auth_modes_lookup_result.status == 200
  retries: 20
  delay: 120
63 changes: 63 additions & 0 deletions vagrant-qcow2-harvester/ansible/files/Vagrantfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
# frozen_string_literal: true

# -*- mode: ruby -*-
# vi: set ft=ruby :

# Minimal Vagrantfile shipped inside the packaged Harvester box. It only
# pins vagrant-libvirt provider defaults; the per-VM definitions live in
# the project's top-level Vagrantfile.
Vagrant.configure("2") do |config|
  config.vm.provider :libvirt do |libvirt|
    # Use the hardware-accelerated QEMU/KVM driver. vagrant-libvirt can
    # also drive QEMU, Xen, LXC, ESX, etc. — see
    # http://libvirt.org/drivers.html for the full list.
    libvirt.driver = "kvm"

    # Talk to the local libvirtd directly rather than tunnelling over SSH.
    libvirt.connect_via_ssh = false

    # User for the libvirt connection (a password would only apply to
    # non-SSH remote connections).
    libvirt.username = "root"

    # Keep box images and instance snapshots in the default storage pool.
    libvirt.storage_pool_name = "default"
  end
end


6 changes: 6 additions & 0 deletions vagrant-qcow2-harvester/ansible/files/metadata.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
{
"provider" : "libvirt",
"format" : "qcow2",
"virtual_size" : 150
}

27 changes: 27 additions & 0 deletions vagrant-qcow2-harvester/ansible/reinstall_harvester_node.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
---
# Playbook: re-run the boot/installation flow for a single Harvester node.
# Invoke with an extra var, e.g.:
#   ansible-playbook reinstall_harvester_node.yml -e node_number=1
- name: Reinstall Harvester Node
  hosts: localhost
  connection: local
  gather_facts: false

  tasks:
    - name: create "Reinstalling Harvester Node" message
      shell: >
        figlet "Reinstalling Harvester Node {{ node_number }}" 2>/dev/null || echo "Reinstalling Harvester Node {{ node_number }}"
      register: figlet_result

    - name: print "Reinstalling Harvester Node" message
      debug:
        msg: "{{ figlet_result.stdout }}"

    # FIX: the bare `include:` directive is deprecated and removed in
    # ansible-core 2.16; `include_tasks:` is the supported equivalent for
    # including a task file.
    - name: boot Harvester nodes
      include_tasks: boot_harvester_node.yml

    - name: create "Installation Completed" message
      shell: >
        figlet "Installation Completed" 2>/dev/null || echo "Installation Completed"
      register: figlet_result

    - name: print "Installation Completed"
      debug:
        msg: "{{ figlet_result.stdout }}"
Loading