From 77afc86aeb88a4a875c45ed7b24a0fb0084a35dc Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 16:28:34 -0700 Subject: [PATCH 01/32] (POOLER-72) Add Dummy Provider Previously the only VM Provider was vSphere however this made testing and making changes difficult as it required a functioning vSphere instance. This commit adds a Dummy Provider which presents a VM provider to Pool Manager but manages provisioned "VM"s in a hashtable. The Dummy Provider can also be configured to randomly fail operations and take random amounts of time to perform operations, such as cloning a VM, which is useful to see how the Pool Manager copes with these events. This commit also updates the configuration YAML documentation and adds appropriate unit tests. --- lib/vmpooler/providers.rb | 2 +- lib/vmpooler/providers/dummy.rb | 360 +++++++++++++++++++++ spec/unit/providers/dummy_spec.rb | 509 ++++++++++++++++++++++++++++++ vmpooler.yaml.example | 92 ++++++ 4 files changed, 962 insertions(+), 1 deletion(-) create mode 100644 lib/vmpooler/providers/dummy.rb create mode 100644 spec/unit/providers/dummy_spec.rb diff --git a/lib/vmpooler/providers.rb b/lib/vmpooler/providers.rb index f640163..c1c2071 100644 --- a/lib/vmpooler/providers.rb +++ b/lib/vmpooler/providers.rb @@ -1,4 +1,4 @@ -%w(base vsphere).each do |lib| +%w(base dummy vsphere).each do |lib| begin require "vmpooler/providers/#{lib}" rescue LoadError diff --git a/lib/vmpooler/providers/dummy.rb b/lib/vmpooler/providers/dummy.rb new file mode 100644 index 0000000..2b124a3 --- /dev/null +++ b/lib/vmpooler/providers/dummy.rb @@ -0,0 +1,360 @@ +require 'yaml' + +module Vmpooler + class PoolManager + class Provider + class Dummy < Vmpooler::PoolManager::Provider::Base + # Fake VM Provider for testing + + def initialize(config, logger, metrics, name, options) + super(config, logger, metrics, name, options) + dummyfilename = provider_config['filename'] + + # This initial_state option is only intended to be used by spec tests + @dummylist = provider_options['initial_state'].nil? ? {} : provider_options['initial_state'] + + @dummylist = YAML.load_file(dummyfilename) if !dummyfilename.nil? && File.exist?(dummyfilename) + + # Even though this code is using Mutexes, it's still no 100% atomic i.e. it's still possible for + # duplicate actions to put the @dummylist hashtable into a bad state, for example; + # Deleting a VM while it's in the middle of adding a disk. + @write_lock = Mutex.new + end + + def name + 'dummy' + end + + def vms_in_pool(pool_name) + vmlist = [] + get_dummy_pool_object(pool_name).each do |vm| + vmlist << { 'name' => vm['name'] } + end + + vmlist + end + + def get_vm_host(pool_name, vm_name) + current_vm = get_dummy_vm(pool_name, vm_name) + + current_vm.nil? ? raise("VM #{vm_name} does not exist") : current_vm['vm_host'] + end + + def find_least_used_compatible_host(pool_name, vm_name) + current_vm = get_dummy_vm(pool_name, vm_name) + + # Unless migratevm_couldmove_percent is specified, don't migrate + return current_vm['vm_host'] if provider_config['migratevm_couldmove_percent'].nil? 
+ + # Only migrate if migratevm_couldmove_percent is met + return current_vm['vm_host'] if 1 + rand(100) > provider_config['migratevm_couldmove_percent'] + + # Simulate a 10 node cluster and randomly pick a different one + new_host = 'HOST' + (1 + rand(10)).to_s while new_host == current_vm['vm_host'] + + new_host + end + + def migrate_vm_to_host(pool_name, vm_name, dest_host_name) + current_vm = get_dummy_vm(pool_name, vm_name) + + # Inject migration delay + unless provider_config['migratevm_max_time'].nil? + migrate_time = 1 + rand(provider_config['migratevm_max_time']) + sleep(migrate_time) + end + + # Inject clone failure + unless provider_config['migratevm_fail_percent'].nil? + raise('Dummy Failure for migratevm_fail_percent') if 1 + rand(100) <= provider_config['migratevm_fail_percent'] + end + + @write_lock.synchronize do + current_vm = get_dummy_vm(pool_name, vm_name) + current_vm['vm_host'] = dest_host_name + write_backing_file + end + + true + end + + def get_vm(pool_name, vm_name) + dummy = get_dummy_vm(pool_name, vm_name) + return nil if dummy.nil? + + # Randomly power off the VM + unless dummy['powerstate'] != 'PoweredOn' || provider_config['getvm_poweroff_percent'].nil? + if 1 + rand(100) <= provider_config['getvm_poweroff_percent'] + @write_lock.synchronize do + dummy = get_dummy_vm(pool_name, vm_name) + dummy['powerstate'] = 'PoweredOff' + write_backing_file + end + logger.log('d', "[ ] [#{dummy['poolname']}] '#{dummy['name']}' is being Dummy Powered Off") + end + end + + # Randomly rename the host + unless dummy['hostname'] != dummy['name'] || provider_config['getvm_rename_percent'].nil? + if 1 + rand(100) <= provider_config['getvm_rename_percent'] + @write_lock.synchronize do + dummy = get_dummy_vm(pool_name, vm_name) + dummy['hostname'] = 'DUMMY' + dummy['name'] + write_backing_file + end + logger.log('d', "[ ] [#{dummy['poolname']}] '#{dummy['name']}' is being Dummy renamed") + end + end + + obj = {} + obj['name'] = dummy['name'] + obj['hostname'] = dummy['hostname'] + obj['boottime'] = dummy['boottime'] + obj['template'] = dummy['template'] + obj['poolname'] = dummy['poolname'] + obj['powerstate'] = dummy['powerstate'] + obj['snapshots'] = dummy['snapshots'] + + obj + end + + def create_vm(pool_name, dummy_hostname) + pool = pool_config(pool_name) + raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil? + + template_name = pool['template'] + + vm = {} + vm['name'] = dummy_hostname + vm['hostname'] = dummy_hostname + vm['domain'] = 'dummy.local' + # 'vm_template' is the name of the template to use to clone the VM from <----- Do we need this?!?!? + vm['vm_template'] = template_name + # 'template' is the Template name in VM Pooler API, in our case that's the poolname. + vm['template'] = pool_name + vm['poolname'] = pool_name + vm['ready'] = false + vm['boottime'] = Time.now + vm['powerstate'] = 'PoweredOn' + vm['vm_host'] = 'HOST1' + vm['dummy_state'] = 'UNKNOWN' + vm['snapshots'] = [] + vm['disks'] = [] + + # Make sure the pool exists in the dummy list + @write_lock.synchronize do + get_dummy_pool_object(pool_name) + @dummylist['pool'][pool_name] << vm + write_backing_file + end + + logger.log('d', "[ ] [#{pool_name}] '#{dummy_hostname}' is being cloned from '#{template_name}'") + + # Inject clone time delay + unless provider_config['createvm_max_time'].nil? 
+ @write_lock.synchronize do + vm['dummy_state'] = 'CLONING' + write_backing_file + end + clone_time = 1 + rand(provider_config['createvm_max_time']) + sleep(clone_time) + end + + begin + # Inject clone failure + unless provider_config['createvm_fail_percent'].nil? + raise('Dummy Failure for createvm_fail_percent') if 1 + rand(100) <= provider_config['createvm_fail_percent'] + end + + # Assert the VM is ready for use + @write_lock.synchronize do + vm['dummy_state'] = 'RUNNING' + write_backing_file + end + rescue => _err + @write_lock.synchronize do + remove_dummy_vm(pool_name, dummy_hostname) + write_backing_file + end + raise + end + + get_vm(pool_name, dummy_hostname) + end + + def create_disk(pool_name, vm_name, disk_size) + vm_object = get_dummy_vm(pool_name, vm_name) + raise("VM #{vm_name} does not exist in Pool #{pool_name} for the provider #{name}") if vm_object.nil? + + # Inject create time delay + unless provider_config['createdisk_max_time'].nil? + delay = 1 + rand(provider_config['createdisk_max_time']) + sleep(delay) + end + + # Inject create failure + unless provider_config['createdisk_fail_percent'].nil? + raise('Dummy Failure for createdisk_fail_percent') if 1 + rand(100) <= provider_config['createdisk_fail_percent'] + end + + @write_lock.synchronize do + vm_object = get_dummy_vm(pool_name, vm_name) + vm_object['disks'] << disk_size + write_backing_file + end + + true + end + + def create_snapshot(pool_name, vm_name, snapshot_name) + vm_object = get_dummy_vm(pool_name, vm_name) + raise("VM #{vm_name} does not exist in Pool #{pool_name} for the provider #{name}") if vm_object.nil? + + # Inject create time delay + unless provider_config['createsnapshot_max_time'].nil? + delay = 1 + rand(provider_config['createsnapshot_max_time']) + sleep(delay) + end + + # Inject create failure + unless provider_config['createsnapshot_fail_percent'].nil? + raise('Dummy Failure for createsnapshot_fail_percent') if 1 + rand(100) <= provider_config['createsnapshot_fail_percent'] + end + + @write_lock.synchronize do + vm_object = get_dummy_vm(pool_name, vm_name) + vm_object['snapshots'] << snapshot_name + write_backing_file + end + + true + end + + def revert_snapshot(pool_name, vm_name, snapshot_name) + vm_object = get_dummy_vm(pool_name, vm_name) + raise("VM #{vm_name} does not exist in Pool #{pool_name} for the provider #{name}") if vm_object.nil? + + # Inject create time delay + unless provider_config['revertsnapshot_max_time'].nil? + delay = 1 + rand(provider_config['revertsnapshot_max_time']) + sleep(delay) + end + + # Inject create failure + unless provider_config['revertsnapshot_fail_percent'].nil? + raise('Dummy Failure for revertsnapshot_fail_percent') if 1 + rand(100) <= provider_config['revertsnapshot_fail_percent'] + end + + vm_object['snapshots'].include?(snapshot_name) + end + + def destroy_vm(pool_name, vm_name) + vm = get_dummy_vm(pool_name, vm_name) + return false if vm.nil? + return false if vm['poolname'] != pool_name + + # Shutdown down the VM if it's poweredOn + if vm['powerstate'] == 'PoweredOn' + logger.log('d', "[ ] [#{pool_name}] '#{vm_name}' is being shut down") + + # Inject shutdown delay time + unless provider_config['destroyvm_max_shutdown_time'].nil? 
+ shutdown_time = 1 + rand(provider_config['destroyvm_max_shutdown_time']) + sleep(shutdown_time) + end + + @write_lock.synchronize do + vm = get_dummy_vm(pool_name, vm_name) + vm['powerstate'] = 'PoweredOff' + write_backing_file + end + end + + # Inject destroy VM delay + unless provider_config['destroyvm_max_time'].nil? + destroy_time = 1 + rand(provider_config['destroyvm_max_time']) + sleep(destroy_time) + end + + # Inject destroy VM failure + unless provider_config['destroyvm_fail_percent'].nil? + raise('Dummy Failure for migratevm_fail_percent') if 1 + rand(100) <= provider_config['destroyvm_fail_percent'] + end + + # 'Destroy' the VM + @write_lock.synchronize do + remove_dummy_vm(pool_name, vm_name) + write_backing_file + end + + true + end + + def vm_ready?(pool_name, vm_name) + vm_object = get_dummy_vm(pool_name, vm_name) + return false if vm_object.nil? + return false if vm_object['poolname'] != pool_name + return true if vm_object['ready'] + + timeout = provider_config['is_ready_timeout'] || 5 + + Timeout.timeout(timeout) do + while vm_object['dummy_state'] != 'RUNNING' + sleep(2) + vm_object = get_dummy_vm(pool_name, vm_name) + end + end + + # Simulate how long it takes from a VM being powered on until + # it's ready to receive a connection + sleep(2) + + unless provider_config['vmready_fail_percent'].nil? + raise('Dummy Failure for vmready_fail_percent') if 1 + rand(100) <= provider_config['vmready_fail_percent'] + end + + @write_lock.synchronize do + vm_object['ready'] = true + write_backing_file + end + + true + end + + private + + # Note - NEVER EVER use the @write_lock object in the private methods!!!! Deadlocks will ensue + + def remove_dummy_vm(pool_name, vm_name) + return if @dummylist['pool'][pool_name].nil? + new_poollist = @dummylist['pool'][pool_name].delete_if { |vm| vm['name'] == vm_name } + @dummylist['pool'][pool_name] = new_poollist + end + + # Get's the pool config safely from the in-memory hashtable + def get_dummy_pool_object(pool_name) + @dummylist['pool'] = {} if @dummylist['pool'].nil? + @dummylist['pool'][pool_name] = [] if @dummylist['pool'][pool_name].nil? + + @dummylist['pool'][pool_name] + end + + def get_dummy_vm(pool_name, vm_name) + return nil if @dummylist['pool'][pool_name].nil? + + @dummylist['pool'][pool_name].each do |poolvm| + return poolvm if poolvm['name'] == vm_name + end + + nil + end + + def write_backing_file + dummyfilename = provider_config['filename'] + return if dummyfilename.nil? 
+ File.open(dummyfilename, 'w') { |file| file.write(YAML.dump(@dummylist)) } + end + end + end + end +end diff --git a/spec/unit/providers/dummy_spec.rb b/spec/unit/providers/dummy_spec.rb new file mode 100644 index 0000000..5baf092 --- /dev/null +++ b/spec/unit/providers/dummy_spec.rb @@ -0,0 +1,509 @@ +require 'spec_helper' + +describe 'Vmpooler::PoolManager::Provider::Dummy' do + let(:logger) { MockLogger.new } + let(:metrics) { Vmpooler::DummyStatsd.new } + let(:pool_name) { 'pool1' } + let(:other_pool_name) { 'pool2' } + let(:vm_name) { 'vm1' } + + let(:running_vm_name) { 'vm2' } + let(:notready_vm_name) { 'vm3' } + + let (:provider_options) { + # Construct an initial state for testing + dummylist = {} + dummylist['pool'] = {} + # pool1 is a pool of "normal" VMs + dummylist['pool'][pool_name] = [] + # A normal running VM + vm = {} + vm['name'] = vm_name + vm['hostname'] = vm_name + vm['domain'] = 'dummy.local' + vm['vm_template'] = 'template1' + vm['template'] = pool_name + vm['poolname'] = pool_name + vm['ready'] = true + vm['boottime'] = Time.now + vm['powerstate'] = 'PoweredOn' + vm['vm_host'] = 'HOST1' + vm['snapshots'] = [] + vm['disks'] = [] + vm['dummy_state'] = 'RUNNING' + dummylist['pool'][pool_name] << vm + + # pool2 is a pool of "abnormal" VMs e.g. PoweredOff etc. + dummylist['pool'][other_pool_name] = [] + # A freshly provisioned VM that is not ready + vm = {} + vm['name'] = running_vm_name + vm['hostname'] = running_vm_name + vm['domain'] = 'dummy.local' + vm['vm_template'] = 'template1' + vm['template'] = other_pool_name + vm['poolname'] = other_pool_name + vm['ready'] = false + vm['boottime'] = Time.now + vm['powerstate'] = 'PoweredOn' + vm['vm_host'] = 'HOST1' + vm['snapshots'] = [] + vm['disks'] = [] + vm['dummy_state'] = 'UNKNOWN' + dummylist['pool'][other_pool_name] << vm + # A freshly provisioned VM that is running but not ready + vm = {} + vm['name'] = notready_vm_name + vm['hostname'] = notready_vm_name + vm['domain'] = 'dummy.local' + vm['vm_template'] = 'template1' + vm['template'] = other_pool_name + vm['poolname'] = other_pool_name + vm['ready'] = false + vm['boottime'] = Time.now + vm['powerstate'] = 'PoweredOn' + vm['vm_host'] = 'HOST1' + vm['snapshots'] = [] + vm['disks'] = [] + vm['dummy_state'] = 'RUNNING' + dummylist['pool'][other_pool_name] << vm + + { + 'initial_state' => dummylist + } + } + + let(:config) { YAML.load(<<-EOT +--- +:config: + max_tries: 3 + retry_factor: 10 +:providers: + :dummy: + key1: 'value1' +:pools: + - name: '#{pool_name}' + size: 5 + - name: 'pool2' + size: 5 +EOT + ) + } + + subject { Vmpooler::PoolManager::Provider::Dummy.new(config, logger, metrics, 'dummy', provider_options) } + + describe '#name' do + it 'should be dummy' do + expect(subject.name).to eq('dummy') + end + end + + describe '#vms_in_pool' do + it 'should return [] when pool does not exist' do + vm_list = subject.vms_in_pool('missing_pool') + + expect(vm_list).to eq([]) + end + + it 'should return an array of VMs when pool exists' do + vm_list = subject.vms_in_pool(pool_name) + + expect(vm_list.count).to eq(1) + end + end + + describe '#get_vm_host' do + it 'should return the hostname when VM exists' do + expect(subject.get_vm_host(pool_name, vm_name)).to eq('HOST1') + end + + it 'should error when VM does not exist' do + expect{subject.get_vm_host(pool_name, 'doesnotexist')}.to raise_error(RuntimeError) + end + end + + describe '#find_least_used_compatible_host' do + it 'should return the current host' do + new_host = 
subject.find_least_used_compatible_host(pool_name, vm_name) + expect(new_host).to eq('HOST1') + end + + context 'using migratevm_couldmove_percent' do + describe 'of zero' do + before(:each) do + config[:providers][:dummy]['migratevm_couldmove_percent'] = 0 + end + + it 'should return the current host' do + new_host = subject.find_least_used_compatible_host(pool_name, vm_name) + expect(new_host).to eq('HOST1') + end + end + + describe 'of 100' do + before(:each) do + config[:providers][:dummy]['migratevm_couldmove_percent'] = 100 + end + + it 'should return a different host' do + new_host = subject.find_least_used_compatible_host(pool_name, vm_name) + expect(new_host).to_not eq('HOST1') + end + end + + end + end + + describe '#migrate_vm_to_host' do + it 'should move to the new host' do + expect(subject.migrate_vm_to_host(pool_name, 'vm1','NEWHOST')).to eq(true) + expect(subject.get_vm_host(pool_name, 'vm1')).to eq('NEWHOST') + end + + context 'using migratevm_fail_percent' do + describe 'of zero' do + before(:each) do + config[:providers][:dummy]['migratevm_fail_percent'] = 0 + end + + it 'should move to the new host' do + expect(subject.migrate_vm_to_host(pool_name, 'vm1','NEWHOST')).to eq(true) + expect(subject.get_vm_host(pool_name, 'vm1')).to eq('NEWHOST') + end + end + + describe 'of 100' do + before(:each) do + config[:providers][:dummy]['migratevm_fail_percent'] = 100 + end + + it 'should raise an error' do + expect{subject.migrate_vm_to_host(pool_name, 'vm1','NEWHOST')}.to raise_error(/migratevm_fail_percent/) + end + end + end + end + + describe '#get_vm' do + it 'should return the VM when VM exists' do + vm = subject.get_vm(pool_name, vm_name) + expect(vm['name']).to eq(vm_name) + expect(vm['powerstate']).to eq('PoweredOn') + expect(vm['hostname']).to eq(vm['name']) + end + + it 'should return nil when VM does not exist' do + expect(subject.get_vm(pool_name, 'doesnotexist')).to eq(nil) + end + + context 'using getvm_poweroff_percent' do + describe 'of zero' do + before(:each) do + config[:providers][:dummy]['getvm_poweroff_percent'] = 0 + end + + it 'will not power off a VM' do + vm = subject.get_vm(pool_name, vm_name) + expect(vm['name']).to eq(vm_name) + expect(vm['powerstate']).to eq('PoweredOn') + end + end + + describe 'of 100' do + before(:each) do + config[:providers][:dummy]['getvm_poweroff_percent'] = 100 + end + + it 'will power off a VM' do + vm = subject.get_vm(pool_name, vm_name) + expect(vm['name']).to eq(vm_name) + expect(vm['powerstate']).to eq('PoweredOff') + end + end + end + + context 'using getvm_rename_percent' do + describe 'of zero' do + before(:each) do + config[:providers][:dummy]['getvm_rename_percent'] = 0 + end + + it 'will not rename a VM' do + vm = subject.get_vm(pool_name, vm_name) + expect(vm['name']).to eq(vm_name) + expect(vm['hostname']).to eq(vm['name']) + end + end + + describe 'of 100' do + before(:each) do + config[:providers][:dummy]['getvm_rename_percent'] = 100 + end + + it 'will rename a VM' do + vm = subject.get_vm(pool_name, vm_name) + expect(vm['name']).to eq(vm_name) + expect(vm['hostname']).to_not eq(vm['name']) + end + end + end + end + + describe '#create_vm' do + let(:new_vm_name) { 'newvm' } + + it 'should return a new VM' do + expect(subject.create_vm(pool_name, new_vm_name)['name']).to eq(new_vm_name) + end + + it 'should increase the number of VMs in the pool' do + old_pool_count = subject.vms_in_pool(pool_name).count + + new_vm = subject.create_vm(pool_name, new_vm_name) + + expect(subject.vms_in_pool(pool_name).count).to 
eq(old_pool_count + 1) + end + + context 'using createvm_fail_percent' do + describe 'of zero' do + before(:each) do + config[:providers][:dummy]['createvm_fail_percent'] = 0 + end + + it 'should return a new VM' do + expect(subject.create_vm(pool_name, new_vm_name)['name']).to eq(new_vm_name) + end + end + + describe 'of 100' do + before(:each) do + config[:providers][:dummy]['createvm_fail_percent'] = 100 + end + + it 'should raise an error' do + expect{subject.create_vm(pool_name, new_vm_name)}.to raise_error(/createvm_fail_percent/) + end + + it 'new VM should not exist' do + begin + subject.create_vm(pool_name, new_vm_name) + rescue + end + expect(subject.get_vm(pool_name, new_vm_name)).to eq(nil) + end + end + end + end + + describe '#create_disk' do + let(:disk_size) { 10 } + + it 'should return true when the disk is created' do + expect(subject.create_disk(pool_name, vm_name,disk_size)).to be true + end + + it 'should raise an error when VM does not exist' do + expect{ subject.create_disk(pool_name, 'doesnotexist',disk_size) }.to raise_error(/VM doesnotexist does not exist/) + end + + context 'using createdisk_fail_percent' do + describe 'of zero' do + before(:each) do + config[:providers][:dummy]['createdisk_fail_percent'] = 0 + end + + it 'should return true when the disk is created' do + expect(subject.create_disk(pool_name, vm_name,disk_size)).to be true + end + end + + describe 'of 100' do + before(:each) do + config[:providers][:dummy]['createdisk_fail_percent'] = 100 + end + + it 'should raise an error' do + expect{subject.create_disk(pool_name, vm_name,disk_size)}.to raise_error(/createdisk_fail_percent/) + end + end + end + end + + describe '#create_snapshot' do + let(:snapshot_name) { 'newsnapshot' } + + it 'should return true when the snapshot is created' do + expect(subject.create_snapshot(pool_name, vm_name, snapshot_name)).to be true + end + + it 'should raise an error when VM does not exist' do + expect{ subject.create_snapshot(pool_name, 'doesnotexist', snapshot_name) }.to raise_error(/VM doesnotexist does not exist/) + end + + context 'using createsnapshot_fail_percent' do + describe 'of zero' do + before(:each) do + config[:providers][:dummy]['createsnapshot_fail_percent'] = 0 + end + + it 'should return true when the disk is created' do + expect(subject.create_snapshot(pool_name, vm_name, snapshot_name)).to be true + end + end + + describe 'of 100' do + before(:each) do + config[:providers][:dummy]['createsnapshot_fail_percent'] = 100 + end + + it 'should raise an error' do + expect{ subject.create_snapshot(pool_name, vm_name, snapshot_name) }.to raise_error(/createsnapshot_fail_percent/) + end + end + end + end + + describe '#revert_snapshot' do + let(:snapshot_name) { 'newsnapshot' } + + before(:each) do + # Create a snapshot + subject.create_snapshot(pool_name, vm_name, snapshot_name) + end + + it 'should return true when the snapshot is reverted' do + expect(subject.revert_snapshot(pool_name, vm_name, snapshot_name)).to be true + end + + it 'should raise an error when VM does not exist' do + expect{ subject.revert_snapshot(pool_name, 'doesnotexist', snapshot_name) }.to raise_error(/VM doesnotexist does not exist/) + end + + it 'should return false when the snapshot does not exist' do + expect(subject.revert_snapshot(pool_name, vm_name, 'doesnotexist')).to be false + end + + context 'using revertsnapshot_fail_percent' do + describe 'of zero' do + before(:each) do + config[:providers][:dummy]['revertsnapshot_fail_percent'] = 0 + end + + it 'should return true 
when the snapshot is reverted' do + expect(subject.revert_snapshot(pool_name, vm_name, snapshot_name)).to be true + end + end + + describe 'of 100' do + before(:each) do + config[:providers][:dummy]['revertsnapshot_fail_percent'] = 100 + end + + it 'should raise an error when VM does not exist' do + expect{ subject.revert_snapshot(pool_name, vm_name, snapshot_name) }.to raise_error(/revertsnapshot_fail_percent/) + end + end + end + end + + describe '#destroy_vm' do + it 'should return true when destroyed' do + expect(subject.destroy_vm(pool_name, vm_name)).to eq(true) + end + + it 'should log if the VM is powered off' do + allow(logger).to receive(:log) + expect(logger).to receive(:log).with('d', "[ ] [pool1] 'vm1' is being shut down") + expect(subject.destroy_vm(pool_name, vm_name)).to eq(true) + end + + it 'should return false if VM does not exist' do + expect(subject.destroy_vm('doesnotexist',vm_name)).to eq(false) + end + + it 'should return false if VM is not in the correct pool' do + expect(subject.destroy_vm(other_pool_name, vm_name)).to eq(false) + end + + context 'using destroyvm_fail_percent' do + describe 'of zero' do + before(:each) do + config[:providers][:dummy]['destroyvm_fail_percent'] = 0 + end + + it 'should return true when destroyed' do + expect(subject.destroy_vm(pool_name, vm_name)).to eq(true) + end + end + + describe 'of 100' do + before(:each) do + config[:providers][:dummy]['destroyvm_fail_percent'] = 100 + end + + it 'should raise an error' do + expect{subject.destroy_vm(pool_name, vm_name)}.to raise_error(/migratevm_fail_percent/) + end + end + end + end + + describe '#vm_ready?' do + before(:each) do + # Speed up tests and ignore sleeping + allow(subject).to receive(:sleep) + end + + it 'should return true if ready' do + expect(subject.vm_ready?(pool_name, vm_name)).to eq(true) + end + + it 'should return false if VM does not exist' do + expect(subject.vm_ready?(pool_name, 'doesnotexist')).to eq(false) + end + + it 'should return false if VM is not in the correct pool' do + expect(subject.vm_ready?(other_pool_name, vm_name)).to eq(false) + end + + it 'should raise an error if timeout expires' do + expect{subject.vm_ready?(other_pool_name, running_vm_name)}.to raise_error(Timeout::Error) + end + + it 'should return true if VM becomes ready' do + expect(subject.vm_ready?(other_pool_name, notready_vm_name)).to eq(true) + end + + context 'using vmready_fail_percent' do + describe 'of zero' do + before(:each) do + config[:providers][:dummy]['vmready_fail_percent'] = 0 + end + + it 'should return true if VM becomes ready' do + expect(subject.vm_ready?(other_pool_name, notready_vm_name)).to eq(true) + end + end + + describe 'of 100' do + before(:each) do + config[:providers][:dummy]['vmready_fail_percent'] = 100 + end + + it 'should raise an error' do + expect{subject.vm_ready?(other_pool_name, notready_vm_name)}.to raise_error(/vmready_fail_percent/) + end + end + end + end + + describe '#vm_exists?' 
do + it 'should return true when VM exists' do + expect(subject.vm_exists?(pool_name, vm_name)).to eq(true) + end + + it 'should return true when VM does not exist' do + expect(subject.vm_exists?(pool_name, 'doesnotexist')).to eq(false) + end + end +end diff --git a/vmpooler.yaml.example b/vmpooler.yaml.example index c128b5d..2aab60c 100644 --- a/vmpooler.yaml.example +++ b/vmpooler.yaml.example @@ -25,6 +25,98 @@ username: 'vmpooler' password: 'swimsw1msw!m' +:providers: +# :providers: +# +# This section contains the VM providers for VMs and Pools +# The currently supported backing services are: +# - dummy + +# :dummy: +# +# The dummy backing service is a simple text file service that can be used +# to test vmpooler operations in a development or test environment +# +# Available configuration parameters: +# +# - filename (Optional) +# The filename used to store the backing text file. If this is not specified the VM state is only +# kept in memory, and is lost when the Provider is shutdown +# +# - migratevm_couldmove_percent +# Percent chance that a VM could be moved to another host +# (optional; default 0%) +# +# - migratevm_max_time +# Maximum amount of random time a VM migration action will take in seconds +# (optional; default 0 seconds) +# +# - migratevm_fail_percent +# Percent chance that a VM migration action will fail +# (optional; default 0%) +# +# - getvm_poweroff_percent +# Percent chance that when the VM information is gathered that the VM will be powered off +# (optional; default 0%) +# +# - getvm_rename_percent +# Percent chance that when the VM information is gathered that the VM will be renamed +# (optional; default 0%) +# +# - createvm_max_time +# Maximum amount of random time a VM creation action will take, in seconds +# (optional; default 0 seconds) +# +# - createvm_fail_percent +# Percent chance that a VM creation action will fail +# (optional; default 0%) +# +# - createdisk_max_time +# Maximum amount of random time a VM create disk action will take, in seconds +# (optional; default 0 seconds) +# +# - createdisk_fail_percent +# Percent chance that a VM create disk action will fail +# (optional; default 0%) +# +# - createsnapshot_max_time +# Maximum amount of random time a VM create snapshot action will take, in seconds +# (optional; default 0 seconds) +# +# - createsnapshot_fail_percent +# Percent chance that a VM create snapshot action will fail +# (optional; default 0%) +# +# - revertsnapshot_max_time +# Maximum amount of random time a VM revert snapshot action will take, in seconds +# (optional; default 0 seconds) +# +# - revertsnapshot_fail_percent +# Percent chance that a VM revert snapshot action will fail +# (optional; default 0%) +# +# - destroyvm_max_shutdown_time +# Maximum amount of random time a VM shutdown action will take during destroy, in seconds +# (optional; default 0 seconds) +# +# - destroyvm_max_time +# Maximum amount of random time a VM destroy action will take, in seconds +# (optional; default 0 seconds) +# +# - destroyvm_fail_percent +# Percent chance that a VM destroy action will fail +# (optional; default 0%) +# +# - vmready_fail_percent +# Percent chance that an error is raised when vm_ready? 
is called +# (optional; default 0%) + +# Example: + + :dummy: + filename: '/tmp/dummy-backing.yaml' + + # :redis: # # This section contains the server hostname and authentication credentials From 5aa5019822dc2cca84c7967717993097470d1d7e Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Sat, 1 Apr 2017 20:52:56 -0700 Subject: [PATCH 02/32] (POOLER-70) Add mock provider test fixture This commit creates a VM Provider test fixture for spec tests that merely uses the Base Provider class with a name of mock_provider. This will then be used by unit tests in further commits. --- spec/unit/pool_manager_spec.rb | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index 125f357..6a6c226 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -13,13 +13,27 @@ describe 'Pool Manager' do let(:logger) { MockLogger.new } let(:redis) { MockRedis.new } let(:metrics) { Vmpooler::DummyStatsd.new } - let(:config) { {} } let(:pool) { 'pool1' } let(:vm) { 'vm1' } let(:timeout) { 5 } let(:host) { double('host') } let(:token) { 'token1234'} + let(:provider_options) { {} } + let(:provider) { Vmpooler::PoolManager::Provider::Base.new(config, logger, metrics, 'mock_provider', provider_options) } + + let(:config) { YAML.load(<<-EOT +--- +:config: +:providers: + :mock: +:pools: + - name: '#{pool}' + size: 1 +EOT + ) + } + subject { Vmpooler::PoolManager.new(config, logger, redis, metrics) } describe '#check_pending_vm' do From 4bf32be87e705509bb33b660c5b5364b1ba83d58 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Mon, 3 Apr 2017 14:22:07 -0700 Subject: [PATCH 03/32] (POOLER-70) Update base VM provider Previously the base VM provider class was added however it was missing various functions from its definition. This commit: - Modifies the VMPooler configuration to add an empty provider config. if the provider config is missing - Helper method to return all of the pools this provider is responsible for --- lib/vmpooler/providers/base.rb | 25 ++++++++++++++++++++++--- spec/unit/providers/base_spec.rb | 26 +++++++++++++++++++++++--- 2 files changed, 45 insertions(+), 6 deletions(-) diff --git a/lib/vmpooler/providers/base.rb b/lib/vmpooler/providers/base.rb index 579124b..23b0bfb 100644 --- a/lib/vmpooler/providers/base.rb +++ b/lib/vmpooler/providers/base.rb @@ -18,7 +18,15 @@ module Vmpooler @metrics = metrics @provider_name = name + # Ensure that there is not a nil provider configuration + @config[:providers] = {} if @config[:providers].nil? + @config[:providers][@provider_name] = {} if provider_config.nil? + + # Ensure that there is not a nil pool configuration + @config[:pools] = {} if @config[:pools].nil? + @provider_options = options + logger.log('s', "[!] Creating provider '#{name}'") end # Helper Methods @@ -41,7 +49,7 @@ module Vmpooler def provider_config @config[:providers].each do |provider| # Convert the symbol from the config into a string for comparison - return provider[1] if provider[0].to_s == @provider_name + return (provider[1].nil? ? 
{} : provider[1]) if provider[0].to_s == @provider_name end nil @@ -60,6 +68,16 @@ module Vmpooler @provider_name end + # returns + # Array[String] : Array of pool names this provider services + def provided_pools + list = [] + @config[:pools].each do |pool| + list << pool['name'] if pool['provider'] == name + end + list + end + # Pool Manager Methods # inputs @@ -146,8 +164,8 @@ module Vmpooler # [String] new_snapshot_name : Name of the new snapshot to create # returns # [Boolean] : true if success, false if snapshot could not be created + # Raises RuntimeError if the Pool does not exist # Raises RuntimeError if the VM does not exist - # Raises RuntimeError if the snapshot already exists def create_snapshot(_pool_name, _vm_name, _new_snapshot_name) raise("#{self.class.name} does not implement create_snapshot") end @@ -158,8 +176,9 @@ module Vmpooler # [String] snapshot_name : Name of the snapshot to restore to # returns # [Boolean] : true if success, false if snapshot could not be revertted + # Raises RuntimeError if the Pool does not exist # Raises RuntimeError if the VM does not exist - # Raises RuntimeError if the snapshot already exists + # Raises RuntimeError if the snapshot does not exist def revert_snapshot(_pool_name, _vm_name, _snapshot_name) raise("#{self.class.name} does not implement revert_snapshot") end diff --git a/spec/unit/providers/base_spec.rb b/spec/unit/providers/base_spec.rb index b9b5155..5e7feb6 100644 --- a/spec/unit/providers/base_spec.rb +++ b/spec/unit/providers/base_spec.rb @@ -84,7 +84,7 @@ EOT ) } - context 'Given a misconfigured provider name' do + context 'Given a provider with no configuration' do let(:config) { YAML.load(<<-EOT --- :providers: @@ -94,8 +94,8 @@ EOT EOT ) } - it 'should return nil' do - expect(subject.provider_config).to be_nil + it 'should return empty hash' do + expect(subject.provider_config).to eq({}) end end @@ -120,6 +120,26 @@ EOT end end + describe '#provided_pools' do + let(:config) { YAML.load(<<-EOT +--- +:pools: + - name: 'pool1' + provider: 'base' + - name: 'pool2' + provider: 'base' + - name: 'otherpool' + provider: 'other provider' + - name: 'no name' +EOT + ) + } + + it "should return pools serviced by this provider" do + expect(subject.provided_pools).to eq(['pool1','pool2']) + end + end + describe '#vms_in_pool' do it 'should raise error' do expect{subject.vms_in_pool('pool')}.to raise_error(/does not implement vms_in_pool/) From 199bf4a07013bb26033201d61688a1baae620561 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 13:40:47 -0700 Subject: [PATCH 04/32] (POOLER-70) Update check_pending_vm for VM Provider Previously the Pool Manager would use vSphere objects directly. 
This commit - Modifies the pool_manager to use the VM provider methods instead - Removes the open_socket method and tests as it is only required in the vSphere VM provider --- lib/vmpooler/pool_manager.rb | 29 +++++-------- spec/unit/pool_manager_spec.rb | 74 ++++------------------------------ 2 files changed, 19 insertions(+), 84 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index c8c033b..87c0588 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -22,34 +22,27 @@ module Vmpooler # Check the state of a VM def check_pending_vm(vm, pool, timeout, provider) Thread.new do - _check_pending_vm(vm, pool, timeout, provider) - end - end - - def open_socket(host, domain=nil, timeout=5, port=22, &block) - Timeout.timeout(timeout) do - target_host = host - target_host = "#{host}.#{domain}" if domain - sock = TCPSocket.new target_host, port begin - yield sock if block_given? - ensure - sock.close + _check_pending_vm(vm, pool, timeout, provider) + rescue => err + $logger.log('s', "[!] [#{pool}] '#{vm}' errored while checking a pending vm : #{err}") + fail_pending_vm(vm, pool, timeout) + raise end end end def _check_pending_vm(vm, pool, timeout, provider) - host = provider.find_vm(vm) - + host = provider.get_vm(pool, vm) if ! host fail_pending_vm(vm, pool, timeout, false) return end - open_socket vm - move_pending_vm_to_ready(vm, pool, host) - rescue - fail_pending_vm(vm, pool, timeout) + if provider.vm_ready?(pool, vm) + move_pending_vm_to_ready(vm, pool, host) + else + fail_pending_vm(vm, pool, timeout) + end end def remove_nonexistent_vm(vm, pool) diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index 6a6c226..df17f5c 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -37,8 +37,6 @@ EOT subject { Vmpooler::PoolManager.new(config, logger, redis, metrics) } describe '#check_pending_vm' do - let(:provider) { double('provider') } - before do expect(subject).not_to be_nil end @@ -51,72 +49,14 @@ EOT end end - describe '#open_socket' do - let(:TCPSocket) { double('tcpsocket') } - let(:socket) { double('tcpsocket') } - let(:hostname) { 'host' } - let(:domain) { 'domain.local'} - let(:default_socket) { 22 } - - before do - expect(subject).not_to be_nil - allow(socket).to receive(:close) - end - - it 'opens socket with defaults' do - expect(TCPSocket).to receive(:new).with(hostname,default_socket).and_return(socket) - - expect(subject.open_socket(hostname)).to eq(nil) - end - - it 'yields the socket if a block is given' do - expect(TCPSocket).to receive(:new).with(hostname,default_socket).and_return(socket) - - expect{ |socket| subject.open_socket(hostname,nil,nil,default_socket,&socket) }.to yield_control.exactly(1).times - end - - it 'closes the opened socket' do - expect(TCPSocket).to receive(:new).with(hostname,default_socket).and_return(socket) - expect(socket).to receive(:close) - - expect(subject.open_socket(hostname)).to eq(nil) - end - - it 'opens a specific socket' do - expect(TCPSocket).to receive(:new).with(hostname,80).and_return(socket) - - expect(subject.open_socket(hostname,nil,nil,80)).to eq(nil) - end - - it 'uses a specific domain with the hostname' do - expect(TCPSocket).to receive(:new).with("#{hostname}.#{domain}",default_socket).and_return(socket) - - expect(subject.open_socket(hostname,domain)).to eq(nil) - end - - it 'raises error if host is not resolvable' do - expect(TCPSocket).to 
receive(:new).with(hostname,default_socket).and_raise(SocketError,'getaddrinfo: No such host is known') - - expect { subject.open_socket(hostname,nil,1) }.to raise_error(SocketError) - end - - it 'raises error if socket is not listening' do - expect(TCPSocket).to receive(:new).with(hostname,default_socket).and_raise(SocketError,'No connection could be made because the target machine actively refused it') - - expect { subject.open_socket(hostname,nil,1) }.to raise_error(SocketError) - end - end - describe '#_check_pending_vm' do - let(:provider) { double('provider') } - before do expect(subject).not_to be_nil end context 'host does not exist or not in pool' do it 'calls fail_pending_vm' do - expect(provider).to receive(:find_vm).and_return(nil) + expect(provider).to receive(:get_vm).with(pool,vm).and_return(nil) expect(subject).to receive(:fail_pending_vm).with(vm, pool, timeout, false) subject._check_pending_vm(vm, pool, timeout, provider) @@ -124,17 +64,19 @@ EOT end context 'host is in pool' do + before do + expect(provider).to receive(:get_vm).with(pool,vm).and_return(host) + end + it 'calls move_pending_vm_to_ready if host is ready' do - expect(provider).to receive(:find_vm).and_return(host) - expect(subject).to receive(:open_socket).and_return(nil) + expect(provider).to receive(:vm_ready?).with(pool,vm).and_return(true) expect(subject).to receive(:move_pending_vm_to_ready).with(vm, pool, host) subject._check_pending_vm(vm, pool, timeout, provider) end - it 'calls fail_pending_vm if an error is raised' do - expect(provider).to receive(:find_vm).and_return(host) - expect(subject).to receive(:open_socket).and_raise(SocketError,'getaddrinfo: No such host is known') + it 'calls fail_pending_vm if host is not ready' do + expect(provider).to receive(:vm_ready?).with(pool,vm).and_return(false) expect(subject).to receive(:fail_pending_vm).with(vm, pool, timeout) subject._check_pending_vm(vm, pool, timeout, provider) From 9f4fc903b9fa44cc3db8c90f281abfd313100f51 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 13:42:09 -0700 Subject: [PATCH 05/32] (POOLER-70) Update fail_pending_vm for VM Provider Previously the Pool Manager would use vSphere objects directly. This commit - Modifies the pool_manager to use the VM provider methods instead - Modified to return true or false to indicate that the VM was failed --- lib/vmpooler/pool_manager.rb | 6 ++++-- spec/unit/pool_manager_spec.rb | 12 ++++++------ 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index 87c0588..5e57dca 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -50,9 +50,9 @@ module Vmpooler $logger.log('d', "[!] [#{pool}] '#{vm}' no longer exists. Removing from pending.") end - def fail_pending_vm(vm, pool, timeout, exists=true) + def fail_pending_vm(vm, pool, timeout, exists = true) clone_stamp = $redis.hget("vmpooler__vm__#{vm}", 'clone') - return if ! 
clone_stamp + return true if !clone_stamp time_since_clone = (Time.now - Time.parse(clone_stamp)) / 60 if time_since_clone > timeout @@ -63,8 +63,10 @@ module Vmpooler remove_nonexistent_vm(vm, pool) end end + true rescue => err $logger.log('d', "Fail pending VM failed with an error: #{err}") + false end def move_pending_vm_to_ready(vm, pool, host) diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index df17f5c..fcdee1d 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -114,19 +114,19 @@ EOT end it 'takes no action if VM is not cloning' do - expect(subject.fail_pending_vm(vm, pool, timeout)).to eq(nil) + expect(subject.fail_pending_vm(vm, pool, timeout)).to eq(true) expect(redis.sismember("vmpooler__pending__#{pool}", vm)).to be(true) end it 'takes no action if VM is within timeout' do redis.hset("vmpooler__vm__#{vm}", 'clone',Time.now.to_s) - expect(subject.fail_pending_vm(vm, pool, timeout)).to eq(nil) + expect(subject.fail_pending_vm(vm, pool, timeout)).to eq(true) expect(redis.sismember("vmpooler__pending__#{pool}", vm)).to be(true) end it 'moves VM to completed queue if VM has exceeded timeout and exists' do redis.hset("vmpooler__vm__#{vm}", 'clone',Date.new(2001,1,1).to_s) - expect(subject.fail_pending_vm(vm, pool, timeout,true)).to eq(nil) + expect(subject.fail_pending_vm(vm, pool, timeout,true)).to eq(true) expect(redis.sismember("vmpooler__pending__#{pool}", vm)).to be(false) expect(redis.sismember("vmpooler__completed__#{pool}", vm)).to be(true) end @@ -134,18 +134,18 @@ EOT it 'logs message if VM has exceeded timeout and exists' do redis.hset("vmpooler__vm__#{vm}", 'clone',Date.new(2001,1,1).to_s) expect(logger).to receive(:log).with('d', "[!] [#{pool}] '#{vm}' marked as 'failed' after #{timeout} minutes") - expect(subject.fail_pending_vm(vm, pool, timeout,true)).to eq(nil) + expect(subject.fail_pending_vm(vm, pool, timeout,true)).to eq(true) end it 'calls remove_nonexistent_vm if VM has exceeded timeout and does not exist' do redis.hset("vmpooler__vm__#{vm}", 'clone',Date.new(2001,1,1).to_s) expect(subject).to receive(:remove_nonexistent_vm).with(vm, pool) - expect(subject.fail_pending_vm(vm, pool, timeout,false)).to eq(nil) + expect(subject.fail_pending_vm(vm, pool, timeout,false)).to eq(true) end it 'swallows error if an error is raised' do redis.hset("vmpooler__vm__#{vm}", 'clone','iamnotparsable_asdate') - expect(subject.fail_pending_vm(vm, pool, timeout,true)).to eq(nil) + expect(subject.fail_pending_vm(vm, pool, timeout,true)).to eq(false) end it 'logs message if an error is raised' do From 760dc1c67e8eba87bd883dff868cd87f286b53f6 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 13:43:24 -0700 Subject: [PATCH 06/32] (POOLER-70) Update move_pending_vm_to_ready for VM Provider Previously the Pool Manager would use vSphere objects directly. 
This commit - Modifies the pool_manager to use the VM provider methods instead --- lib/vmpooler/pool_manager.rb | 10 +++------- spec/unit/pool_manager_spec.rb | 14 ++++---------- 2 files changed, 7 insertions(+), 17 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index 5e57dca..01bc1f0 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -70,13 +70,9 @@ module Vmpooler end def move_pending_vm_to_ready(vm, pool, host) - if (host.summary) && - (host.summary.guest) && - (host.summary.guest.hostName) && - (host.summary.guest.hostName == vm) - + if host['hostname'] == vm begin - Socket.getaddrinfo(vm, nil) # WTF? + Socket.getaddrinfo(vm, nil) # WTF? I assume this is just priming the local DNS resolver cache?!?! rescue end @@ -86,7 +82,7 @@ module Vmpooler $redis.smove('vmpooler__pending__' + pool, 'vmpooler__ready__' + pool, vm) $redis.hset('vmpooler__boot__' + Date.today.to_s, pool + ':' + vm, finish) - $logger.log('s', "[>] [#{pool}] '#{vm}' moved to 'ready' queue") + $logger.log('s', "[>] [#{pool}] '#{vm}' moved from 'pending' to 'ready' queue") end end diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index fcdee1d..005c977 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -157,6 +157,8 @@ EOT end describe '#move_pending_vm_to_ready' do + let(:host) { { 'hostname' => vm }} + before do expect(subject).not_to be_nil allow(Socket).to receive(:getaddrinfo) @@ -171,21 +173,13 @@ EOT expect(logger).to receive(:log).exactly(0).times expect(Socket).to receive(:getaddrinfo).exactly(0).times - allow(host).to receive(:summary).and_return( double('summary') ) - allow(host).to receive_message_chain(:summary, :guest).and_return( double('guest') ) - allow(host).to receive_message_chain(:summary, :guest, :hostName).and_return ('different_name') + host['hostname'] = 'different_name' subject.move_pending_vm_to_ready(vm, pool, host) end end context 'when hostname matches VM name' do - before do - allow(host).to receive(:summary).and_return( double('summary') ) - allow(host).to receive_message_chain(:summary, :guest).and_return( double('guest') ) - allow(host).to receive_message_chain(:summary, :guest, :hostName).and_return (vm) - end - it 'should move the VM from pending to ready pool' do expect(redis.sismember("vmpooler__pending__#{pool}", vm)).to be(true) expect(redis.sismember("vmpooler__ready__#{pool}", vm)).to be(false) @@ -195,7 +189,7 @@ EOT end it 'should log a message' do - expect(logger).to receive(:log).with('s', "[>] [#{pool}] '#{vm}' moved to 'ready' queue") + expect(logger).to receive(:log).with('s', "[>] [#{pool}] '#{vm}' moved from 'pending' to 'ready' queue") subject.move_pending_vm_to_ready(vm, pool, host) end From 8c421aa3bd72878a42a242f5aa6f7435e664e16d Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 13:50:30 -0700 Subject: [PATCH 07/32] (POOLER-70) Update check_ready_vm for VM Provider Previously the Pool Manager would use vSphere objects directly. This commit - Modifies the pool_manager to use the VM provider methods instead - Splits the check_ready_vm function into two. One function spawns the thread while the other actually does the work. This makes testing much easier. 
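
For illustration, the split lets the specs cover each half independently: the thread-spawning wrapper is verified once with a stubbed Thread, while the worker (_check_ready_vm) can be driven synchronously against Redis and the provider double. A minimal sketch of that wrapper spec, mirroring the full specs in the diff that follows:

    it 'calls _check_ready_vm' do
      # Stub Thread.new so the block runs inline, then assert it delegates to the worker
      expect(Thread).to receive(:new).and_yield
      expect(subject).to receive(:_check_ready_vm).with(vm, pool, ttl, provider)

      subject.check_ready_vm(vm, pool, ttl, provider)
    end
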
--- lib/vmpooler/pool_manager.rb | 96 ++++++++++++----------- spec/unit/pool_manager_spec.rb | 136 +++++++++++++++++---------------- 2 files changed, 118 insertions(+), 114 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index 01bc1f0..67ff54e 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -88,63 +88,61 @@ module Vmpooler def check_ready_vm(vm, pool, ttl, provider) Thread.new do - if ttl > 0 - if (((Time.now - host.runtime.bootTime) / 60).to_s[/^\d+\.\d{1}/].to_f) > ttl - $redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm) - - $logger.log('d', "[!] [#{pool}] '#{vm}' reached end of TTL after #{ttl} minutes, removed from 'ready' queue") - return - end + begin + _check_ready_vm(vm, pool, ttl, provider) + rescue => err + $logger.log('s', "[!] [#{pool}] '#{vm}' failed while checking a ready vm : #{err}") + raise end + end + end - check_stamp = $redis.hget('vmpooler__vm__' + vm, 'check') + def _check_ready_vm(vm, pool, ttl, provider) + # Periodically check that the VM is available + check_stamp = $redis.hget('vmpooler__vm__' + vm, 'check') + return if check_stamp && (((Time.now - Time.parse(check_stamp)) / 60) <= $config[:config]['vm_checktime']) - if - (!check_stamp) || - (((Time.now - Time.parse(check_stamp)) / 60) > $config[:config]['vm_checktime']) + host = provider.get_vm(pool, vm) + # Check if the host even exists + if !host + $redis.srem('vmpooler__ready__' + pool, vm) + $logger.log('s', "[!] [#{pool}] '#{vm}' not found in inventory, removed from 'ready' queue") + return + end - $redis.hset('vmpooler__vm__' + vm, 'check', Time.now) + # Check if the hosts TTL has expired + if ttl > 0 + if (((Time.now - host['boottime']) / 60).to_s[/^\d+\.\d{1}/].to_f) > ttl + $redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm) - host = provider.find_vm(vm) + $logger.log('d', "[!] [#{pool}] '#{vm}' reached end of TTL after #{ttl} minutes, removed from 'ready' queue") + return + end + end - if host - if - (host.runtime) && - (host.runtime.powerState) && - (host.runtime.powerState != 'poweredOn') + $redis.hset('vmpooler__vm__' + vm, 'check', Time.now) + # Check if the VM is not powered on + unless (host['powerstate'].casecmp('poweredon') == 0) + $redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm) + $logger.log('d', "[!] [#{pool}] '#{vm}' appears to be powered off, removed from 'ready' queue") + return + end - $redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm) + # Check if the hostname has magically changed from underneath Pooler + if (host['hostname'] != vm) + $redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm) + $logger.log('d', "[!] [#{pool}] '#{vm}' has mismatched hostname, removed from 'ready' queue") + return + end - $logger.log('d', "[!] [#{pool}] '#{vm}' appears to be powered off, removed from 'ready' queue") - return - end - - if - (host.summary.guest) && - (host.summary.guest.hostName) && - (host.summary.guest.hostName != vm) - - $redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm) - - $logger.log('d', "[!] [#{pool}] '#{vm}' has mismatched hostname, removed from 'ready' queue") - return - end - else - $redis.srem('vmpooler__ready__' + pool, vm) - - $logger.log('s', "[!] 
[#{pool}] '#{vm}' not found in vCenter inventory, removed from 'ready' queue") - end - - begin - open_socket vm - rescue - if $redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm) - $logger.log('d', "[!] [#{pool}] '#{vm}' is unreachable, removed from 'ready' queue") - else - $logger.log('d', "[!] [#{pool}] '#{vm}' is unreachable, and failed to remove from 'ready' queue") - end - return - end + # Check if the VM is still ready/available + begin + fail "VM #{vm} is not ready" unless provider.vm_ready?(pool, vm) + rescue + if $redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm) + $logger.log('d', "[!] [#{pool}] '#{vm}' is unreachable, removed from 'ready' queue") + else + $logger.log('d', "[!] [#{pool}] '#{vm}' is unreachable, and failed to remove from 'ready' queue") end end end diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index 005c977..f56aee4 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -216,103 +216,115 @@ EOT end describe '#check_ready_vm' do - let(:provider) { double('provider') } let(:ttl) { 0 } - let(:config) { - YAML.load(<<-EOT ---- -:config: - vm_checktime: 15 - -EOT - ) - } - - before(:each) do - expect(Thread).to receive(:new).and_yield - create_ready_vm(pool,vm) + before do + expect(subject).not_to be_nil end - it 'should raise an error if a TTL above zero is specified' do - expect { subject.check_ready_vm(vm,pool,5,provider) }.to raise_error(NameError) # This is an implementation bug + it 'calls _check_ready_vm' do + expect(Thread).to receive(:new).and_yield + expect(subject).to receive(:_check_ready_vm).with(vm, pool, ttl, provider) + + subject.check_ready_vm(vm, pool, ttl, provider) + end + end + + describe '#_check_ready_vm' do + let(:ttl) { 0 } + let(:host) { {} } + + before(:each) do + create_ready_vm(pool,vm) + config[:config] = {} + config[:config]['vm_checktime'] = 15 + + # Create a VM which is powered on + host['hostname'] = vm + host['powerstate'] = 'PoweredOn' + allow(provider).to receive(:get_vm).with(pool,vm).and_return(host) end context 'a VM that does not need to be checked' do it 'should do nothing' do - redis.hset("vmpooler__vm__#{vm}", 'check',Time.now.to_s) - subject.check_ready_vm(vm, pool, ttl, provider) + check_stamp = (Time.now - 60).to_s + redis.hset("vmpooler__vm__#{vm}", 'check', check_stamp) + expect(provider).to receive(:get_vm).exactly(0).times + subject._check_ready_vm(vm, pool, ttl, provider) + expect(redis.hget("vmpooler__vm__#{vm}", 'check')).to eq(check_stamp) end end context 'a VM that does not exist' do before do - allow(provider).to receive(:find_vm).and_return(nil) + expect(provider).to receive(:get_vm).with(pool,vm).and_return(nil) end - it 'should set the current check timestamp' do - allow(subject).to receive(:open_socket) + it 'should not set the current check timestamp' do + expect(redis.hget("vmpooler__vm__#{vm}", 'check')).to be_nil + subject._check_ready_vm(vm, pool, ttl, provider) expect(redis.hget("vmpooler__vm__#{vm}", 'check')).to be_nil - subject.check_ready_vm(vm, pool, ttl, provider) - expect(redis.hget("vmpooler__vm__#{vm}", 'check')).to_not be_nil end it 'should log a message' do - expect(logger).to receive(:log).with('s', "[!] [#{pool}] '#{vm}' not found in vCenter inventory, removed from 'ready' queue") - allow(subject).to receive(:open_socket) - subject.check_ready_vm(vm, pool, ttl, provider) + expect(logger).to receive(:log).with('s', "[!] 
[#{pool}] '#{vm}' not found in inventory, removed from 'ready' queue") + subject._check_ready_vm(vm, pool, ttl, provider) end it 'should remove the VM from the ready queue' do - allow(subject).to receive(:open_socket) expect(redis.sismember("vmpooler__ready__#{pool}", vm)).to be(true) - subject.check_ready_vm(vm, pool, ttl, provider) + subject._check_ready_vm(vm, pool, ttl, provider) expect(redis.sismember("vmpooler__ready__#{pool}", vm)).to be(false) end end - context 'a VM that needs to be checked' do - before(:each) do - redis.hset("vmpooler__vm__#{vm}", 'check',Date.new(2001,1,1).to_s) + context 'a VM that has never been checked' do + let(:last_check_date) { Date.new(2001,1,1).to_s } - allow(host).to receive(:summary).and_return( double('summary') ) - allow(host).to receive_message_chain(:summary, :guest).and_return( double('guest') ) - allow(host).to receive_message_chain(:summary, :guest, :hostName).and_return (vm) - - allow(provider).to receive(:find_vm).and_return(host) + it 'should set the current check timestamp' do + expect(redis.hget("vmpooler__vm__#{vm}", 'check')).to be_nil + subject._check_ready_vm(vm, pool, ttl, provider) + expect(redis.hget("vmpooler__vm__#{vm}", 'check')).to_not be_nil + end + end + + context 'a VM that needs to be checked' do + let(:last_check_date) { Date.new(2001,1,1).to_s } + before(:each) do + redis.hset("vmpooler__vm__#{vm}", 'check',last_check_date) + end + + it 'should set the current check timestamp' do + expect(redis.hget("vmpooler__vm__#{vm}", 'check')).to eq(last_check_date) + subject._check_ready_vm(vm, pool, ttl, provider) + expect(redis.hget("vmpooler__vm__#{vm}", 'check')).to_not eq(last_check_date) end context 'and is ready' do before(:each) do - allow(host).to receive(:runtime).and_return( double('runtime') ) - allow(host).to receive_message_chain(:runtime, :powerState).and_return('poweredOn') - allow(host).to receive_message_chain(:summary, :guest, :hostName).and_return (vm) - allow(subject).to receive(:open_socket).with(vm).and_return(nil) + expect(provider).to receive(:vm_ready?).with(pool, vm).and_return(true) end it 'should only set the next check interval' do - subject.check_ready_vm(vm, pool, ttl, provider) + subject._check_ready_vm(vm, pool, ttl, provider) end end - context 'is turned off, a name mismatch and not available via TCP' do + context 'is turned off' do before(:each) do - allow(host).to receive(:runtime).and_return( double('runtime') ) - allow(host).to receive_message_chain(:runtime, :powerState).and_return('poweredOff') - allow(host).to receive_message_chain(:summary, :guest, :hostName).and_return ('') - allow(subject).to receive(:open_socket).with(vm).and_raise(SocketError,'getaddrinfo: No such host is known') + host['powerstate'] = 'PoweredOff' end it 'should move the VM to the completed queue' do expect(redis).to receive(:smove).with("vmpooler__ready__#{pool}", "vmpooler__completed__#{pool}", vm) - subject.check_ready_vm(vm, pool, ttl, provider) + subject._check_ready_vm(vm, pool, ttl, provider) end it 'should move the VM to the completed queue in Redis' do expect(redis.sismember("vmpooler__ready__#{pool}", vm)).to be(true) expect(redis.sismember("vmpooler__completed__#{pool}", vm)).to be(false) - subject.check_ready_vm(vm, pool, ttl, provider) + subject._check_ready_vm(vm, pool, ttl, provider) expect(redis.sismember("vmpooler__ready__#{pool}", vm)).to be(false) expect(redis.sismember("vmpooler__completed__#{pool}", vm)).to be(true) end @@ -320,28 +332,25 @@ EOT it 'should log messages about being powered off' do 
expect(logger).to receive(:log).with('d', "[!] [#{pool}] '#{vm}' appears to be powered off, removed from 'ready' queue") - subject.check_ready_vm(vm, pool, ttl, provider) + subject._check_ready_vm(vm, pool, ttl, provider) end end - context 'is turned on, a name mismatch and not available via TCP' do + context 'is turned on, a name mismatch' do before(:each) do - allow(host).to receive(:runtime).and_return( double('runtime') ) - allow(host).to receive_message_chain(:runtime, :powerState).and_return('poweredOn') - allow(host).to receive_message_chain(:summary, :guest, :hostName).and_return ('') - allow(subject).to receive(:open_socket).with(vm).and_raise(SocketError,'getaddrinfo: No such host is known') + host['hostname'] = 'different_name' end it 'should move the VM to the completed queue' do expect(redis).to receive(:smove).with("vmpooler__ready__#{pool}", "vmpooler__completed__#{pool}", vm) - subject.check_ready_vm(vm, pool, ttl, provider) + subject._check_ready_vm(vm, pool, ttl, provider) end it 'should move the VM to the completed queue in Redis' do expect(redis.sismember("vmpooler__ready__#{pool}", vm)).to be(true) expect(redis.sismember("vmpooler__completed__#{pool}", vm)).to be(false) - subject.check_ready_vm(vm, pool, ttl, provider) + subject._check_ready_vm(vm, pool, ttl, provider) expect(redis.sismember("vmpooler__ready__#{pool}", vm)).to be(false) expect(redis.sismember("vmpooler__completed__#{pool}", vm)).to be(true) end @@ -349,28 +358,25 @@ EOT it 'should log messages about being misnamed' do expect(logger).to receive(:log).with('d', "[!] [#{pool}] '#{vm}' has mismatched hostname, removed from 'ready' queue") - subject.check_ready_vm(vm, pool, ttl, provider) + subject._check_ready_vm(vm, pool, ttl, provider) end end - context 'is turned on, with correct name and not available via TCP' do + context 'is turned on, with correct name and is not ready' do before(:each) do - allow(host).to receive(:runtime).and_return( double('runtime') ) - allow(host).to receive_message_chain(:runtime, :powerState).and_return('poweredOn') - allow(host).to receive_message_chain(:summary, :guest, :hostName).and_return (vm) - allow(subject).to receive(:open_socket).with(vm).and_raise(SocketError,'getaddrinfo: No such host is known') + expect(provider).to receive(:vm_ready?).with(pool, vm).and_return(false) end it 'should move the VM to the completed queue' do expect(redis).to receive(:smove).with("vmpooler__ready__#{pool}", "vmpooler__completed__#{pool}", vm) - subject.check_ready_vm(vm, pool, ttl, provider) + subject._check_ready_vm(vm, pool, ttl, provider) end it 'should move the VM to the completed queue in Redis' do expect(redis.sismember("vmpooler__ready__#{pool}", vm)).to be(true) expect(redis.sismember("vmpooler__completed__#{pool}", vm)).to be(false) - subject.check_ready_vm(vm, pool, ttl, provider) + subject._check_ready_vm(vm, pool, ttl, provider) expect(redis.sismember("vmpooler__ready__#{pool}", vm)).to be(false) expect(redis.sismember("vmpooler__completed__#{pool}", vm)).to be(true) end @@ -378,7 +384,7 @@ EOT it 'should log messages about being unreachable' do expect(logger).to receive(:log).with('d', "[!] 
[#{pool}] '#{vm}' is unreachable, removed from 'ready' queue") - subject.check_ready_vm(vm, pool, ttl, provider) + subject._check_ready_vm(vm, pool, ttl, provider) end end end From cc1910fd762c34e8845323ff2da2fac809cdd5ba Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 13:51:36 -0700 Subject: [PATCH 08/32] (POOLER-70) Update check_running_vm for VM Provider Previously the Pool Manager would use vSphere objects directly. This commit - Modifies the pool_manager to use the VM provider methods instead --- lib/vmpooler/pool_manager.rb | 9 +++++++-- spec/unit/pool_manager_spec.rb | 19 +++++++++---------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index 67ff54e..cb76788 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -149,12 +149,17 @@ module Vmpooler def check_running_vm(vm, pool, ttl, provider) Thread.new do - _check_running_vm(vm, pool, ttl, provider) + begin + _check_running_vm(vm, pool, ttl, provider) + rescue => err + $logger.log('s', "[!] [#{pool}] '#{vm}' failed while checking VM with an error: #{err}") + raise + end end end def _check_running_vm(vm, pool, ttl, provider) - host = provider.find_vm(vm) + host = provider.get_vm(pool, vm) if host queue_from, queue_to = 'running', 'completed' diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index f56aee4..01af0d4 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -407,7 +407,7 @@ EOT end describe '#_check_running_vm' do - let(:provider) { double('provider') } + let(:host) { {} } before do expect(subject).not_to be_nil @@ -415,24 +415,26 @@ EOT before(:each) do create_running_vm(pool,vm) + + # Create a VM which is powered on + host['hostname'] = vm + host['powerstate'] = 'PoweredOn' + allow(provider).to receive(:get_vm).with(pool,vm).and_return(host) end it 'does nothing with a missing VM' do - allow(provider).to receive(:find_vm).and_return(nil) + expect(provider).to receive(:get_vm).with(pool,vm).and_return(nil) expect(redis.sismember("vmpooler__running__#{pool}", vm)).to be(true) subject._check_running_vm(vm, pool, timeout, provider) expect(redis.sismember("vmpooler__running__#{pool}", vm)).to be(true) end context 'valid host' do - let(:vm_host) { double('vmhost') } - it 'should not move VM when not poweredOn' do # I'm not sure this test is useful. There is no codepath # in _check_running_vm that looks at Power State - allow(provider).to receive(:find_vm).and_return vm_host - allow(vm_host).to receive(:runtime).and_return true - allow(vm_host).to receive_message_chain(:runtime, :powerState).and_return 'poweredOff' + host['powerstate'] = 'PoweredOff' + expect(logger).not_to receive(:log).with('d', "[!] 
[#{pool}] '#{vm}' appears to be powered off or dead") expect(redis.sismember("vmpooler__running__#{pool}", vm)).to be(true) subject._check_running_vm(vm, pool, timeout, provider) @@ -440,14 +442,12 @@ EOT end it 'should not move VM if it has no checkout time' do - allow(provider).to receive(:find_vm).and_return vm_host expect(redis.sismember("vmpooler__running__#{pool}", vm)).to be(true) subject._check_running_vm(vm, pool, 0, provider) expect(redis.sismember("vmpooler__running__#{pool}", vm)).to be(true) end it 'should not move VM if TTL is zero' do - allow(provider).to receive(:find_vm).and_return vm_host redis.hset("vmpooler__active__#{pool}", vm,(Time.now - timeout*60*60).to_s) expect(redis.sismember("vmpooler__running__#{pool}", vm)).to be(true) subject._check_running_vm(vm, pool, 0, provider) @@ -455,7 +455,6 @@ EOT end it 'should move VM when past TTL' do - allow(provider).to receive(:find_vm).and_return vm_host redis.hset("vmpooler__active__#{pool}", vm,(Time.now - timeout*60*60).to_s) expect(redis.sismember("vmpooler__running__#{pool}", vm)).to be(true) expect(redis.sismember("vmpooler__completed__#{pool}", vm)).to be(false) From b21d78fa49a54d994e14c64456df4516fb80a242 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 13:59:08 -0700 Subject: [PATCH 09/32] (POOLER-70) Update _clone_vm for VM Provider Previously the Pool Manager would use vSphere objects directly. This commit - Modifies the pool_manager to use the VM provider methods instead --- lib/vmpooler/pool_manager.rb | 94 ++-------- spec/unit/pool_manager_spec.rb | 310 +++++++-------------------------- 2 files changed, 78 insertions(+), 326 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index cb76788..5066dd6 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -195,99 +195,35 @@ module Vmpooler end def _clone_vm(pool, provider) - template = pool['template'] - folder = pool['folder'] - datastore = pool['datastore'] - target = pool['clone_target'] - vm = {} - - if template =~ /\// - templatefolders = template.split('/') - vm['template'] = templatefolders.pop - end - - if templatefolders - vm[vm['template']] = provider.find_folder(templatefolders.join('/')).find(vm['template']) - else - fail 'Please provide a full path to the template' - end - - if vm['template'].length == 0 - fail "Unable to find template '#{vm['template']}'!" - end + pool_name = pool['name'] # Generate a randomized hostname o = [('a'..'z'), ('0'..'9')].map(&:to_a).flatten - vm['hostname'] = $config[:config]['prefix'] + o[rand(25)] + (0...14).map { o[rand(o.length)] }.join + new_vmname = $config[:config]['prefix'] + o[rand(25)] + (0...14).map { o[rand(o.length)] }.join # Add VM to Redis inventory ('pending' pool) - $redis.sadd('vmpooler__pending__' + vm['template'], vm['hostname']) - $redis.hset('vmpooler__vm__' + vm['hostname'], 'clone', Time.now) - $redis.hset('vmpooler__vm__' + vm['hostname'], 'template', vm['template']) - - # Annotate with creation time, origin template, etc. 
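As an illustration (not part of the patch hunk itself), the randomized-hostname logic that _clone_vm now uses can be run in isolation. A minimal sketch, assuming 'vm-' as an example prefix value:

    # Mirrors the generator in _clone_vm: one character drawn from the first 25
    # entries of the charset, followed by 14 characters drawn from the full set.
    prefix   = 'vm-'   # assumed example value; in the patch this comes from $config[:config]['prefix']
    charset  = [('a'..'z'), ('0'..'9')].map(&:to_a).flatten
    hostname = prefix + charset[rand(25)] + (0...14).map { charset[rand(charset.length)] }.join
    puts hostname      # e.g. "vm-qh3k0z9f2plw7xn"
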
- # Add extraconfig options that can be queried by vmtools - configSpec = RbVmomi::VIM.VirtualMachineConfigSpec( - annotation: JSON.pretty_generate( - name: vm['hostname'], - created_by: $config[:vsphere]['username'], - base_template: vm['template'], - creation_timestamp: Time.now.utc - ), - extraConfig: [ - { key: 'guestinfo.hostname', - value: vm['hostname'] - } - ] - ) - - # Choose a clone target - if target - $clone_target = provider.find_least_used_host(target) - elsif $config[:config]['clone_target'] - $clone_target = provider.find_least_used_host($config[:config]['clone_target']) - end - - # Put the VM in the specified folder and resource pool - relocateSpec = RbVmomi::VIM.VirtualMachineRelocateSpec( - datastore: provider.find_datastore(datastore), - host: $clone_target, - diskMoveType: :moveChildMostDiskBacking - ) - - # Create a clone spec - spec = RbVmomi::VIM.VirtualMachineCloneSpec( - location: relocateSpec, - config: configSpec, - powerOn: true, - template: false - ) - - # Clone the VM - $logger.log('d', "[ ] [#{vm['template']}] '#{vm['hostname']}' is being cloned from '#{vm['template']}'") + $redis.sadd('vmpooler__pending__' + pool_name, new_vmname) + $redis.hset('vmpooler__vm__' + new_vmname, 'clone', Time.now) + $redis.hset('vmpooler__vm__' + new_vmname, 'template', pool_name) begin + $logger.log('d', "[ ] [#{pool_name}] Starting to clone '#{new_vmname}'") start = Time.now - vm[vm['template']].CloneVM_Task( - folder: provider.find_folder(folder), - name: vm['hostname'], - spec: spec - ).wait_for_completion + provider.create_vm(pool_name, new_vmname) finish = '%.2f' % (Time.now - start) - $redis.hset('vmpooler__clone__' + Date.today.to_s, vm['template'] + ':' + vm['hostname'], finish) - $redis.hset('vmpooler__vm__' + vm['hostname'], 'clone_time', finish) + $redis.hset('vmpooler__clone__' + Date.today.to_s, pool_name + ':' + new_vmname, finish) + $redis.hset('vmpooler__vm__' + new_vmname, 'clone_time', finish) + $logger.log('s', "[+] [#{pool_name}] '#{new_vmname}' cloned in #{finish} seconds") - $logger.log('s', "[+] [#{vm['template']}] '#{vm['hostname']}' cloned from '#{vm['template']}' in #{finish} seconds") + $metrics.timing("clone.#{pool_name}", finish) rescue => err - $logger.log('s', "[!] [#{vm['template']}] '#{vm['hostname']}' clone failed with an error: #{err}") - $redis.srem('vmpooler__pending__' + vm['template'], vm['hostname']) + $logger.log('s', "[!] 
[#{pool_name}] '#{new_vmname}' clone failed with an error: #{err}") + $redis.srem('vmpooler__pending__' + pool_name, new_vmname) raise + ensure + $redis.decr('vmpooler__tasks__clone') end - - $redis.decr('vmpooler__tasks__clone') - - $metrics.timing("clone.#{vm['template']}", finish) end # Destroy a VM diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index 01af0d4..6c4af1a 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -504,299 +504,115 @@ EOT end describe '#clone_vm' do - let(:provider) { double('provider') } - - let(:config) { - YAML.load(<<-EOT ---- -:config: - prefix: "prefix" -:vsphere: - username: "vcenter_user" -:pools: - - name: #{pool} -EOT - ) - } - let (:pool_object) { config[:pools][0] } + let (:pool_object) { { 'name' => pool } } before do expect(subject).not_to be_nil + expect(Thread).to receive(:new).and_yield end it 'calls _clone_vm' do - expect(Thread).to receive(:new).and_yield expect(subject).to receive(:_clone_vm).with(pool_object,provider) subject.clone_vm(pool_object,provider) end it 'logs a message if an error is raised' do - expect(Thread).to receive(:new).and_yield - expect(logger).to receive(:log) - expect(subject).to receive(:_clone_vm).with(pool_object,provider).and_raise('an_error') + allow(logger).to receive(:log) + expect(logger).to receive(:log).with('s',"[!] [#{pool_object['name']}] failed while cloning VM with an error: MockError") + expect(subject).to receive(:_clone_vm).with(pool_object,provider).and_raise('MockError') - expect{subject.clone_vm(pool_object,provider)}.to raise_error(/an_error/) + expect{subject.clone_vm(pool_object,provider)}.to raise_error(/MockError/) end end describe '#_clone_vm' do + let (:pool_object) { { 'name' => pool } } + before do expect(subject).not_to be_nil end - let (:folder) { 'vmfolder' } - let (:folder_object) { double('folder_object') } - let (:template_name) { pool } - let (:template) { "template/#{template_name}" } - let (:datastore) { 'datastore' } - let (:target) { 'clone_target' } - let(:config) { YAML.load(<<-EOT --- :config: prefix: "prefix" -:vsphere: - username: "vcenter_user" -:pools: - - name: #{pool} - template: '#{template}' - folder: '#{folder}' - datastore: '#{datastore}' - clone_target: '#{target}' EOT ) } - let (:provider) { double('provider') } - let (:template_folder_object) { double('template_folder_object') } - let (:template_vm_object) { double('template_vm_object') } - let (:clone_task) { double('clone_task') } - let (:pool_object) { config[:pools][0] } - - context 'no template specified' do + context 'with no errors during cloning' do before(:each) do - pool_object['template'] = nil + expect(metrics).to receive(:timing).with(/clone\./,/0/) + expect(provider).to receive(:create_vm).with(pool, String) + allow(logger).to receive(:log) end - it 'should raise an error' do - expect{subject._clone_vm(pool_object,provider)}.to raise_error(/Please provide a full path to the template/) + it 'should create a cloning VM' do + expect(redis.scard("vmpooler__pending__#{pool}")).to eq(0) + + subject._clone_vm(pool_object,provider) + + expect(redis.scard("vmpooler__pending__#{pool}")).to eq(1) + # Get the new VM Name from the pending pool queue as it should be the only entry + vm_name = redis.smembers("vmpooler__pending__#{pool}")[0] + expect(redis.hget("vmpooler__vm__#{vm_name}", 'clone')).to_not be_nil + expect(redis.hget("vmpooler__vm__#{vm_name}", 'template')).to eq(pool) + expect(redis.hget("vmpooler__clone__#{Date.today.to_s}", 
"#{pool}:#{vm_name}")).to_not be_nil + expect(redis.hget("vmpooler__vm__#{vm_name}", 'clone_time')).to_not be_nil + end + + it 'should decrement the clone tasks counter' do + redis.incr('vmpooler__tasks__clone') + redis.incr('vmpooler__tasks__clone') + expect(redis.get('vmpooler__tasks__clone')).to eq('2') + subject._clone_vm(pool_object,provider) + expect(redis.get('vmpooler__tasks__clone')).to eq('1') + end + + it 'should log a message that is being cloned from a template' do + expect(logger).to receive(:log).with('d',/\[ \] \[#{pool}\] Starting to clone '(.+)'/) + + subject._clone_vm(pool_object,provider) + end + + it 'should log a message that it completed being cloned' do + expect(logger).to receive(:log).with('s',/\[\+\] \[#{pool}\] '(.+)' cloned in [0-9.]+ seconds/) + + subject._clone_vm(pool_object,provider) end end - context 'a template with no forward slash in the string' do + context 'with an error during cloning' do before(:each) do - pool_object['template'] = template_name + expect(provider).to receive(:create_vm).with(pool, String).and_raise('MockError') + allow(logger).to receive(:log) end - it 'should raise an error' do - expect{subject._clone_vm(pool_object,provider)}.to raise_error(/Please provide a full path to the template/) - end - end + it 'should not create a cloning VM' do + expect(redis.scard("vmpooler__pending__#{pool}")).to eq(0) - # Note - It is impossible to get into the following code branch - # ... - # if vm['template'].length == 0 - # fail "Unable to find template '#{vm['template']}'!" - # end - # ... + expect{subject._clone_vm(pool_object,provider)}.to raise_error(/MockError/) - context "Template name does not match pool name (Implementation Bug)" do - let (:template_name) { 'template_vm' } - - # The implementaion of _clone_vm incorrectly uses the VM Template name instead of the pool name. The VM Template represents the - # name of the VM to clone in vSphere whereas pool is the name of the pool in Pooler. The tests below document the behaviour of - # _clone_vm if the Template and Pool name differ. It is expected that these test will fail once this bug is removed. 
- - context 'a valid template' do - before(:each) do - expect(template_folder_object).to receive(:find).with(template_name).and_return(template_vm_object) - expect(provider).to receive(:find_folder).with('template').and_return(template_folder_object) - end - - context 'with no errors during cloning' do - before(:each) do - expect(provider).to receive(:find_least_used_host).with(target).and_return('least_used_host') - expect(provider).to receive(:find_datastore).with(datastore).and_return('datastore') - expect(provider).to receive(:find_folder).with('vmfolder').and_return(folder_object) - expect(template_vm_object).to receive(:CloneVM_Task).and_return(clone_task) - expect(clone_task).to receive(:wait_for_completion) - expect(metrics).to receive(:timing).with(/clone\./,/0/) - end - - it 'should create a cloning VM' do - expect(logger).to receive(:log).at_least(:once) - expect(redis.scard("vmpooler__pending__#{pool}")).to eq(0) - - subject._clone_vm(pool_object,provider) - - expect(redis.scard("vmpooler__pending__#{template_name}")).to eq(1) - # Get the new VM Name from the pending pool queue as it should be the only entry - vm_name = redis.smembers("vmpooler__pending__#{template_name}")[0] - expect(redis.hget("vmpooler__vm__#{vm_name}", 'clone')).to_not be_nil - expect(redis.hget("vmpooler__vm__#{vm_name}", 'template')).to eq(template_name) - expect(redis.hget("vmpooler__clone__#{Date.today.to_s}", "#{template_name}:#{vm_name}")).to_not be_nil - expect(redis.hget("vmpooler__vm__#{vm_name}", 'clone_time')).to_not be_nil - end - - it 'should log a message that is being cloned from a template' do - expect(logger).to receive(:log).with('d',/\[ \] \[#{template_name}\] '(.+)' is being cloned from '#{template_name}'/) - allow(logger).to receive(:log) - - subject._clone_vm(pool_object,provider) - end - - it 'should log a message that it completed being cloned' do - expect(logger).to receive(:log).with('s',/\[\+\] \[#{template_name}\] '(.+)' cloned from '#{template_name}' in [0-9.]+ seconds/) - allow(logger).to receive(:log) - - subject._clone_vm(pool_object,provider) - end - end - - # An error can be cause by the following configuration errors: - # - Missing or invalid datastore - # - Missing or invalid clone target - # also any runtime errors during the cloning process - # https://www.vmware.com/support/developer/converter-sdk/conv50_apireference/vim.VirtualMachine.html#clone - context 'with an error during cloning' do - before(:each) do - expect(provider).to receive(:find_least_used_host).with(target).and_return('least_used_host') - expect(provider).to receive(:find_datastore).with(datastore).and_return(nil) - expect(provider).to receive(:find_folder).with('vmfolder').and_return(folder_object) - expect(template_vm_object).to receive(:CloneVM_Task).and_return(clone_task) - expect(clone_task).to receive(:wait_for_completion).and_raise(RuntimeError,'SomeError') - expect(metrics).to receive(:timing).with(/clone\./,/0/).exactly(0).times - - end - - it 'should raise an error within the Thread' do - expect(logger).to receive(:log).at_least(:once) - expect{subject._clone_vm(pool_object,provider)}.to raise_error(/SomeError/) - end - - it 'should log a message that is being cloned from a template' do - expect(logger).to receive(:log).with('d',/\[ \] \[#{template_name}\] '(.+)' is being cloned from '#{template_name}'/) - allow(logger).to receive(:log) - - # Swallow the error - begin - subject._clone_vm(pool_object,provider) - rescue - end - end - - it 'should log messages that the clone failed' do - 
expect(logger).to receive(:log).with('s', /\[!\] \[#{template_name}\] '(.+)' clone failed with an error: SomeError/) - allow(logger).to receive(:log) - - # Swallow the error - begin - subject._clone_vm(pool_object,provider) - rescue - end - end - end - end - end - - context 'a valid template' do - before(:each) do - expect(template_folder_object).to receive(:find).with(template_name).and_return(template_vm_object) - expect(provider).to receive(:find_folder).with('template').and_return(template_folder_object) + expect(redis.scard("vmpooler__pending__#{pool}")).to eq(0) + # Get the new VM Name from the pending pool queue as it should be the only entry + vm_name = redis.smembers("vmpooler__pending__#{pool}")[0] + expect(vm_name).to be_nil end - context 'with no errors during cloning' do - before(:each) do - expect(provider).to receive(:find_least_used_host).with(target).and_return('least_used_host') - expect(provider).to receive(:find_datastore).with(datastore).and_return('datastore') - expect(provider).to receive(:find_folder).with('vmfolder').and_return(folder_object) - expect(template_vm_object).to receive(:CloneVM_Task).and_return(clone_task) - expect(clone_task).to receive(:wait_for_completion) - expect(metrics).to receive(:timing).with(/clone\./,/0/) - end - - it 'should create a cloning VM' do - expect(logger).to receive(:log).at_least(:once) - expect(redis.scard("vmpooler__pending__#{pool}")).to eq(0) - - subject._clone_vm(pool_object,provider) - - expect(redis.scard("vmpooler__pending__#{pool}")).to eq(1) - # Get the new VM Name from the pending pool queue as it should be the only entry - vm_name = redis.smembers("vmpooler__pending__#{pool}")[0] - expect(redis.hget("vmpooler__vm__#{vm_name}", 'clone')).to_not be_nil - expect(redis.hget("vmpooler__vm__#{vm_name}", 'template')).to eq(template_name) - expect(redis.hget("vmpooler__clone__#{Date.today.to_s}", "#{pool}:#{vm_name}")).to_not be_nil - expect(redis.hget("vmpooler__vm__#{vm_name}", 'clone_time')).to_not be_nil - end - - it 'should decrement the clone tasks counter' do - redis.incr('vmpooler__tasks__clone') - redis.incr('vmpooler__tasks__clone') - expect(redis.get('vmpooler__tasks__clone')).to eq('2') - subject._clone_vm(pool_object,provider) - expect(redis.get('vmpooler__tasks__clone')).to eq('1') - end - - it 'should log a message that is being cloned from a template' do - expect(logger).to receive(:log).with('d',/\[ \] \[#{pool}\] '(.+)' is being cloned from '#{template_name}'/) - allow(logger).to receive(:log) - - subject._clone_vm(pool_object,provider) - end - - it 'should log a message that it completed being cloned' do - expect(logger).to receive(:log).with('s',/\[\+\] \[#{pool}\] '(.+)' cloned from '#{template_name}' in [0-9.]+ seconds/) - allow(logger).to receive(:log) - - subject._clone_vm(pool_object,provider) - end + it 'should decrement the clone tasks counter' do + redis.incr('vmpooler__tasks__clone') + redis.incr('vmpooler__tasks__clone') + expect(redis.get('vmpooler__tasks__clone')).to eq('2') + expect{subject._clone_vm(pool_object,provider)}.to raise_error(/MockError/) + expect(redis.get('vmpooler__tasks__clone')).to eq('1') end - # An error can be cause by the following configuration errors: - # - Missing or invalid datastore - # - Missing or invalid clone target - # also any runtime errors during the cloning process - # https://www.vmware.com/support/developer/converter-sdk/conv50_apireference/vim.VirtualMachine.html#clone - context 'with an error during cloning' do - before(:each) do - expect(provider).to 
receive(:find_least_used_host).with(target).and_return('least_used_host') - expect(provider).to receive(:find_datastore).with(datastore).and_return(nil) - expect(provider).to receive(:find_folder).with('vmfolder').and_return(folder_object) - expect(template_vm_object).to receive(:CloneVM_Task).and_return(clone_task) - expect(clone_task).to receive(:wait_for_completion).and_raise(RuntimeError,'SomeError') - expect(metrics).to receive(:timing).with(/clone\./,/0/).exactly(0).times + it 'should log messages that the clone failed' do + expect(logger).to receive(:log).with('s', /\[!\] \[#{pool}\] '(.+)' clone failed with an error: MockError/) - end - - it 'should raise an error within the Thread' do - expect(logger).to receive(:log).at_least(:once) - expect{subject._clone_vm(pool_object,provider)}.to raise_error(/SomeError/) - end - - it 'should log a message that is being cloned from a template' do - expect(logger).to receive(:log).with('d',/\[ \] \[#{pool}\] '(.+)' is being cloned from '#{template_name}'/) - allow(logger).to receive(:log) - - # Swallow the error - begin - subject._clone_vm(pool_object,provider) - rescue - end - end - - it 'should log messages that the clone failed' do - expect(logger).to receive(:log).with('s', /\[!\] \[#{pool}\] '(.+)' clone failed with an error: SomeError/) - allow(logger).to receive(:log) - - # Swallow the error - begin - subject._clone_vm(pool_object,provider) - rescue - end - end + expect{subject._clone_vm(pool_object,provider)}.to raise_error(/MockError/) end end end From 64bca33d456669884cd44bfb9261a4ec811ed5e4 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 13:59:59 -0700 Subject: [PATCH 10/32] (POOLER-70) Update destroy_vm for VM Provider Previously the Pool Manager would use vSphere objects directly. This commit - Modifies the pool_manager to use the VM provider methods instead - Splits the destroy_vm function into two. One function spawns the thread while the other actually does the work. This makes testing much easier. --- lib/vmpooler/pool_manager.rb | 48 +++++------ spec/unit/pool_manager_spec.rb | 153 +++++++++++++-------------------- 2 files changed, 81 insertions(+), 120 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index 5066dd6..ae5c510 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -229,36 +229,32 @@ module Vmpooler # Destroy a VM def destroy_vm(vm, pool, provider) Thread.new do - $redis.srem('vmpooler__completed__' + pool, vm) - $redis.hdel('vmpooler__active__' + pool, vm) - $redis.hset('vmpooler__vm__' + vm, 'destroy', Time.now) - - # Auto-expire metadata key - $redis.expire('vmpooler__vm__' + vm, ($config[:redis]['data_ttl'].to_i * 60 * 60)) - - host = provider.find_vm(vm) - - if host - start = Time.now - - if - (host.runtime) && - (host.runtime.powerState) && - (host.runtime.powerState == 'poweredOn') - - $logger.log('d', "[ ] [#{pool}] '#{vm}' is being shut down") - host.PowerOffVM_Task.wait_for_completion - end - - host.Destroy_Task.wait_for_completion - finish = '%.2f' % (Time.now - start) - - $logger.log('s', "[-] [#{pool}] '#{vm}' destroyed in #{finish} seconds") - $metrics.timing("destroy.#{pool}", finish) + begin + _destroy_vm(vm, pool, provider) + rescue => err + $logger.log('d', "[!] 
[#{pool}] '#{vm}' failed while destroying the VM with an error: #{err}") + raise end end end + def _destroy_vm(vm, pool, provider) + $redis.srem('vmpooler__completed__' + pool, vm) + $redis.hdel('vmpooler__active__' + pool, vm) + $redis.hset('vmpooler__vm__' + vm, 'destroy', Time.now) + + # Auto-expire metadata key + $redis.expire('vmpooler__vm__' + vm, ($config[:redis]['data_ttl'].to_i * 60 * 60)) + + start = Time.now + + provider.destroy_vm(pool, vm) + + finish = '%.2f' % (Time.now - start) + $logger.log('s', "[-] [#{pool}] '#{vm}' destroyed in #{finish} seconds") + $metrics.timing("destroy.#{pool}", finish) + end + def create_vm_disk(vm, disk_size, provider) Thread.new do _create_vm_disk(vm, disk_size, provider) diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index 6c4af1a..13c0b3f 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -617,140 +617,105 @@ EOT end end - describe "#destroy_vm" do - let (:provider) { double('provider') } - - let(:config) { - YAML.load(<<-EOT ---- -:redis: - data_ttl: 168 -EOT - ) - } - + describe '#destroy_vm' do before do expect(subject).not_to be_nil + expect(Thread).to receive(:new).and_yield end + it 'calls _destroy_vm' do + expect(subject).to receive(:_destroy_vm).with(vm,pool,provider) + + subject.destroy_vm(vm,pool,provider) + end + + it 'logs a message if an error is raised' do + allow(logger).to receive(:log) + expect(logger).to receive(:log).with('d',"[!] [#{pool}] '#{vm}' failed while destroying the VM with an error: MockError") + expect(subject).to receive(:_destroy_vm).with(vm,pool,provider).and_raise('MockError') + + expect{subject.destroy_vm(vm,pool,provider)}.to raise_error(/MockError/) + end + end + + describe "#_destroy_vm" do before(:each) do - expect(Thread).to receive(:new).and_yield + expect(subject).not_to be_nil create_completed_vm(vm,pool,true) + + allow(provider).to receive(:destroy_vm).with(pool,vm).and_return(true) + + # Set redis configuration + config[:redis] = {} + config[:redis]['data_ttl'] = 168 end context 'when redis data_ttl is not specified in the configuration' do - let(:config) { - YAML.load(<<-EOT ---- -:redis: - "key": "value" -EOT - ) - } - before(:each) do - expect(provider).to receive(:find_vm).and_return(nil) + config[:redis]['data_ttl'] = nil end it 'should call redis expire with 0' do expect(redis.hget("vmpooler__vm__#{vm}", 'checkout')).to_not be_nil - subject.destroy_vm(vm,pool,provider) + subject._destroy_vm(vm,pool,provider) expect(redis.hget("vmpooler__vm__#{vm}", 'checkout')).to be_nil end end context 'when there is no redis section in the configuration' do - let(:config) {} + before(:each) do + config[:redis] = nil + end it 'should raise an error' do - expect{ subject.destroy_vm(vm,pool,provider) }.to raise_error(NoMethodError) + expect{ subject._destroy_vm(vm,pool,provider) }.to raise_error(NoMethodError) end end context 'when a VM does not exist' do before(:each) do - expect(provider).to receive(:find_vm).and_return(nil) + # As per base_spec, destroy_vm will return true if the VM does not exist + expect(provider).to receive(:destroy_vm).with(pool,vm).and_return(true) end - it 'should not call any provider methods' do - subject.destroy_vm(vm,pool,provider) + it 'should not raise an error' do + subject._destroy_vm(vm,pool,provider) end end - context 'when a VM exists' do - let (:destroy_task) { double('destroy_task') } - let (:poweroff_task) { double('poweroff_task') } + context 'when the VM is destroyed without error' do + it 'should log a 
message the VM was destroyed' do + expect(logger).to receive(:log).with('s', /\[-\] \[#{pool}\] '#{vm}' destroyed in [0-9.]+ seconds/) + allow(logger).to receive(:log) + subject._destroy_vm(vm,pool,provider) + end + + it 'should emit a timing metric' do + expect(metrics).to receive(:timing).with("destroy.#{pool}", String) + + subject._destroy_vm(vm,pool,provider) + end + end + + context 'when the VM destruction raises an eror' do before(:each) do - expect(provider).to receive(:find_vm).and_return(host) - allow(host).to receive(:runtime).and_return(true) + # As per base_spec, destroy_vm will return true if the VM does not exist + expect(provider).to receive(:destroy_vm).with(pool,vm).and_raise('MockError') end - context 'and an error occurs during destroy' do - before(:each) do - allow(host).to receive_message_chain(:runtime, :powerState).and_return('poweredOff') - expect(host).to receive(:Destroy_Task).and_return(destroy_task) - expect(destroy_task).to receive(:wait_for_completion).and_raise(RuntimeError,'DestroyFailure') - expect(metrics).to receive(:timing).exactly(0).times - end + it 'should not log a message the VM was destroyed' do + expect(logger).to receive(:log).with('s', /\[-\] \[#{pool}\] '#{vm}' destroyed in [0-9.]+ seconds/).exactly(0).times + allow(logger).to receive(:log) - it 'should raise an error in the thread' do - expect { subject.destroy_vm(vm,pool,provider) }.to raise_error(/DestroyFailure/) - end + expect{ subject._destroy_vm(vm,pool,provider) }.to raise_error(/MockError/) end - context 'and an error occurs during power off' do - before(:each) do - allow(host).to receive_message_chain(:runtime, :powerState).and_return('poweredOn') - expect(host).to receive(:PowerOffVM_Task).and_return(poweroff_task) - expect(poweroff_task).to receive(:wait_for_completion).and_raise(RuntimeError,'PowerOffFailure') - expect(logger).to receive(:log).with('d', "[ ] [#{pool}] '#{vm}' is being shut down") - expect(metrics).to receive(:timing).exactly(0).times - end + it 'should not emit a timing metric' do + expect(metrics).to receive(:timing).with("destroy.#{pool}", String).exactly(0).times - it 'should raise an error in the thread' do - expect { subject.destroy_vm(vm,pool,provider) }.to raise_error(/PowerOffFailure/) - end - end - - context 'and is powered off' do - before(:each) do - allow(host).to receive_message_chain(:runtime, :powerState).and_return('poweredOff') - expect(host).to receive(:Destroy_Task).and_return(destroy_task) - expect(destroy_task).to receive(:wait_for_completion) - expect(metrics).to receive(:timing).with("destroy.#{pool}", /0/) - end - - it 'should log a message the VM was destroyed' do - expect(logger).to receive(:log).with('s', /\[-\] \[#{pool}\] '#{vm}' destroyed in [0-9.]+ seconds/) - subject.destroy_vm(vm,pool,provider) - end - end - - context 'and is powered on' do - before(:each) do - allow(host).to receive_message_chain(:runtime, :powerState).and_return('poweredOn') - expect(host).to receive(:Destroy_Task).and_return(destroy_task) - expect(host).to receive(:PowerOffVM_Task).and_return(poweroff_task) - expect(poweroff_task).to receive(:wait_for_completion) - expect(destroy_task).to receive(:wait_for_completion) - expect(metrics).to receive(:timing).with("destroy.#{pool}", /0/) - end - - it 'should log a message the VM is being shutdown' do - expect(logger).to receive(:log).with('d', "[ ] [#{pool}] '#{vm}' is being shut down") - allow(logger).to receive(:log) - - subject.destroy_vm(vm,pool,provider) - end - - it 'should log a message the VM was destroyed' 
do - expect(logger).to receive(:log).with('s', /\[-\] \[#{pool}\] '#{vm}' destroyed in [0-9.]+ seconds/) - allow(logger).to receive(:log) - - subject.destroy_vm(vm,pool,provider) - end + expect{ subject._destroy_vm(vm,pool,provider) }.to raise_error(/MockError/) end end end From fd54c4ca189fc4869c676988755c0a3a18bd050e Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 14:01:10 -0700 Subject: [PATCH 11/32] (POOLER-70) Update create_vm_disk for VM Provider Previously the Pool Manager would use vSphere objects directly. This commit - Modifies the pool_manager to use the VM provider methods instead --- lib/vmpooler/pool_manager.rb | 50 ++++++------ spec/unit/pool_manager_spec.rb | 145 ++++++++++++++++----------------- 2 files changed, 91 insertions(+), 104 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index ae5c510..55e5e81 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -255,44 +255,40 @@ module Vmpooler $metrics.timing("destroy.#{pool}", finish) end - def create_vm_disk(vm, disk_size, provider) + def create_vm_disk(pool_name, vm, disk_size, provider) Thread.new do - _create_vm_disk(vm, disk_size, provider) + begin + _create_vm_disk(pool_name, vm, disk_size, provider) + rescue => err + $logger.log('d', "[!] [#{pool_name}] '#{vm}' failed while creating disk: #{err}") + raise + end end end - def _create_vm_disk(vm, disk_size, provider) - host = provider.find_vm(vm) + def _create_vm_disk(pool_name, vm_name, disk_size, provider) + raise("Invalid disk size of '#{disk_size}' passed") if (disk_size.nil?) || (disk_size.empty?) || (disk_size.to_i <= 0) - if (host) && ((! disk_size.nil?) && (! disk_size.empty?) && (disk_size.to_i > 0)) - $logger.log('s', "[ ] [disk_manager] '#{vm}' is attaching a #{disk_size}gb disk") + $logger.log('s', "[ ] [disk_manager] '#{vm_name}' is attaching a #{disk_size}gb disk") - start = Time.now + start = Time.now - template = $redis.hget('vmpooler__vm__' + vm, 'template') - datastore = nil + result = provider.create_disk(pool_name, vm_name, disk_size.to_i) - $config[:pools].each do |pool| - if pool['name'] == template - datastore = pool['datastore'] - end - end + finish = '%.2f' % (Time.now - start) - if ((! datastore.nil?) && (! datastore.empty?)) - provider.add_disk(host, disk_size, datastore) + if result + rdisks = $redis.hget('vmpooler__vm__' + vm_name, 'disk') + disks = rdisks ? rdisks.split(':') : [] + disks.push("+#{disk_size}gb") + $redis.hset('vmpooler__vm__' + vm_name, 'disk', disks.join(':')) - rdisks = $redis.hget('vmpooler__vm__' + vm, 'disk') - disks = rdisks ? 
rdisks.split(':') : [] - disks.push("+#{disk_size}gb") - $redis.hset('vmpooler__vm__' + vm, 'disk', disks.join(':')) - - finish = '%.2f' % (Time.now - start) - - $logger.log('s', "[+] [disk_manager] '#{vm}' attached #{disk_size}gb disk in #{finish} seconds") - else - $logger.log('s', "[+] [disk_manager] '#{vm}' failed to attach disk") - end + $logger.log('s', "[+] [disk_manager] '#{vm_name}' attached #{disk_size}gb disk in #{finish} seconds") + else + $logger.log('s', "[+] [disk_manager] '#{vm_name}' failed to attach disk") end + + result end def create_vm_snapshot(vm, snapshot_name, provider) diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index 13c0b3f..fe23a40 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -730,104 +730,95 @@ EOT it 'calls _create_vm_disk' do expect(Thread).to receive(:new).and_yield - expect(subject).to receive(:_create_vm_disk).with(vm, disk_size, provider) + expect(subject).to receive(:_create_vm_disk).with(pool, vm, disk_size, provider) - subject.create_vm_disk(vm, disk_size, provider) + subject.create_vm_disk(pool, vm, disk_size, provider) end end describe "#_create_vm_disk" do - let(:provider) { double('provider') } let(:disk_size) { '15' } - let(:datastore) { 'datastore0'} - let(:config) { - YAML.load(<<-EOT ---- -:pools: - - name: #{pool} - datastore: '#{datastore}' -EOT - ) - } - - before do - expect(subject).not_to be_nil - end before(:each) do - allow(provider).to receive(:find_vm).with(vm).and_return(host) + expect(subject).not_to be_nil + allow(logger).to receive(:log) + create_running_vm(pool,vm,token) end - it 'should not do anything if the VM does not exist' do - expect(provider).to receive(:find_vm).with(vm).and_return(nil) - expect(logger).to receive(:log).exactly(0).times - subject._create_vm_disk(vm, disk_size, provider) + context 'Given a VM that does not exist' do + before(:each) do + # As per base_spec, create_disk will raise if the VM does not exist + expect(provider).to receive(:create_disk).with(pool,vm,disk_size.to_i).and_raise("VM #{vm} does not exist") + end + + it 'should not update redis if the VM does not exist' do + expect(redis).to receive(:hset).exactly(0).times + expect{ subject._create_vm_disk(pool, vm, disk_size, provider) }.to raise_error(RuntimeError) + end end - it 'should not do anything if the disk size is nil' do - expect(logger).to receive(:log).exactly(0).times - subject._create_vm_disk(vm, nil, provider) + context 'Given an invalid disk size' do + [{ :description => 'is nil', :value => nil }, + { :description => 'is an empty string', :value => '' }, + { :description => 'is less than 1', :value => '0' }, + { :description => 'cannot be converted to an integer', :value => 'abc123' }, + ].each do |testcase| + it "should not attempt the create the disk if the disk size #{testcase[:description]}" do + expect(provider).to receive(:create_disk).exactly(0).times + expect{ subject._create_vm_disk(pool, vm, testcase[:value], provider) }.to raise_error(/Invalid disk size/) + end + end + + it 'should raise an error if the disk size is a Fixnum' do + expect(redis).to receive(:hset).exactly(0).times + expect{ subject._create_vm_disk(pool, vm, 10, provider) }.to raise_error(NoMethodError,/empty?/) + end end - it 'should not do anything if the disk size is empty string' do - expect(logger).to receive(:log).exactly(0).times - subject._create_vm_disk(vm, '', provider) + context 'Given a successful disk creation' do + before(:each) do + expect(provider).to 
receive(:create_disk).with(pool,vm,disk_size.to_i).and_return(true) + end + + it 'should log a message' do + expect(logger).to receive(:log).with('s', "[ ] [disk_manager] '#{vm}' is attaching a #{disk_size}gb disk") + expect(logger).to receive(:log).with('s', /\[\+\] \[disk_manager\] '#{vm}' attached #{disk_size}gb disk in 0.[\d]+ seconds/) + + subject._create_vm_disk(pool, vm, disk_size, provider) + end + + it 'should update redis information when attaching the first disk' do + subject._create_vm_disk(pool, vm, disk_size, provider) + expect(redis.hget("vmpooler__vm__#{vm}", 'disk')).to eq("+#{disk_size}gb") + end + + it 'should update redis information when attaching the additional disks' do + initial_disks = '+10gb:+20gb' + redis.hset("vmpooler__vm__#{vm}", 'disk', initial_disks) + + subject._create_vm_disk(pool, vm, disk_size, provider) + expect(redis.hget("vmpooler__vm__#{vm}", 'disk')).to eq("#{initial_disks}:+#{disk_size}gb") + end end - it 'should not do anything if the disk size is less than 1' do - expect(logger).to receive(:log).exactly(0).times - subject._create_vm_disk(vm, '0', provider) - end + context 'Given a failed disk creation' do + before(:each) do + expect(provider).to receive(:create_disk).with(pool,vm,disk_size.to_i).and_return(false) + end - it 'should not do anything if the disk size cannot be converted to an integer' do - expect(logger).to receive(:log).exactly(0).times - subject._create_vm_disk(vm, 'abc123', provider) - end + it 'should not update redis information' do + expect(redis).to receive(:hset).exactly(0).times - it 'should raise an error if the disk size is a Fixnum' do - expect(logger).to receive(:log).exactly(0).times - expect{ subject._create_vm_disk(vm, 10, provider) }.to raise_error(NoMethodError,/empty?/) - end + subject._create_vm_disk(pool, vm, disk_size, provider) + expect(redis.hget("vmpooler__vm__#{vm}", 'disk')).to be_nil + end - it 'should not do anything if the datastore for pool is nil' do - expect(logger).to receive(:log).with('s', "[ ] [disk_manager] '#{vm}' is attaching a #{disk_size}gb disk") - expect(logger).to receive(:log).with('s', "[+] [disk_manager] '#{vm}' failed to attach disk") - config[:pools][0]['datastore'] = nil + it 'should log a message' do + expect(logger).to receive(:log).with('s', "[+] [disk_manager] '#{vm}' failed to attach disk") - subject._create_vm_disk(vm, disk_size, provider) - end - - it 'should not do anything if the datastore for pool is empty' do - expect(logger).to receive(:log).with('s', "[ ] [disk_manager] '#{vm}' is attaching a #{disk_size}gb disk") - expect(logger).to receive(:log).with('s', "[+] [disk_manager] '#{vm}' failed to attach disk") - config[:pools][0]['datastore'] = '' - - subject._create_vm_disk(vm, disk_size, provider) - end - - it 'should attach the disk' do - expect(logger).to receive(:log).with('s', "[ ] [disk_manager] '#{vm}' is attaching a #{disk_size}gb disk") - expect(logger).to receive(:log).with('s', /\[\+\] \[disk_manager\] '#{vm}' attached #{disk_size}gb disk in 0.[\d]+ seconds/) - expect(provider).to receive(:add_disk).with(host,disk_size,datastore) - - subject._create_vm_disk(vm, disk_size, provider) - end - - it 'should update redis information when attaching the first disk' do - expect(provider).to receive(:add_disk).with(host,disk_size,datastore) - - subject._create_vm_disk(vm, disk_size, provider) - expect(redis.hget("vmpooler__vm__#{vm}", 'disk')).to eq("+#{disk_size}gb") - end - - it 'should update redis information when attaching the additional disks' do - 
expect(provider).to receive(:add_disk).with(host,disk_size,datastore) - initial_disks = '+10gb:+20gb' - redis.hset("vmpooler__vm__#{vm}", 'disk', initial_disks) - - subject._create_vm_disk(vm, disk_size, provider) - expect(redis.hget("vmpooler__vm__#{vm}", 'disk')).to eq("#{initial_disks}:+#{disk_size}gb") + subject._create_vm_disk(pool, vm, disk_size, provider) + end end end From a56d61c8bf2d219039fac78a0e2cc007a5fc40ac Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 14:01:54 -0700 Subject: [PATCH 12/32] (POOLER-70) Update create_vm_snapshot for VM Provider Previously the Pool Manager would use vSphere objects directly. This commit - Modifies the pool_manager to use the VM provider methods instead --- lib/vmpooler/pool_manager.rb | 38 +++++++-------- spec/unit/pool_manager_spec.rb | 85 +++++++++++++++++++++++----------- 2 files changed, 77 insertions(+), 46 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index 55e5e81..6f433ac 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -291,33 +291,33 @@ module Vmpooler result end - def create_vm_snapshot(vm, snapshot_name, provider) + def create_vm_snapshot(pool_name, vm, snapshot_name, provider) Thread.new do - _create_vm_snapshot(vm, snapshot_name, provider) + begin + _create_vm_snapshot(pool_name, vm, snapshot_name, provider) + rescue => err + $logger.log('d', "[!] [#{pool_name}] '#{vm}' failed while creating snapshot: #{err}") + raise + end end end - def _create_vm_snapshot(vm, snapshot_name, provider) - host = provider.find_vm(vm) + def _create_vm_snapshot(pool_name, vm_name, snapshot_name, provider) + $logger.log('s', "[ ] [snapshot_manager] 'Attempting to snapshot #{vm_name} in pool #{pool_name}") + start = Time.now - if (host) && ((! snapshot_name.nil?) && (! 
snapshot_name.empty?)) - $logger.log('s', "[ ] [snapshot_manager] '#{vm}' is being snapshotted") + result = provider.create_snapshot(pool_name, vm_name, snapshot_name) - start = Time.now + finish = '%.2f' % (Time.now - start) - host.CreateSnapshot_Task( - name: snapshot_name, - description: 'vmpooler', - memory: true, - quiesce: true - ).wait_for_completion - - finish = '%.2f' % (Time.now - start) - - $redis.hset('vmpooler__vm__' + vm, 'snapshot:' + snapshot_name, Time.now.to_s) - - $logger.log('s', "[+] [snapshot_manager] '#{vm}' snapshot created in #{finish} seconds") + if result + $redis.hset('vmpooler__vm__' + vm_name, 'snapshot:' + snapshot_name, Time.now.to_s) + $logger.log('s', "[+] [snapshot_manager] '#{vm_name}' snapshot created in #{finish} seconds") + else + $logger.log('s', "[+] [snapshot_manager] Failed to snapshot '#{vm_name}'") end + + result end def revert_vm_snapshot(vm, snapshot_name, provider) diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index fe23a40..0c520bd 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -823,7 +823,6 @@ EOT end describe '#create_vm_snapshot' do - let(:provider) { double('provider') } let(:snapshot_name) { 'snapshot' } before do @@ -832,54 +831,86 @@ EOT it 'calls _create_vm_snapshot' do expect(Thread).to receive(:new).and_yield - expect(subject).to receive(:_create_vm_snapshot).with(vm, snapshot_name, provider) + expect(subject).to receive(:_create_vm_snapshot).with(pool, vm, snapshot_name, provider) - subject.create_vm_snapshot(vm, snapshot_name, provider) + subject.create_vm_snapshot(pool, vm, snapshot_name, provider) end end describe '#_create_vm_snapshot' do - let(:provider) { double('provider') } let(:snapshot_name) { 'snapshot1' } - let(:snapshot_task) { double('snapshot_task') } before do expect(subject).not_to be_nil end before(:each) do - allow(provider).to receive(:find_vm).with(vm).and_return(host) - allow(snapshot_task).to receive(:wait_for_completion).and_return(nil) - allow(host).to receive(:CreateSnapshot_Task).with({:name=>snapshot_name, :description=>"vmpooler", :memory=>true, :quiesce=>true}).and_return(snapshot_task) create_running_vm(pool,vm,token) end - it 'should not do anything if the VM does not exist' do - expect(provider).to receive(:find_vm).with(vm).and_return(nil) - expect(logger).to receive(:log).exactly(0).times - subject._create_vm_snapshot(vm, snapshot_name, provider) + context 'Given a Pool that does not exist' do + let(:missing_pool) { 'missing_pool' } + + before(:each) do + expect(provider).to receive(:create_snapshot).with(missing_pool, vm, snapshot_name).and_raise("Pool #{missing_pool} not found") + end + + it 'should not update redis' do + expect(redis.hget("vmpooler__vm__#{vm}", "snapshot:#{snapshot_name}")).to be_nil + expect{ subject._create_vm_snapshot(missing_pool, vm, snapshot_name, provider) }.to raise_error("Pool #{missing_pool} not found") + expect(redis.hget("vmpooler__vm__#{vm}", "snapshot:#{snapshot_name}")).to be_nil + end end - it 'should not do anything if the snapshot name is nil' do - expect(logger).to receive(:log).exactly(0).times - subject._create_vm_snapshot(vm, nil, provider) + context 'Given a VM that does not exist' do + let(:missing_vm) { 'missing_vm' } + before(:each) do + expect(provider).to receive(:create_snapshot).with(pool, missing_vm, snapshot_name).and_raise("VM #{missing_vm} not found") + end + + it 'should not update redis' do + expect(redis.hget("vmpooler__vm__#{vm}", "snapshot:#{snapshot_name}")).to be_nil + 
expect{ subject._create_vm_snapshot(pool, missing_vm, snapshot_name, provider) }.to raise_error("VM #{missing_vm} not found") + expect(redis.hget("vmpooler__vm__#{vm}", "snapshot:#{snapshot_name}")).to be_nil + end end - it 'should not do anything if the snapshot name is empty string' do - expect(logger).to receive(:log).exactly(0).times - subject._create_vm_snapshot(vm, '', provider) + context 'Given a snapshot creation that succeeds' do + before(:each) do + expect(provider).to receive(:create_snapshot).with(pool, vm, snapshot_name).and_return(true) + end + + it 'should log messages' do + expect(logger).to receive(:log).with('s', "[ ] [snapshot_manager] 'Attempting to snapshot #{vm} in pool #{pool}") + expect(logger).to receive(:log).with('s', /\[\+\] \[snapshot_manager\] '#{vm}' snapshot created in 0.[\d]+ seconds/) + + subject._create_vm_snapshot(pool, vm, snapshot_name, provider) + end + + it 'should add snapshot redis information' do + expect(redis.hget("vmpooler__vm__#{vm}", "snapshot:#{snapshot_name}")).to be_nil + subject._create_vm_snapshot(pool, vm, snapshot_name, provider) + expect(redis.hget("vmpooler__vm__#{vm}", "snapshot:#{snapshot_name}")).to_not be_nil + end end - it 'should invoke provider to snapshot the VM' do - expect(logger).to receive(:log).with('s', "[ ] [snapshot_manager] '#{vm}' is being snapshotted") - expect(logger).to receive(:log).with('s', /\[\+\] \[snapshot_manager\] '#{vm}' snapshot created in 0.[\d]+ seconds/) - subject._create_vm_snapshot(vm, snapshot_name, provider) - end + context 'Given a snapshot creation that fails' do + before(:each) do + expect(provider).to receive(:create_snapshot).with(pool, vm, snapshot_name).and_return(false) + end - it 'should add snapshot redis information' do - expect(redis.hget("vmpooler__vm__#{vm}", "snapshot:#{snapshot_name}")).to be_nil - subject._create_vm_snapshot(vm, snapshot_name, provider) - expect(redis.hget("vmpooler__vm__#{vm}", "snapshot:#{snapshot_name}")).to_not be_nil + it 'should log messages' do + expect(logger).to receive(:log).with('s', "[ ] [snapshot_manager] 'Attempting to snapshot #{vm} in pool #{pool}") + expect(logger).to receive(:log).with('s', "[+] [snapshot_manager] Failed to snapshot '#{vm}'") + + subject._create_vm_snapshot(pool, vm, snapshot_name, provider) + end + + it 'should not update redis' do + expect(redis.hget("vmpooler__vm__#{vm}", "snapshot:#{snapshot_name}")).to be_nil + subject._create_vm_snapshot(pool, vm, snapshot_name, provider) + expect(redis.hget("vmpooler__vm__#{vm}", "snapshot:#{snapshot_name}")).to be_nil + end end end From c7b37dec75c031dcdcec2c925345092acd8272ef Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 14:02:39 -0700 Subject: [PATCH 13/32] (POOLER-70) Update revert_vm_snapshot for VM Provider Previously the Pool Manager would use vSphere objects directly. 
This commit - Modifies the pool_manager to use the VM provider methods instead --- lib/vmpooler/pool_manager.rb | 34 +++++++------- spec/unit/pool_manager_spec.rb | 84 +++++++++++++++++++++++----------- 2 files changed, 75 insertions(+), 43 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index 6f433ac..0c788ea 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -320,30 +320,32 @@ module Vmpooler result end - def revert_vm_snapshot(vm, snapshot_name, provider) + def revert_vm_snapshot(pool_name, vm, snapshot_name, provider) Thread.new do - _revert_vm_snapshot(vm, snapshot_name, provider) + begin + _revert_vm_snapshot(pool_name, vm, snapshot_name, provider) + rescue => err + $logger.log('d', "[!] [#{pool_name}] '#{vm}' failed while reverting snapshot: #{err}") + raise + end end end - def _revert_vm_snapshot(vm, snapshot_name, provider) - host = provider.find_vm(vm) + def _revert_vm_snapshot(pool_name, vm_name, snapshot_name, provider) + $logger.log('s', "[ ] [snapshot_manager] 'Attempting to revert #{vm_name}' in pool #{pool_name} to snapshot '#{snapshot_name}'") + start = Time.now - if host - snapshot = provider.find_snapshot(host, snapshot_name) + result = provider.revert_snapshot(pool_name, vm_name, snapshot_name) - if snapshot - $logger.log('s', "[ ] [snapshot_manager] '#{vm}' is being reverted to snapshot '#{snapshot_name}'") + finish = '%.2f' % (Time.now - start) - start = Time.now - - snapshot.RevertToSnapshot_Task.wait_for_completion - - finish = '%.2f' % (Time.now - start) - - $logger.log('s', "[<] [snapshot_manager] '#{vm}' reverted to snapshot in #{finish} seconds") - end + if result + $logger.log('s', "[+] [snapshot_manager] '#{vm_name}' reverted to snapshot '#{snapshot_name}' in #{finish} seconds") + else + $logger.log('s', "[+] [snapshot_manager] Failed to revert #{vm_name}' in pool #{pool_name} to snapshot '#{snapshot_name}'") end + + result end def check_disk_queue(maxloop = 0, loop_delay = 5) diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index 0c520bd..25da65d 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -915,58 +915,88 @@ EOT end describe '#revert_vm_snapshot' do - let(:provider) { double('provider') } let(:snapshot_name) { 'snapshot' } before do expect(subject).not_to be_nil end - it 'calls _create_vm_snapshot' do + it 'calls _revert_vm_snapshot' do expect(Thread).to receive(:new).and_yield - expect(subject).to receive(:_revert_vm_snapshot).with(vm, snapshot_name, provider) + expect(subject).to receive(:_revert_vm_snapshot).with(pool, vm, snapshot_name, provider) - subject.revert_vm_snapshot(vm, snapshot_name, provider) + subject.revert_vm_snapshot(pool, vm, snapshot_name, provider) end end describe '#_revert_vm_snapshot' do - let(:provider) { double('provider') } let(:snapshot_name) { 'snapshot1' } - let(:snapshot_object) { double('snapshot_object') } before do expect(subject).not_to be_nil end - before(:each) do - allow(provider).to receive(:find_vm).with(vm).and_return(host) - allow(snapshot_object).to receive_message_chain(:RevertToSnapshot_Task, :wait_for_completion) - allow(provider).to receive(:find_snapshot).with(host,snapshot_name).and_return(snapshot_object) + context 'Given a Pool that does not exist' do + let(:missing_pool) { 'missing_pool' } + + before(:each) do + expect(provider).to receive(:revert_snapshot).with(missing_pool, vm, snapshot_name).and_raise("Pool #{missing_pool} not found") + end + + it 'should not log a result 
message' do + expect(logger).to receive(:log).with('s', /\[\+\] \[snapshot_manager\] '#{vm}' reverted to snapshot '#{snapshot_name}' in 0.[\d]+ seconds/).exactly(0).times + expect(logger).to receive(:log).with('s', "[+] [snapshot_manager] Failed to revert #{vm}' in pool #{missing_pool} to snapshot '#{snapshot_name}'").exactly(0).times + + expect{ subject._revert_vm_snapshot(missing_pool, vm, snapshot_name, provider) }.to raise_error("Pool #{missing_pool} not found") + end end - it 'should not do anything if the VM does not exist' do - expect(provider).to receive(:find_vm).with(vm).and_return(nil) - expect(logger).to receive(:log).exactly(0).times - subject._revert_vm_snapshot(vm, snapshot_name, provider) + context 'Given a VM that does not exist' do + let(:missing_vm) { 'missing_vm' } + before(:each) do + expect(provider).to receive(:revert_snapshot).with(pool, missing_vm, snapshot_name).and_raise("VM #{missing_vm} not found") + end + + it 'should not log a result message' do + expect(logger).to receive(:log).with('s', /\[\+\] \[snapshot_manager\] '#{missing_vm}' reverted to snapshot '#{snapshot_name}' in 0.[\d]+ seconds/).exactly(0).times + expect(logger).to receive(:log).with('s', "[+] [snapshot_manager] Failed to revert #{missing_vm}' in pool #{pool} to snapshot '#{snapshot_name}'").exactly(0).times + + expect{ subject._revert_vm_snapshot(pool, missing_vm, snapshot_name, provider) }.to raise_error("VM #{missing_vm} not found") + end end - it 'should not do anything if the snapshot name is nil' do - expect(logger).to receive(:log).exactly(0).times - expect(provider).to receive(:find_snapshot).with(host,nil).and_return nil - subject._revert_vm_snapshot(vm, nil, provider) + context 'Given a snapshot revert that succeeds' do + before(:each) do + expect(provider).to receive(:revert_snapshot).with(pool, vm, snapshot_name).and_return(true) + end + + it 'should log success messages' do + expect(logger).to receive(:log).with('s', "[ ] [snapshot_manager] 'Attempting to revert #{vm}' in pool #{pool} to snapshot '#{snapshot_name}'") + expect(logger).to receive(:log).with('s', /\[\+\] \[snapshot_manager\] '#{vm}' reverted to snapshot '#{snapshot_name}' in 0.[\d]+ seconds/) + + subject._revert_vm_snapshot(pool, vm, snapshot_name, provider) + end + + it 'should return true' do + expect(subject._revert_vm_snapshot(pool, vm, snapshot_name, provider)).to be true + end end - it 'should not do anything if the snapshot name is empty string' do - expect(logger).to receive(:log).exactly(0).times - expect(provider).to receive(:find_snapshot).with(host,'').and_return nil - subject._revert_vm_snapshot(vm, '', provider) - end + context 'Given a snapshot creation that fails' do + before(:each) do + expect(provider).to receive(:revert_snapshot).with(pool, vm, snapshot_name).and_return(false) + end - it 'should invoke provider to revert the VM to the snapshot' do - expect(logger).to receive(:log).with('s', "[ ] [snapshot_manager] '#{vm}' is being reverted to snapshot '#{snapshot_name}'") - expect(logger).to receive(:log).with('s', /\[\<\] \[snapshot_manager\] '#{vm}' reverted to snapshot in 0\.[\d]+ seconds/) - subject._revert_vm_snapshot(vm, snapshot_name, provider) + it 'should log failure messages' do + expect(logger).to receive(:log).with('s', "[ ] [snapshot_manager] 'Attempting to revert #{vm}' in pool #{pool} to snapshot '#{snapshot_name}'") + expect(logger).to receive(:log).with('s', "[+] [snapshot_manager] Failed to revert #{vm}' in pool #{pool} to snapshot '#{snapshot_name}'") + + 
subject._revert_vm_snapshot(pool, vm, snapshot_name, provider) + end + + it 'should return false' do + expect(subject._revert_vm_snapshot(pool, vm, snapshot_name, provider)).to be false + end end end From c09035cfcb423aaf995e248b0b7f534a44e18960 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 14:07:08 -0700 Subject: [PATCH 14/32] (POOLER-70) Add get_pool_name_for_vm for VM Provider Previously there was no simple way to calculate which pool a VM was a member of. This commit adds a helper method which queries redis for the pool name for a given VM. --- lib/vmpooler/pool_manager.rb | 5 +++++ spec/unit/pool_manager_spec.rb | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index 0c788ea..a190d2d 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -348,6 +348,11 @@ module Vmpooler result end + def get_pool_name_for_vm(vm_name) + # the 'template' is a bad name. Should really be 'poolname' + $redis.hget('vmpooler__vm__' + vm_name, 'template') + end + def check_disk_queue(maxloop = 0, loop_delay = 5) $logger.log('d', "[*] [disk_manager] starting worker thread") diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index 25da65d..df0780b 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -1000,6 +1000,24 @@ EOT end end + describe '#get_pool_name_for_vm' do + context 'Given a valid VM' do + before(:each) do + create_running_vm(pool, vm, token) + end + + it 'should return the pool name' do + expect(subject.get_pool_name_for_vm(vm)).to eq(pool) + end + end + + context 'Given an invalid VM' do + it 'should return nil' do + expect(subject.get_pool_name_for_vm('does_not_exist')).to be_nil + end + end + end + describe '#check_disk_queue' do let(:threads) {[]} From e01b96c6d0c6e17b5b1d0830d092c5fc374a37b1 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Mon, 3 Apr 2017 15:33:10 -0700 Subject: [PATCH 15/32] (POOLER-70) Add get_provider_for_pool for VM Provider This commit adds a helper method which retrieves the associated Provider object for a pool by name --- lib/vmpooler/pool_manager.rb | 11 +++++ spec/unit/pool_manager_spec.rb | 87 ++++++++++++++++++++++++++++++++++ 2 files changed, 98 insertions(+) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index a190d2d..a1b9e3c 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -353,6 +353,17 @@ module Vmpooler $redis.hget('vmpooler__vm__' + vm_name, 'template') end + def get_provider_for_pool(pool_name) + provider_name = nil + $config[:pools].each do |pool| + next unless pool['name'] == pool_name + provider_name = pool['provider'] + end + return nil if provider_name.nil? 
+ + $providers[provider_name] + end + def check_disk_queue(maxloop = 0, loop_delay = 5) $logger.log('d', "[*] [disk_manager] starting worker thread") diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index df0780b..fc328e4 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -1018,6 +1018,93 @@ EOT end end + describe '#get_provider_for_pool' do + let(:provider_name) { 'mock_provider' } + + before do + expect(subject).not_to be_nil + # Inject mock provider into global variable - Note this is a code smell + $providers = { provider_name => provider } + end + + after(:each) do + # Reset the global variable - Note this is a code smell + $providers = nil + end + + context 'Given a pool name which does not exist' do + let(:config) { YAML.load(<<-EOT +--- +:config: +:providers: + :mock: +:pools: + - name: '#{pool}' + size: 1 +EOT + )} + + it 'should return nil' do + expect(subject.get_provider_for_pool('pool_does_not_exist')).to be_nil + end + end + + context 'Given a pool which does not have a provider' do + let(:config) { YAML.load(<<-EOT +--- +:config: +:providers: + :mock: +:pools: + - name: '#{pool}' + size: 1 +EOT + )} + + it 'should return nil' do + expect(subject.get_provider_for_pool(pool)).to be_nil + end + end + + context 'Given a pool which uses an invalid provider' do + let(:config) { YAML.load(<<-EOT +--- +:config: +:providers: + :mock: +:pools: + - name: '#{pool}' + size: 1 + provider: 'does_not_exist' +EOT + )} + + it 'should return nil' do + expect(subject.get_provider_for_pool(pool)).to be_nil + end + end + + context 'Given a pool which uses a valid provider' do + let(:config) { YAML.load(<<-EOT +--- +:config: +:providers: + :mock: +:pools: + - name: '#{pool}' + size: 1 + provider: #{provider_name} +EOT + )} + + it 'should return a provider object' do + result = subject.get_provider_for_pool(pool) + expect(result).to_not be_nil + expect(result.name).to eq(provider_name) + end + end + end + describe '#check_disk_queue' do let(:threads) {[]} From 41f9d7b3c4a9db3614ec9b7f937f5923c30aecf6 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 14:06:25 -0700 Subject: [PATCH 16/32] (POOLER-70) Update check_disk_queue for VM Provider Previously the Pool Manager would use vSphere objects directly. This commit - Modifies the pool_manager to use the VM provider methods instead --- lib/vmpooler/pool_manager.rb | 24 ++++++----- spec/unit/pool_manager_spec.rb | 74 +++++++++++++++++++++++++--------- 2 files changed, 69 insertions(+), 29 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index a1b9e3c..22e4283 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -367,11 +367,10 @@ module Vmpooler def check_disk_queue(maxloop = 0, loop_delay = 5) $logger.log('d', "[*] [disk_manager] starting worker thread") - $providers['disk_manager'] ||= Vmpooler::VsphereHelper.new $config, $metrics $threads['disk_manager'] = Thread.new do loop_count = 1 loop do - _check_disk_queue $providers['disk_manager'] + _check_disk_queue sleep(loop_delay) unless maxloop.zero? @@ -382,15 +381,20 @@ module Vmpooler end end - def _check_disk_queue(provider) - vm = $redis.spop('vmpooler__tasks__disk') - - unless vm.nil? + def _check_disk_queue + task_detail = $redis.spop('vmpooler__tasks__disk') + unless task_detail.nil? begin - vm_name, disk_size = vm.split(':') - create_vm_disk(vm_name, disk_size, provider) - rescue - $logger.log('s', "[!] 
[disk_manager] disk creation appears to have failed") + vm_name, disk_size = task_detail.split(':') + pool_name = get_pool_name_for_vm(vm_name) + raise("Unable to determine which pool #{vm_name} is a member of") if pool_name.nil? + + provider = get_provider_for_pool(pool_name) + raise("Missing Provider for vm #{vm_name} in pool #{pool_name}") if provider.nil? + + create_vm_disk(pool_name, vm_name, disk_size, provider) + rescue => err + $logger.log('s', "[!] [disk_manager] disk creation appears to have failed: #{err}") end end end diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index fc328e4..3bd85dc 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -1129,7 +1129,7 @@ EOT end it 'should call _check_disk_queue' do - expect(subject).to receive(:_check_disk_queue).with(Vmpooler::VsphereHelper) + expect(subject).to receive(:_check_disk_queue).with(no_args) subject.check_disk_queue(1,0) end @@ -1145,12 +1145,9 @@ EOT end it 'when a non-default loop delay is specified' do - start_time = Time.now - subject.check_disk_queue(maxloop,loop_delay) - finish_time = Time.now + expect(subject).to receive(:sleep).with(loop_delay).exactly(maxloop).times - # Use a generous delta to take into account various CPU load etc. - expect(finish_time - start_time).to be_within(0.75).of(maxloop * loop_delay) + subject.check_disk_queue(maxloop,loop_delay) end end @@ -1164,7 +1161,7 @@ EOT end it 'should call _check_disk_queue 5 times' do - expect(subject).to receive(:_check_disk_queue).with(Vmpooler::VsphereHelper).exactly(maxloop).times + expect(subject).to receive(:_check_disk_queue).with(no_args).exactly(maxloop).times subject.check_disk_queue(maxloop,0) end @@ -1172,8 +1169,6 @@ EOT end describe '#_check_disk_queue' do - let(:provider) { double('provider') } - before do expect(subject).not_to be_nil end @@ -1181,31 +1176,72 @@ EOT context 'when no VMs in the queue' do it 'should not call create_vm_disk' do expect(subject).to receive(:create_vm_disk).exactly(0).times - subject._check_disk_queue(provider) + subject._check_disk_queue + end + end + + context 'when VM in the queue does not exist' do + before(:each) do + disk_task_vm(vm,"snapshot_#{vm}") + end + + it 'should log an error' do + expect(logger).to receive(:log).with('s', /Unable to determine which pool #{vm} is a member of/) + + subject._check_disk_queue + end + + it 'should not call create_vm_disk' do + expect(subject).to receive(:create_vm_disk).exactly(0).times + + subject._check_disk_queue + end + end + + context 'when specified provider does not exist' do + before(:each) do + disk_task_vm(vm,"snapshot_#{vm}") + create_running_vm(pool, vm, token) + expect(subject).to receive(:get_provider_for_pool).and_return(nil) + end + + it 'should log an error' do + expect(logger).to receive(:log).with('s', /Missing Provider for/) + + subject._check_disk_queue + end + + it 'should not call create_vm_disk' do + expect(subject).to receive(:create_vm_disk).exactly(0).times + + subject._check_disk_queue end end context 'when multiple VMs in the queue' do before(:each) do - disk_task_vm('vm1',1) - disk_task_vm('vm2',2) - disk_task_vm('vm3',3) + ['vm1', 'vm2', 'vm3'].each do |vm_name| + disk_task_vm(vm_name,"snapshot_#{vm_name}") + create_running_vm(pool, vm_name, token) + end + + allow(subject).to receive(:get_provider_for_pool).with(pool).and_return(provider) end it 'should call create_vm_disk once' do expect(subject).to receive(:create_vm_disk).exactly(1).times - subject._check_disk_queue(provider) + 
subject._check_disk_queue end - it 'should snapshot the first VM in the queue' do - expect(subject).to receive(:create_vm_disk).with('vm1','1',provider) - subject._check_disk_queue(provider) + it 'should create the disk for the first VM in the queue' do + expect(subject).to receive(:create_vm_disk).with(pool,'vm1','snapshot_vm1',provider) + subject._check_disk_queue end it 'should log an error if one occurs' do expect(subject).to receive(:create_vm_disk).and_raise(RuntimeError,'MockError') - expect(logger).to receive(:log).with('s', "[!] [disk_manager] disk creation appears to have failed") - subject._check_disk_queue(provider) + expect(logger).to receive(:log).with('s', "[!] [disk_manager] disk creation appears to have failed: MockError") + subject._check_disk_queue end end end From acf32a3f7b97d5cceb32b9f91cb4937561b4e7e6 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 14:19:24 -0700 Subject: [PATCH 17/32] (POOLER-70) Update check_snapshot_queue for VM Provider Previously the Pool Manager would use vSphere objects directly. This commit - Modifies the pool_manager to use the VM provider methods instead --- lib/vmpooler/pool_manager.rb | 42 +++++---- spec/unit/pool_manager_spec.rb | 156 +++++++++++++++++++++++---------- 2 files changed, 134 insertions(+), 64 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index 22e4283..3a58e03 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -402,12 +402,10 @@ module Vmpooler def check_snapshot_queue(maxloop = 0, loop_delay = 5) $logger.log('d', "[*] [snapshot_manager] starting worker thread") - $providers['snapshot_manager'] ||= Vmpooler::VsphereHelper.new $config, $metrics - $threads['snapshot_manager'] = Thread.new do loop_count = 1 loop do - _check_snapshot_queue $providers['snapshot_manager'] + _check_snapshot_queue sleep(loop_delay) unless maxloop.zero? @@ -418,26 +416,38 @@ module Vmpooler end end - def _check_snapshot_queue(provider) - vm = $redis.spop('vmpooler__tasks__snapshot') + def _check_snapshot_queue + task_detail = $redis.spop('vmpooler__tasks__snapshot') - unless vm.nil? + unless task_detail.nil? begin - vm_name, snapshot_name = vm.split(':') - create_vm_snapshot(vm_name, snapshot_name, provider) - rescue - $logger.log('s', "[!] [snapshot_manager] snapshot appears to have failed") + vm_name, snapshot_name = task_detail.split(':') + pool_name = get_pool_name_for_vm(vm_name) + raise("Unable to determine which pool #{vm_name} is a member of") if pool_name.nil? + + provider = get_provider_for_pool(pool_name) + raise("Missing Provider for vm #{vm_name} in pool #{pool_name}") if provider.nil? + + create_vm_snapshot(pool_name, vm_name, snapshot_name, provider) + rescue => err + $logger.log('s', "[!] [snapshot_manager] snapshot create appears to have failed: #{err}") end end - vm = $redis.spop('vmpooler__tasks__snapshot-revert') + task_detail = $redis.spop('vmpooler__tasks__snapshot-revert') - unless vm.nil? + unless task_detail.nil? begin - vm_name, snapshot_name = vm.split(':') - revert_vm_snapshot(vm_name, snapshot_name, provider) - rescue - $logger.log('s', "[!] [snapshot_manager] snapshot revert appears to have failed") + vm_name, snapshot_name = task_detail.split(':') + pool_name = get_pool_name_for_vm(vm_name) + raise("Unable to determine which pool #{vm_name} is a member of") if pool_name.nil? + + provider = get_provider_for_pool(pool_name) + raise("Missing Provider for vm #{vm_name} in pool #{pool_name}") if provider.nil? 
+ + revert_vm_snapshot(pool_name, vm_name, snapshot_name, provider) + rescue => err + $logger.log('s', "[!] [snapshot_manager] snapshot revert appears to have failed: #{err}") end end end diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index 3bd85dc..dd633de 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -1251,10 +1251,10 @@ EOT before(:each) do expect(Thread).to receive(:new).and_yield - allow(subject).to receive(:_check_snapshot_queue) + allow(subject).to receive(:_check_snapshot_queue).with(no_args) end - it 'should log the disk manager is starting' do + it 'should log the snapshot manager is starting' do expect(logger).to receive(:log).with('d', "[*] [snapshot_manager] starting worker thread") expect($threads.count).to be(0) @@ -1270,7 +1270,7 @@ EOT end it 'should call _check_snapshot_queue' do - expect(subject).to receive(:_check_snapshot_queue).with(Vmpooler::VsphereHelper) + expect(subject).to receive(:_check_snapshot_queue).with(no_args) subject.check_snapshot_queue(1,0) end @@ -1286,12 +1286,9 @@ EOT end it 'when a non-default loop delay is specified' do - start_time = Time.now - subject.check_snapshot_queue(maxloop,loop_delay) - finish_time = Time.now + expect(subject).to receive(:sleep).with(loop_delay).exactly(maxloop).times - # Use a generous delta to take into account various CPU load etc. - expect(finish_time - start_time).to be_within(0.75).of(maxloop * loop_delay) + subject.check_snapshot_queue(maxloop,loop_delay) end end @@ -1305,7 +1302,7 @@ EOT end it 'should call _check_snapshot_queue 5 times' do - expect(subject).to receive(:_check_snapshot_queue).with(Vmpooler::VsphereHelper).exactly(maxloop).times + expect(subject).to receive(:_check_snapshot_queue).with(no_args).exactly(maxloop).times subject.check_snapshot_queue(maxloop,0) end @@ -1313,8 +1310,6 @@ EOT end describe '#_check_snapshot_queue' do - let(:provider) { double('provider') } - before do expect(subject).not_to be_nil end @@ -1323,64 +1318,146 @@ EOT context 'when no VMs in the queue' do it 'should not call create_vm_snapshot' do expect(subject).to receive(:create_vm_snapshot).exactly(0).times - subject._check_snapshot_queue(provider) + subject._check_snapshot_queue + end + end + + context 'when VM in the queue does not exist' do + before(:each) do + snapshot_vm(vm,"snapshot_#{vm}") + end + + it 'should log an error' do + expect(logger).to receive(:log).with('s', /Unable to determine which pool #{vm} is a member of/) + + subject._check_snapshot_queue + end + + it 'should not call create_vm_snapshot' do + expect(subject).to receive(:create_vm_snapshot).exactly(0).times + + subject._check_snapshot_queue + end + end + + context 'when specified provider does not exist' do + before(:each) do + snapshot_vm(vm,"snapshot_#{vm}") + create_running_vm(pool, vm, token) + expect(subject).to receive(:get_provider_for_pool).and_return(nil) + end + + it 'should log an error' do + expect(logger).to receive(:log).with('s', /Missing Provider for/) + + subject._check_snapshot_queue + end + + it 'should not call create_vm_snapshot' do + expect(subject).to receive(:create_vm_snapshot).exactly(0).times + + subject._check_snapshot_queue end end context 'when multiple VMs in the queue' do before(:each) do - snapshot_vm('vm1','snapshot1') - snapshot_vm('vm2','snapshot2') - snapshot_vm('vm3','snapshot3') + ['vm1', 'vm2', 'vm3'].each do |vm_name| + snapshot_vm(vm_name,"snapshot_#{vm_name}") + create_running_vm(pool, vm_name, token) + end + + allow(subject).to 
receive(:get_provider_for_pool).with(pool).and_return(provider) end it 'should call create_vm_snapshot once' do expect(subject).to receive(:create_vm_snapshot).exactly(1).times - subject._check_snapshot_queue(provider) + subject._check_snapshot_queue end it 'should snapshot the first VM in the queue' do - expect(subject).to receive(:create_vm_snapshot).with('vm1','snapshot1',provider) - subject._check_snapshot_queue(provider) + expect(subject).to receive(:create_vm_snapshot).with(pool,'vm1','snapshot_vm1',provider) + subject._check_snapshot_queue end it 'should log an error if one occurs' do expect(subject).to receive(:create_vm_snapshot).and_raise(RuntimeError,'MockError') - expect(logger).to receive(:log).with('s', "[!] [snapshot_manager] snapshot appears to have failed") - subject._check_snapshot_queue(provider) + expect(logger).to receive(:log).with('s', "[!] [snapshot_manager] snapshot create appears to have failed: MockError") + subject._check_snapshot_queue end end end - context 'revert_vm_snapshot queue' do + context 'vmpooler__tasks__snapshot-revert queue' do context 'when no VMs in the queue' do it 'should not call revert_vm_snapshot' do expect(subject).to receive(:revert_vm_snapshot).exactly(0).times - subject._check_snapshot_queue(provider) + subject._check_snapshot_queue + end + end + + context 'when VM in the queue does not exist' do + before(:each) do + snapshot_revert_vm(vm,"snapshot_#{vm}") + end + + it 'should log an error' do + expect(logger).to receive(:log).with('s', /Unable to determine which pool #{vm} is a member of/) + + subject._check_snapshot_queue + end + + it 'should not call revert_vm_snapshot' do + expect(subject).to receive(:revert_vm_snapshot).exactly(0).times + + subject._check_snapshot_queue + end + end + + context 'when specified provider does not exist' do + before(:each) do + snapshot_revert_vm(vm,"snapshot_#{vm}") + create_running_vm(pool, vm, token) + expect(subject).to receive(:get_provider_for_pool).and_return(nil) + end + + it 'should log an error' do + expect(logger).to receive(:log).with('s', /Missing Provider for/) + + subject._check_snapshot_queue + end + + it 'should not call revert_vm_snapshot' do + expect(subject).to receive(:revert_vm_snapshot).exactly(0).times + + subject._check_snapshot_queue end end context 'when multiple VMs in the queue' do before(:each) do - snapshot_revert_vm('vm1','snapshot1') - snapshot_revert_vm('vm2','snapshot2') - snapshot_revert_vm('vm3','snapshot3') + ['vm1', 'vm2', 'vm3'].each do |vm_name| + snapshot_revert_vm(vm_name,"snapshot_#{vm_name}") + create_running_vm(pool, vm_name, token) + end + + allow(subject).to receive(:get_provider_for_pool).with(pool).and_return(provider) end it 'should call revert_vm_snapshot once' do expect(subject).to receive(:revert_vm_snapshot).exactly(1).times - subject._check_snapshot_queue(provider) + subject._check_snapshot_queue end - it 'should revert snapshot the first VM in the queue' do - expect(subject).to receive(:revert_vm_snapshot).with('vm1','snapshot1',provider) - subject._check_snapshot_queue(provider) + it 'should snapshot the first VM in the queue' do + expect(subject).to receive(:revert_vm_snapshot).with(pool,'vm1','snapshot_vm1',provider) + subject._check_snapshot_queue end it 'should log an error if one occurs' do expect(subject).to receive(:revert_vm_snapshot).and_raise(RuntimeError,'MockError') - expect(logger).to receive(:log).with('s', "[!] 
[snapshot_manager] snapshot revert appears to have failed") - subject._check_snapshot_queue(provider) + expect(logger).to receive(:log).with('s', "[!] [snapshot_manager] snapshot revert appears to have failed: MockError") + subject._check_snapshot_queue end end end @@ -2496,21 +2573,4 @@ EOT end end end - - describe '#_check_snapshot_queue' do - let(:pool_helper) { double('pool') } - let(:provider) { {pool => pool_helper} } - - before do - expect(subject).not_to be_nil - $provider = provider - end - - it 'checks appropriate redis queues' do - expect(redis).to receive(:spop).with('vmpooler__tasks__snapshot') - expect(redis).to receive(:spop).with('vmpooler__tasks__snapshot-revert') - - subject._check_snapshot_queue(provider) - end - end end From 2974eac37174817e6ead628c0a52ad539af04337 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 14:21:06 -0700 Subject: [PATCH 18/32] (POOLER-70) Update migrate_vm for VM Provider Previously the Pool Manager would use vSphere objects directly. This commit - Modifies the pool_manager to use the VM provider methods instead --- lib/vmpooler/pool_manager.rb | 53 ++++++++++++------------- spec/unit/pool_manager_spec.rb | 70 +++++++++++++++++++--------------- 2 files changed, 67 insertions(+), 56 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index 3a58e03..2111dd4 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -458,42 +458,43 @@ module Vmpooler migration_limit if migration_limit >= 1 end - def migrate_vm(vm, pool, provider) + def migrate_vm(vm_name, pool_name, provider) Thread.new do - _migrate_vm(vm, pool, provider) + begin + _migrate_vm(vm_name, pool_name, provider) + rescue => err + $logger.log('s', "[x] [#{pool_name}] '#{vm_name}' migration failed with an error: #{err}") + remove_vmpooler_migration_vm(pool_name, vm_name) + end end end - def _migrate_vm(vm, pool, provider) - begin - $redis.srem('vmpooler__migrating__' + pool, vm) - vm_object = provider.find_vm(vm) - parent_host, parent_host_name = get_vm_host_info(vm_object) - migration_limit = migration_limit $config[:config]['migration_limit'] - migration_count = $redis.scard('vmpooler__migration') + def _migrate_vm(vm_name, pool_name, provider) + $redis.srem('vmpooler__migrating__' + pool_name, vm_name) - if ! migration_limit - $logger.log('s', "[ ] [#{pool}] '#{vm}' is running on #{parent_host_name}") + parent_host_name = provider.get_vm_host(pool_name, vm_name) + raise('Unable to determine which host the VM is running on') if parent_host_name.nil? + migration_limit = migration_limit $config[:config]['migration_limit'] + migration_count = $redis.scard('vmpooler__migration') + + if ! migration_limit + $logger.log('s', "[ ] [#{pool_name}] '#{vm_name}' is running on #{parent_host_name}") + return + else + if migration_count >= migration_limit + $logger.log('s', "[ ] [#{pool_name}] '#{vm_name}' is running on #{parent_host_name}. No migration will be evaluated since the migration_limit has been reached") return else - if migration_count >= migration_limit - $logger.log('s', "[ ] [#{pool}] '#{vm}' is running on #{parent_host_name}. 
No migration will be evaluated since the migration_limit has been reached") - return + $redis.sadd('vmpooler__migration', vm_name) + host_name = provider.find_least_used_compatible_host(vm_name) + if host_name == parent_host_name + $logger.log('s', "[ ] [#{pool_name}] No migration required for '#{vm_name}' running on #{parent_host_name}") else - $redis.sadd('vmpooler__migration', vm) - host, host_name = provider.find_least_used_compatible_host(vm_object) - if host == parent_host - $logger.log('s', "[ ] [#{pool}] No migration required for '#{vm}' running on #{parent_host_name}") - else - finish = migrate_vm_and_record_timing(vm_object, vm, pool, host, parent_host_name, host_name, provider) - $logger.log('s', "[>] [#{pool}] '#{vm}' migrated from #{parent_host_name} to #{host_name} in #{finish} seconds") - end - remove_vmpooler_migration_vm(pool, vm) + finish = migrate_vm_and_record_timing(vm_name, pool_name, parent_host_name, host_name, provider) + $logger.log('s', "[>] [#{pool_name}] '#{vm_name}' migrated from #{parent_host_name} to #{host_name} in #{finish} seconds") end + remove_vmpooler_migration_vm(pool_name, vm_name) end - rescue => err - $logger.log('s', "[x] [#{pool}] '#{vm}' migration failed with an error: #{err}") - remove_vmpooler_migration_vm(pool, vm) end end diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index dd633de..43323a6 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -1487,60 +1487,75 @@ EOT end describe '#migrate_vm' do - let(:provider) { double('provider') } - - before do + before(:each) do expect(subject).not_to be_nil + expect(Thread).to receive(:new).and_yield end it 'calls _migrate_vm' do - expect(Thread).to receive(:new).and_yield expect(subject).to receive(:_migrate_vm).with(vm, pool, provider) subject.migrate_vm(vm, pool, provider) end + + context 'When an error is raised' do + before(:each) do + expect(subject).to receive(:_migrate_vm).with(vm, pool, provider).and_raise('MockError') + end + + it 'logs a message' do + allow(logger).to receive(:log) + expect(logger).to receive(:log).with('s', "[x] [#{pool}] '#{vm}' migration failed with an error: MockError") + + subject.migrate_vm(vm, pool, provider) + end + + it 'should attempt to remove from vmpooler_migration queue' do + expect(subject).to receive(:remove_vmpooler_migration_vm).with(pool, vm) + + subject.migrate_vm(vm, pool, provider) + end + end end describe "#_migrate_vm" do - let(:provider) { double('provider') } let(:vm_parent_hostname) { 'parent1' } let(:config) { YAML.load(<<-EOT --- :config: migration_limit: 5 -:pools: - - name: #{pool} EOT ) } - before do + before(:each) do expect(subject).not_to be_nil + allow(provider).to receive(:get_vm_host).with(pool, vm).and_return(vm_parent_hostname) end - context 'when an error occurs' do - it 'should log an error message and attempt to remove from vmpooler_migration queue' do - expect(provider).to receive(:find_vm).with(vm).and_raise(RuntimeError,'MockError') - expect(logger).to receive(:log).with('s', "[x] [#{pool}] '#{vm}' migration failed with an error: MockError") - expect(subject).to receive(:remove_vmpooler_migration_vm) - subject._migrate_vm(vm, pool, provider) + context 'when an error occurs trying to retrieve the current host' do + before(:each) do + expect(provider).to receive(:get_vm_host).with(pool, vm).and_raise(RuntimeError,'MockError') + end + + it 'should raise an error' do + expect{ subject._migrate_vm(vm, pool, provider) }.to raise_error('MockError') end end - context 'when VM 
does not exist' do - it 'should log an error message when VM does not exist' do - expect(provider).to receive(:find_vm).with(vm).and_return(nil) - # This test is quite fragile. Should refactor the code to make this scenario easier to detect - expect(logger).to receive(:log).with('s', "[x] [#{pool}] '#{vm}' migration failed with an error: undefined method `summary' for nil:NilClass") - subject._migrate_vm(vm, pool, provider) + context 'when the current host can not be determined' do + before(:each) do + expect(provider).to receive(:get_vm_host).with(pool, vm).and_return(nil) + end + + it 'should raise an error' do + expect{ subject._migrate_vm(vm, pool, provider) }.to raise_error(/Unable to determine which host the VM is running on/) end end context 'when VM exists but migration is disabled' do before(:each) do - expect(provider).to receive(:find_vm).with(vm).and_return(host) - allow(subject).to receive(:get_vm_host_info).with(host).and_return([{'name' => vm_parent_hostname}, vm_parent_hostname]) create_migrating_vm(vm, pool) end @@ -1564,8 +1579,6 @@ EOT context 'when VM exists but migration limit is reached' do before(:each) do - expect(provider).to receive(:find_vm).with(vm).and_return(host) - allow(subject).to receive(:get_vm_host_info).with(host).and_return([{'name' => vm_parent_hostname}, vm_parent_hostname]) create_migrating_vm(vm, pool) redis.sadd('vmpooler__migration', 'fakevm1') @@ -1589,9 +1602,6 @@ EOT context 'when VM exists but migration limit is not yet reached' do before(:each) do - expect(provider).to receive(:find_vm).with(vm).and_return(host) - allow(subject).to receive(:get_vm_host_info).with(host).and_return([{'name' => vm_parent_hostname}, vm_parent_hostname]) - create_migrating_vm(vm, pool) redis.sadd('vmpooler__migration', 'fakevm1') redis.sadd('vmpooler__migration', 'fakevm2') @@ -1599,7 +1609,7 @@ EOT context 'and host to migrate to is the same as the current host' do before(:each) do - expect(provider).to receive(:find_least_used_compatible_host).with(host).and_return([{'name' => vm_parent_hostname}, vm_parent_hostname]) + expect(provider).to receive(:find_least_used_compatible_host).with(vm).and_return(vm_parent_hostname) end it "should not migrate the VM" do @@ -1628,8 +1638,8 @@ EOT context 'and host to migrate to different to the current host' do let(:vm_new_hostname) { 'new_hostname' } before(:each) do - expect(provider).to receive(:find_least_used_compatible_host).with(host).and_return([{'name' => vm_new_hostname}, vm_new_hostname]) - expect(subject).to receive(:migrate_vm_and_record_timing).with(host, vm, pool, Object, vm_parent_hostname, vm_new_hostname, provider).and_return('1.00') + expect(provider).to receive(:find_least_used_compatible_host).with(vm).and_return(vm_new_hostname) + expect(subject).to receive(:migrate_vm_and_record_timing).with(vm, pool, vm_parent_hostname, vm_new_hostname, provider).and_return('1.00') end it "should migrate the VM" do From cf15829f05ce8f7b248ed5ddad7da60924b97f08 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 14:22:58 -0700 Subject: [PATCH 19/32] (POOLER-70) Remove get_vm_host_info from pool_manager Previously the Pool Manager would use vSphere objects directly. This commit removes get_vm_host_info as this functionality is now in the vSphere VM Provider. 
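For illustration only (this sketch is not part of the patch): callers that used to unwrap the vSphere VM object via get_vm_host_info are expected to ask the provider for the host name instead, as _migrate_vm now does. The parent_host_for helper below is hypothetical; only provider.get_vm_host(pool_name, vm_name) and the error message come from this series.

    # Sketch: resolve a VM's parent host through the provider abstraction
    # rather than reading vm_object.summary.runtime.host directly.
    def parent_host_for(provider, pool_name, vm_name)
      parent_host_name = provider.get_vm_host(pool_name, vm_name)
      raise('Unable to determine which host the VM is running on') if parent_host_name.nil?
      parent_host_name
    end
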
--- lib/vmpooler/pool_manager.rb | 5 ----- spec/unit/pool_manager_spec.rb | 16 ---------------- 2 files changed, 21 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index 2111dd4..747f760 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -498,11 +498,6 @@ module Vmpooler end end - def get_vm_host_info(vm_object) - parent_host = vm_object.summary.runtime.host - [parent_host, parent_host.name] - end - def remove_vmpooler_migration_vm(pool, vm) begin $redis.srem('vmpooler__migration', vm) diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index 43323a6..78ad85e 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -1667,22 +1667,6 @@ EOT end end - describe "#get_vm_host_info" do - before do - expect(subject).not_to be_nil - end - - let(:vm_object) { double('vm_object') } - let(:parent_host) { double('parent_host') } - - it 'should return an array with host information' do - expect(vm_object).to receive_message_chain(:summary, :runtime, :host).and_return(parent_host) - expect(parent_host).to receive(:name).and_return('vmhostname') - - expect(subject.get_vm_host_info(vm_object)).to eq([parent_host,'vmhostname']) - end - end - describe "#execute!" do let(:threads) {{}} From 3f6ead8134bfab2451a0143c4bf4533f99f365e2 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 14:23:56 -0700 Subject: [PATCH 20/32] (POOLER-70) Update migrate_vm_and_record_timing for VM Provider Previously the Pool Manager would use vSphere objects directly. This commit - Modifies the migrate_vm_and_record_timing method to use VM and Pool names instead of VM and Pool objects. --- lib/vmpooler/pool_manager.rb | 6 +++--- spec/unit/pool_manager_spec.rb | 20 ++++++++------------ 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index 747f760..d8426f5 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -506,11 +506,11 @@ module Vmpooler end end - def migrate_vm_and_record_timing(vm_object, vm_name, pool, host, source_host_name, dest_host_name, provider) + def migrate_vm_and_record_timing(vm_name, pool_name, source_host_name, dest_host_name, provider) start = Time.now - provider.migrate_vm_host(vm_object, host) + provider.migrate_vm_to_host(pool_name, vm_name, dest_host_name) finish = '%.2f' % (Time.now - start) - $metrics.timing("migrate.#{pool}", finish) + $metrics.timing("migrate.#{pool_name}", finish) $metrics.increment("migrate_from.#{source_host_name}") $metrics.increment("migrate_to.#{dest_host_name}") checkout_to_migration = '%.2f' % (Time.now - Time.parse($redis.hget("vmpooler__vm__#{vm_name}", 'checkout'))) diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index 78ad85e..df5e317 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -1981,45 +1981,41 @@ EOT end describe '#migrate_vm_and_record_timing' do - let(:provider) { double('provider') } - let(:vm_object) { double('vm_object') } let(:source_host_name) { 'source_host' } let(:dest_host_name) { 'dest_host' } - before do - expect(subject).not_to be_nil - end - before(:each) do create_vm(vm,token) - expect(provider).to receive(:migrate_vm_host).with(vm_object, host) + expect(subject).not_to be_nil + + expect(provider).to receive(:migrate_vm_to_host).with(pool, vm, dest_host_name) end it 'should return the elapsed time for the migration' do - result = 
subject.migrate_vm_and_record_timing(vm_object, vm, pool, host, source_host_name, dest_host_name, provider) +      result = subject.migrate_vm_and_record_timing(vm, pool, source_host_name, dest_host_name, provider)        expect(result).to match(/0\.[\d]+/)      end        it 'should add timing metric' do        expect(metrics).to receive(:timing).with("migrate.#{pool}",String) -      subject.migrate_vm_and_record_timing(vm_object, vm, pool, host, source_host_name, dest_host_name, provider) +      subject.migrate_vm_and_record_timing(vm, pool, source_host_name, dest_host_name, provider)      end        it 'should increment from_host and to_host metric' do        expect(metrics).to receive(:increment).with("migrate_from.#{source_host_name}")        expect(metrics).to receive(:increment).with("migrate_to.#{dest_host_name}") -      subject.migrate_vm_and_record_timing(vm_object, vm, pool, host, source_host_name, dest_host_name, provider) +      subject.migrate_vm_and_record_timing(vm, pool, source_host_name, dest_host_name, provider)      end        it 'should set migration_time metric in redis' do        expect(redis.hget("vmpooler__vm__#{vm}", 'migration_time')).to be_nil -      subject.migrate_vm_and_record_timing(vm_object, vm, pool, host, source_host_name, dest_host_name, provider) +      subject.migrate_vm_and_record_timing(vm, pool, source_host_name, dest_host_name, provider)        expect(redis.hget("vmpooler__vm__#{vm}", 'migration_time')).to match(/0\.[\d]+/)      end        it 'should set checkout_to_migration metric in redis' do        expect(redis.hget("vmpooler__vm__#{vm}", 'checkout_to_migration')).to be_nil -      subject.migrate_vm_and_record_timing(vm_object, vm, pool, host, source_host_name, dest_host_name, provider) +      subject.migrate_vm_and_record_timing(vm, pool, source_host_name, dest_host_name, provider)        expect(redis.hget("vmpooler__vm__#{vm}", 'checkout_to_migration')).to match(/0\.[\d]+/)      end    end From 7c3ad716af4ff58c55aeceb225e97dc1afbcffed Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 14:30:00 -0700 Subject: [PATCH 21/32] (POOLER-70) Update check_pool for VM Provider Previously the Pool Manager would use a single VM provider per Pool. This commit changes Pool Manager to use a single provider that services multiple pools. --- lib/vmpooler/pool_manager.rb | 24 +++++++++++++++--------- spec/unit/pool_manager_spec.rb | 26 +++++++------------------- 2 files changed, 22 insertions(+), 28 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index d8426f5..56e3074 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -522,18 +522,24 @@ module Vmpooler   def check_pool(pool, maxloop = 0, loop_delay = 5)     $logger.log('d', "[*] [#{pool['name']}] starting worker thread")  -    $providers[pool['name']] ||= Vmpooler::VsphereHelper.new $config, $metrics -     $threads[pool['name']] = Thread.new do -      loop_count = 1 -      loop do -        _check_pool(pool, $providers[pool['name']]) -        sleep(loop_delay) +      begin +        loop_count = 1 +        provider = get_provider_for_pool(pool['name']) +        raise("Could not find provider '#{pool['provider']}'") if provider.nil? +        loop do +          _check_pool(pool, provider)  -        unless maxloop.zero? -          break if loop_count >= maxloop -          loop_count += 1 +          sleep(loop_delay) + +          unless maxloop.zero? +            break if loop_count >= maxloop +            loop_count += 1 +          end         end +      rescue => err +        $logger.log('s', "[!] 
[#{pool['name']}] Error while checking the pool: #{err}") + raise end end end diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index df5e317..3ff9985 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -1855,48 +1855,40 @@ EOT describe "#check_pool" do let(:threads) {{}} - let(:provider) {{}} - + let(:provider_name) { 'mock_provider' } let(:config) { YAML.load(<<-EOT --- :pools: - name: #{pool} + provider: #{provider_name} EOT ) } - let(:thread) { double('thread') } let(:pool_object) { config[:pools][0] } before do expect(subject).not_to be_nil expect(Thread).to receive(:new).and_yield + allow(subject).to receive(:get_provider_for_pool).with(pool).and_return(provider) end context 'on startup' do before(:each) do - # Note the Vmpooler::VsphereHelper is not mocked - allow(subject).to receive(:_check_pool) + allow(subject).to receive(:_check_pool) expect(logger).to receive(:log).with('d', "[*] [#{pool}] starting worker thread") end after(:each) do # Reset the global variable - Note this is a code smell $threads = nil - $providers = nil end it 'should log a message the worker thread is starting' do subject.check_pool(pool_object,1,0) end - it 'should populate the providers global variable' do - subject.check_pool(pool_object,1,0) - - expect($providers[pool]).to_not be_nil - end - it 'should populate the threads global variable' do subject.check_pool(pool_object,1,0) @@ -1913,22 +1905,18 @@ EOT before(:each) do allow(logger).to receive(:log) # Note the Vmpooler::VsphereHelper is not mocked - allow(subject).to receive(:_check_pool) + allow(subject).to receive(:_check_pool) end after(:each) do # Reset the global variable - Note this is a code smell $threads = nil - $provider = nil end it 'when a non-default loop delay is specified' do - start_time = Time.now - subject.check_pool(pool_object,maxloop,loop_delay) - finish_time = Time.now + expect(subject).to receive(:sleep).with(loop_delay).exactly(maxloop).times - # Use a generous delta to take into account various CPU load etc. - expect(finish_time - start_time).to be_within(0.75).of(maxloop * loop_delay) + subject.check_pool(pool_object,maxloop,loop_delay) end end From 2ca1a39e8c0d35e8668584419c89344040830986 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 14:31:43 -0700 Subject: [PATCH 22/32] (POOLER-70) Update _check_pool for VM Provider Previously the Pool Manager would use vSphere objects directly. This commit - Modifies the pool_manager to use the VM provider methods instead - Removes the MockFindFolder class as it is no longer required - Minor update for rubocop violations --- lib/vmpooler/pool_manager.rb | 6 +-- spec/helpers.rb | 15 -------- spec/unit/pool_manager_spec.rb | 68 +++++++++++++++++++--------------- 3 files changed, 41 insertions(+), 48 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index 56e3074..10c6fcf 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -548,9 +548,7 @@ module Vmpooler # INVENTORY inventory = {} begin - base = provider.find_folder(pool['folder']) - - base.childEntity.each do |vm| + provider.vms_in_pool(pool['name']).each do |vm| if (! $redis.sismember('vmpooler__running__' + pool['name'], vm['name'])) && (! 
$redis.sismember('vmpooler__ready__' + pool['name'], vm['name'])) && @@ -629,7 +627,7 @@ module Vmpooler # DISCOVERED begin $redis.smembers("vmpooler__discovered__#{pool['name']}").each do |vm| - %w(pending ready running completed).each do |queue| + %w[pending ready running completed].each do |queue| if $redis.sismember("vmpooler__#{queue}__#{pool['name']}", vm) $logger.log('d', "[!] [#{pool['name']}] '#{vm}' found in '#{queue}', removed from 'discovered' queue") $redis.srem("vmpooler__discovered__#{pool['name']}", vm) diff --git a/spec/helpers.rb b/spec/helpers.rb index 7fda0d9..d9b6914 100644 --- a/spec/helpers.rb +++ b/spec/helpers.rb @@ -6,21 +6,6 @@ def redis @redis end -# Mock an object which is result from Vmpooler::VsphereHelper.find_folder(foldername) -class MockFindFolder - attr_reader :childEntity - - def initialize(vmlist = []) - # Generate an array of hashes - @childEntity = vmlist.map do |vm| - vm_object = {} - vm_object['name'] = vm - - vm_object - end - end -end - # Mock an object which represents a Logger. This stops the proliferation # of allow(logger).to .... expectations in tests. class MockLogger diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index 3ff9985..bc7b5f8 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -2009,6 +2009,21 @@ EOT end describe '#_check_pool' do + let(:new_vm_response) { + # Mock response from Base Provider for vms_in_pool + [{ 'name' => new_vm}] + } + let(:vm_response) { + # Mock response from Base Provider for vms_in_pool + [{ 'name' => vm}] + } + let(:multi_vm_response) { + # Mock response from Base Provider for vms_in_pool + [{ 'name' => 'vm1'}, + { 'name' => 'vm2'}, + { 'name' => 'vm3'}] + } + # Default test fixtures will consist of; # - Empty Redis dataset # - A single pool with a pool size of zero i.e. no new VMs should be created @@ -2020,18 +2035,16 @@ EOT task_limit: 10 :pools: - name: #{pool} - folder: 'vm_folder' size: 0 EOT ) } let(:pool_object) { config[:pools][0] } - let(:provider) { double('provider') } let(:new_vm) { 'newvm'} before do expect(subject).not_to be_nil - allow(logger).to receive(:log).with("s", "[!] [#{pool}] is empty") + allow(logger).to receive(:log) end # INVENTORY @@ -2043,24 +2056,23 @@ EOT allow(subject).to receive(:check_pending_vm) allow(subject).to receive(:destroy_vm) allow(subject).to receive(:clone_vm) + allow(provider).to receive(:vms_in_pool).with(pool).and_return(new_vm_response) end it 'should log an error if one occurs' do - expect(provider).to receive(:find_folder).and_raise(RuntimeError,'Mock Error') + expect(provider).to receive(:vms_in_pool).and_raise(RuntimeError,'Mock Error') expect(logger).to receive(:log).with('s', "[!] [#{pool}] _check_pool failed with an error while inspecting inventory: Mock Error") subject._check_pool(pool_object,provider) end it 'should log the discovery of VMs' do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) expect(logger).to receive(:log).with('s', "[?] [#{pool}] '#{new_vm}' added to 'discovered' queue") subject._check_pool(pool_object,provider) end it 'should add undiscovered VMs to the completed queue' do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) allow(logger).to receive(:log).with('s', "[?] 
[#{pool}] '#{new_vm}' added to 'discovered' queue") expect(redis.sismember("vmpooler__discovered__#{pool}", new_vm)).to be(false) @@ -2074,8 +2086,6 @@ EOT ['running','ready','pending','completed','discovered','migrating'].each do |queue_name| it "should not discover VMs in the #{queue_name} queue" do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) - expect(logger).to receive(:log).with('s', "[?] [#{pool}] '#{new_vm}' added to 'discovered' queue").exactly(0).times expect(redis.sismember("vmpooler__discovered__#{pool}", new_vm)).to be(false) redis.sadd("vmpooler__#{queue_name}__#{pool}", new_vm) @@ -2095,7 +2105,7 @@ EOT # RUNNING context 'Running VM not in the inventory' do before(:each) do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return(new_vm_response) expect(logger).to receive(:log).with('s', "[?] [#{pool}] '#{new_vm}' added to 'discovered' queue") create_running_vm(pool,vm,token) end @@ -2109,7 +2119,7 @@ EOT context 'Running VM in the inventory' do before(:each) do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([vm])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return(vm_response) allow(subject).to receive(:check_running_vm) create_running_vm(pool,vm,token) end @@ -2148,7 +2158,7 @@ EOT # READY context 'Ready VM not in the inventory' do before(:each) do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return(new_vm_response) expect(logger).to receive(:log).with('s', "[?] [#{pool}] '#{new_vm}' added to 'discovered' queue") create_ready_vm(pool,vm,token) end @@ -2162,7 +2172,7 @@ EOT context 'Ready VM in the inventory' do before(:each) do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([vm])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return(vm_response) allow(subject).to receive(:check_ready_vm) create_ready_vm(pool,vm,token) end @@ -2193,7 +2203,7 @@ EOT # PENDING context 'Pending VM not in the inventory' do before(:each) do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return(new_vm_response) expect(logger).to receive(:log).with('s', "[?] [#{pool}] '#{new_vm}' added to 'discovered' queue") create_pending_vm(pool,vm,token) end @@ -2208,7 +2218,7 @@ EOT context 'Pending VM in the inventory' do before(:each) do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([vm])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return(vm_response) allow(subject).to receive(:check_pending_vm) create_pending_vm(pool,vm,token) end @@ -2248,7 +2258,7 @@ EOT # COMPLETED context 'Completed VM not in the inventory' do before(:each) do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return(new_vm_response) expect(logger).to receive(:log).with('s', "[?] [#{pool}] '#{new_vm}' added to 'discovered' queue") expect(logger).to receive(:log).with('s', "[!] 
[#{pool}] '#{vm}' not found in inventory, removed from 'completed' queue") create_completed_vm(vm,pool,true) @@ -2279,7 +2289,7 @@ EOT context 'Completed VM in the inventory' do before(:each) do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([vm])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return(vm_response) create_completed_vm(vm,pool,true) end @@ -2316,7 +2326,7 @@ EOT # DISCOVERED context 'Discovered VM' do before(:each) do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([vm])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return(vm_response) create_discovered_vm(vm,pool) end @@ -2375,7 +2385,7 @@ EOT # MIGRATIONS context 'Migrating VM not in the inventory' do before(:each) do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return(new_vm_response) expect(logger).to receive(:log).with('s', "[?] [#{pool}] '#{new_vm}' added to 'discovered' queue") create_migrating_vm(vm,pool) end @@ -2389,7 +2399,7 @@ EOT context 'Migrating VM in the inventory' do before(:each) do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([vm])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return(vm_response) allow(subject).to receive(:check_ready_vm) allow(logger).to receive(:log).with("s", "[!] [#{pool}] is empty") create_migrating_vm(vm,pool) @@ -2412,14 +2422,14 @@ EOT # REPOPULATE context 'Repopulate a pool' do it 'should not call clone_vm when number of VMs is equal to the pool size' do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return([]) expect(subject).to receive(:clone_vm).exactly(0).times subject._check_pool(pool_object,provider) end it 'should not call clone_vm when number of VMs is greater than the pool size' do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([vm])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return(vm_response) create_ready_vm(pool,vm,token) expect(subject).to receive(:clone_vm).exactly(0).times @@ -2428,7 +2438,7 @@ EOT ['ready','pending'].each do |queue_name| it "should use VMs in #{queue_name} queue to caculate pool size" do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([vm])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return(vm_response) expect(subject).to receive(:clone_vm).exactly(0).times # Modify the pool size to 1 and add a VM in the queue redis.sadd("vmpooler__#{queue_name}__#{pool}",vm) @@ -2440,7 +2450,7 @@ EOT ['running','completed','discovered','migrating'].each do |queue_name| it "should not use VMs in #{queue_name} queue to caculate pool size" do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([vm])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return(vm_response) expect(subject).to receive(:clone_vm) # Modify the pool size to 1 and add a VM in the queue redis.sadd("vmpooler__#{queue_name}__#{pool}",vm) @@ -2451,7 +2461,7 @@ EOT end it 'should log a message the first time a pool is empty' do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return([]) expect(logger).to receive(:log).with('s', "[!] 
[#{pool}] is empty") subject._check_pool(pool_object,provider) @@ -2459,7 +2469,7 @@ EOT context 'when pool is marked as empty' do before(:each) do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return([]) redis.set("vmpooler__empty__#{pool}", 'true') end @@ -2480,7 +2490,7 @@ EOT context 'when number of VMs is less than the pool size' do before(:each) do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return([]) end it 'should call clone_vm to populate the pool' do @@ -2520,7 +2530,7 @@ EOT create_ready_vm(pool,'vm1') create_ready_vm(pool,'vm2') create_ready_vm(pool,'vm3') - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new(['vm1','vm2','vm3'])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return(multi_vm_response) expect(metrics).to receive(:gauge).with("ready.#{pool}", 3) allow(metrics).to receive(:gauge) @@ -2532,7 +2542,7 @@ EOT create_running_vm(pool,'vm1',token) create_running_vm(pool,'vm2',token) create_running_vm(pool,'vm3',token) - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new(['vm1','vm2','vm3'])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return(multi_vm_response) expect(metrics).to receive(:gauge).with("running.#{pool}", 3) allow(metrics).to receive(:gauge) @@ -2541,7 +2551,7 @@ EOT end it 'increments metrics with 0 when pool empty' do - expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([])) + expect(provider).to receive(:vms_in_pool).with(pool).and_return([]) expect(metrics).to receive(:gauge).with("ready.#{pool}", 0) expect(metrics).to receive(:gauge).with("running.#{pool}", 0) From 710b3808a5875745407696a03fa599c9b258d759 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Tue, 4 Apr 2017 14:23:57 -0700 Subject: [PATCH 23/32] (POOLER-70) Add create_provider_object for VM Provider This commit adds a factory style function to create a VM provider object from a provider name string. This is used during VMPooler initialization to create the various VM providers on startup. --- lib/vmpooler/pool_manager.rb | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index 10c6fcf..7ab229f 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -690,6 +690,17 @@ module Vmpooler raise end + def create_provider_object(config, logger, metrics, provider_name, options) + case provider_name + when 'vsphere' + Vmpooler::PoolManager::Provider::VSphere.new(config, logger, metrics, provider_name, options) + when 'dummy' + Vmpooler::PoolManager::Provider::Dummy.new(config, logger, metrics, provider_name, options) + else + raise("Provider '#{provider_name}' is unknown") + end + end + def execute!(maxloop = 0, loop_delay = 1) $logger.log('d', 'starting vmpooler') From 1a3ae869430ac306c5bb14a7d241c48654d95a58 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Tue, 4 Apr 2017 14:23:36 -0700 Subject: [PATCH 24/32] (POOLER-70) Add config for VM Provider This commit adds a public function to access the internal variable holding the VMPooler configuration. This is required for later commits for the execute! function testing. 
--- lib/vmpooler/pool_manager.rb | 4 ++++ spec/unit/pool_manager_spec.rb | 10 ++++++++++ 2 files changed, 14 insertions(+) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index 7ab229f..2133c53 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -19,6 +19,10 @@ module Vmpooler $threads = {} end + def config + $config + end + # Check the state of a VM def check_pending_vm(vm, pool, timeout, provider) Thread.new do diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index bc7b5f8..cec693d 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -36,6 +36,16 @@ EOT subject { Vmpooler::PoolManager.new(config, logger, redis, metrics) } + describe '#config' do + before do + expect(subject).not_to be_nil + end + + it 'should return the current configuration' do + expect(subject.config).to eq(config) + end + end + describe '#check_pending_vm' do before do expect(subject).not_to be_nil From 57eba4a8e440a4d364206a0a33e7325a279139c6 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 31 Mar 2017 17:28:22 -0700 Subject: [PATCH 25/32] (POOLER-70) Update execute! for VM Provider This commit modifies execute! to create the VM Providers on VMPooler startup instead of check_pool creating a provider per pool. This commit also adds legacy support for old configuration files: - Setting the default provider for pools to be vsphere - Copying VSphere connection settings in the configuration file from the legacy location in the root, to under :providers/:vsphere which is new location for all provider configuration --- lib/vmpooler/pool_manager.rb | 26 +++++ spec/unit/pool_manager_spec.rb | 180 +++++++++++++++++++++++---------- 2 files changed, 153 insertions(+), 53 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index 2133c53..35e7966 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -713,6 +713,32 @@ module Vmpooler # Clear out vmpooler__migrations since stale entries may be left after a restart $redis.del('vmpooler__migration') + # Copy vSphere settings to correct location. This happens with older configuration files + if !$config[:vsphere].nil? && ($config[:providers].nil? || $config[:providers][:vsphere].nil?) + $logger.log('d', "[!] Detected an older configuration file. Copying the settings from ':vsphere:' to ':providers:/:vsphere:'") + $config[:providers] = {} if $config[:providers].nil? + $config[:providers][:vsphere] = $config[:vsphere] + end + + # Set default provider for all pools that do not have one defined + $config[:pools].each do |pool| + if pool['provider'].nil? + $logger.log('d', "[!] Setting provider for pool '#{pool['name']}' to 'vsphere' as default") + pool['provider'] = 'vsphere' + end + end + + # Create the providers + $config[:pools].each do |pool| + provider_name = pool['provider'] + begin + $providers[provider_name] = create_provider_object($config, $logger, $metrics, provider_name, {}) if $providers[provider_name].nil? + rescue => err + $logger.log('s', "Error while creating provider for pool #{pool['name']}: #{err}") + raise + end + end + loop_count = 1 loop do if ! $threads['disk_manager'] diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index cec693d..de7c11e 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -1678,8 +1678,6 @@ EOT end describe "#execute!" 
do - let(:threads) {{}} - let(:config) { YAML.load(<<-EOT --- @@ -1689,18 +1687,26 @@ EOT ) } - let(:thread) { double('thread') } - - before do + before(:each) do expect(subject).not_to be_nil + + allow(subject).to receive(:check_disk_queue) + allow(subject).to receive(:check_snapshot_queue) + allow(subject).to receive(:check_pool) + + allow(logger).to receive(:log) + end + + after(:each) do + # Reset the global variable - Note this is a code smell + $threads = nil end context 'on startup' do - before(:each) do - allow(subject).to receive(:check_disk_queue) - allow(subject).to receive(:check_snapshot_queue) - allow(subject).to receive(:check_pool) + it 'should log a message that VMPooler has started' do expect(logger).to receive(:log).with('d', 'starting vmpooler') + + subject.execute!(1,0) end it 'should set clone tasks to zero' do @@ -1732,69 +1738,146 @@ EOT subject.execute!(1,0) end + + context 'creating Providers' do + let(:vsphere_provider) { double('vsphere_provider') } + let(:config) { + YAML.load(<<-EOT +--- +:pools: + - name: #{pool} + - name: 'dummy' + provider: 'vsphere' +EOT + )} + + it 'should call create_provider_object idempotently' do + # Even though there are two pools using the vsphere provider (the default), it should only + # create the provider object once. + expect(subject).to receive(:create_provider_object).with(Object, Object, Object, 'vsphere', Object).and_return(vsphere_provider) + + subject.execute!(1,0) + end + + it 'should raise an error if the provider can not be created' do + expect(subject).to receive(:create_provider_object).and_raise(RuntimeError, "MockError") + + expect{ subject.execute!(1,0) }.to raise_error(/MockError/) + end + + it 'should log a message if the provider can not be created' do + expect(subject).to receive(:create_provider_object).and_raise(RuntimeError, "MockError") + expect(logger).to receive(:log).with('s',"Error while creating provider for pool #{pool}: MockError") + + expect{ subject.execute!(1,0) }.to raise_error(/MockError/) + end + end + end + + context 'modify configuration on startup' do + context 'move vSphere configuration to providers location' do + let(:config) { + YAML.load(<<-EOT +:vsphere: + server: 'vsphere.company.com' + username: 'vmpooler' + password: 'password' +:pools: + - name: #{pool} +EOT + )} + + it 'should set providers with no provider to vsphere' do + expect(subject.config[:providers]).to be nil + + subject.execute!(1,0) + expect(subject.config[:providers][:vsphere]['server']).to eq('vsphere.company.com') + expect(subject.config[:providers][:vsphere]['username']).to eq('vmpooler') + expect(subject.config[:providers][:vsphere]['password']).to eq('password') + end + + it 'should log a message' do + expect(logger).to receive(:log).with('d', "[!] Detected an older configuration file. Copying the settings from ':vsphere:' to ':providers:/:vsphere:'") + + subject.execute!(1,0) + end + end + + + context 'default to the vphere provider' do + let(:config) { + YAML.load(<<-EOT +--- +:pools: + - name: #{pool} + - name: 'dummy' + provider: 'dummy' +EOT + )} + + it 'should set providers with no provider to vsphere' do + expect(subject.config[:pools][0]['provider']).to be_nil + expect(subject.config[:pools][1]['provider']).to eq('dummy') + + subject.execute!(1,0) + + expect(subject.config[:pools][0]['provider']).to eq('vsphere') + expect(subject.config[:pools][1]['provider']).to eq('dummy') + end + + it 'should log a message' do + expect(logger).to receive(:log).with('d', "[!] 
Setting provider for pool '#{pool}' to 'vsphere' as default") + + subject.execute!(1,0) + end + end end context 'with dead disk_manager thread' do - before(:each) do - allow(subject).to receive(:check_snapshot_queue) - allow(subject).to receive(:check_pool) - expect(logger).to receive(:log).with('d', 'starting vmpooler') - end + let(:disk_manager_thread) { double('thread', :alive? => false) } - after(:each) do + before(:each) do # Reset the global variable - Note this is a code smell - $threads = nil + $threads = {} + $threads['disk_manager'] = disk_manager_thread end it 'should run the check_disk_queue method and log a message' do - expect(thread).to receive(:alive?).and_return(false) expect(subject).to receive(:check_disk_queue) expect(logger).to receive(:log).with('d', "[!] [disk_manager] worker thread died, restarting") - $threads['disk_manager'] = thread subject.execute!(1,0) end end context 'with dead snapshot_manager thread' do + let(:snapshot_manager_thread) { double('thread', :alive? => false) } before(:each) do - allow(subject).to receive(:check_disk_queue) - allow(subject).to receive(:check_pool) - expect(logger).to receive(:log).with('d', 'starting vmpooler') - end - - after(:each) do # Reset the global variable - Note this is a code smell - $threads = nil + $threads = {} + $threads['snapshot_manager'] = snapshot_manager_thread end it 'should run the check_snapshot_queue method and log a message' do - expect(thread).to receive(:alive?).and_return(false) expect(subject).to receive(:check_snapshot_queue) expect(logger).to receive(:log).with('d', "[!] [snapshot_manager] worker thread died, restarting") - $threads['snapshot_manager'] = thread + $threads['snapshot_manager'] = snapshot_manager_thread subject.execute!(1,0) end end context 'with dead pool thread' do + let(:pool_thread) { double('thread', :alive? => false) } before(:each) do - allow(subject).to receive(:check_disk_queue) - allow(subject).to receive(:check_snapshot_queue) - expect(logger).to receive(:log).with('d', 'starting vmpooler') - end - - after(:each) do # Reset the global variable - Note this is a code smell - $threads = nil + $threads = {} + $threads[pool] = pool_thread end it 'should run the check_pool method and log a message' do - expect(thread).to receive(:alive?).and_return(false) expect(subject).to receive(:check_pool).with(a_pool_with_name_of(pool)) expect(logger).to receive(:log).with('d', "[!] [#{pool}] worker thread died, restarting") - $threads[pool] = thread subject.execute!(1,0) end @@ -1812,30 +1895,22 @@ EOT end it 'when a non-default loop delay is specified' do - start_time = Time.now - subject.execute!(maxloop,loop_delay) - finish_time = Time.now + expect(subject).to receive(:sleep).with(loop_delay).exactly(maxloop).times - # Use a generous delta to take into account various CPU load etc. - expect(finish_time - start_time).to be_within(0.75).of(maxloop * loop_delay) + subject.execute!(maxloop,loop_delay) end end context 'loops specified number of times (5)' do + let(:alive_thread) { double('thread', :alive? 
=> true) } let(:maxloop) { 5 } # Note a maxloop of zero can not be tested as it never terminates before(:each) do end - after(:each) do - # Reset the global variable - Note this is a code smell - $threads = nil - end - it 'should run startup tasks only once' do - allow(subject).to receive(:check_disk_queue) - allow(subject).to receive(:check_snapshot_queue) - allow(subject).to receive(:check_pool) + expect(redis).to receive(:set).with('vmpooler__tasks__clone', 0).once + expect(redis).to receive(:del).with('vmpooler__migration').once subject.execute!(maxloop,0) end @@ -1853,10 +1928,9 @@ EOT expect(subject).to receive(:check_snapshot_queue).exactly(0).times expect(subject).to receive(:check_pool).exactly(0).times - allow(thread).to receive(:alive?).and_return(true) - $threads[pool] = thread - $threads['disk_manager'] = thread - $threads['snapshot_manager'] = thread + $threads[pool] = alive_thread + $threads['disk_manager'] = alive_thread + $threads['snapshot_manager'] = alive_thread subject.execute!(maxloop,0) end From 2e255a5a43e5e5157f26ac956526fd6f3d7147dd Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Fri, 14 Apr 2017 16:08:12 -0700 Subject: [PATCH 26/32] (POOLER-70) Move vSphere configuration into providers section Previously the vSphere based configuration was in the root of the configuration YAML. As there is deprecation support to move the old configuration to the new location, the vSphere provider can be updated. This commit updates the vSphere Provider and tests to use the new configuration location under: :providers: :vsphere: --- README.md | 11 +- lib/vmpooler/providers/vsphere.rb | 28 ++--- spec/unit/providers/vsphere_spec.rb | 43 +++---- vmpooler.yaml.example | 184 +++++++++++++++------------- 4 files changed, 138 insertions(+), 128 deletions(-) diff --git a/README.md b/README.md index 40e29ef..8dcbfbb 100644 --- a/README.md +++ b/README.md @@ -29,10 +29,11 @@ The following YAML configuration sets up two pools, `debian-7-i386` and `debian- ``` --- -:vsphere: - server: 'vsphere.company.com' - username: 'vmpooler' - password: 'swimsw1msw!m' +:providers: + :vsphere: + server: 'vsphere.company.com' + username: 'vmpooler' + password: 'swimsw1msw!m' :redis: server: 'redis.company.com' @@ -47,12 +48,14 @@ The following YAML configuration sets up two pools, `debian-7-i386` and `debian- pool: 'Pooled VMs/debian-7-i386' datastore: 'vmstorage' size: 5 + provider: vsphere - name: 'debian-7-x86_64' template: 'Templates/debian-7-x86_64' folder: 'Pooled VMs/debian-7-x86_64' pool: 'Pooled VMs/debian-7-x86_64' datastore: 'vmstorage' size: 5 + provider: vsphere ``` See the provided YAML configuration example, [vmpooler.yaml.example](vmpooler.yaml.example), for additional configuration options and parameters. diff --git a/lib/vmpooler/providers/vsphere.rb b/lib/vmpooler/providers/vsphere.rb index 38c6d51..6dc9819 100644 --- a/lib/vmpooler/providers/vsphere.rb +++ b/lib/vmpooler/providers/vsphere.rb @@ -2,12 +2,6 @@ module Vmpooler class PoolManager class Provider class VSphere < Vmpooler::PoolManager::Provider::Base - def initialize(config, logger, metrics, name, options) - super(config, logger, metrics, name, options) - @credentials = provider_config - @conf = global_config[:config] - end - def name 'vsphere' end @@ -239,12 +233,6 @@ module Vmpooler true end - def provider_config - # The vSphere configuration is currently in it's own root. 
This will - # eventually shift into the same location base expects it - global_config[:vsphere] - end - # VSphere Helper methods def get_target_cluster_from_config(pool_name) @@ -279,21 +267,21 @@ module Vmpooler begin @connection.serviceInstance.CurrentTime rescue - @connection = connect_to_vsphere @credentials + @connection = connect_to_vsphere end @connection end - def connect_to_vsphere(credentials) - max_tries = @conf['max_tries'] || 3 - retry_factor = @conf['retry_factor'] || 10 + def connect_to_vsphere + max_tries = global_config[:config]['max_tries'] || 3 + retry_factor = global_config[:config]['retry_factor'] || 10 try = 1 begin - connection = RbVmomi::VIM.connect host: credentials['server'], - user: credentials['username'], - password: credentials['password'], - insecure: credentials['insecure'] || true + connection = RbVmomi::VIM.connect host: provider_config['server'], + user: provider_config['username'], + password: provider_config['password'], + insecure: provider_config['insecure'] || true metrics.increment('connect.open') return connection rescue => err diff --git a/spec/unit/providers/vsphere_spec.rb b/spec/unit/providers/vsphere_spec.rb index 906f1b3..948e6da 100644 --- a/spec/unit/providers/vsphere_spec.rb +++ b/spec/unit/providers/vsphere_spec.rb @@ -47,11 +47,12 @@ describe 'Vmpooler::PoolManager::Provider::VSphere' do :config: max_tries: 3 retry_factor: 10 -:vsphere: - server: "vcenter.domain.local" - username: "vcenter_user" - password: "vcenter_password" - insecure: true +:providers: + :vsphere: + server: "vcenter.domain.local" + username: "vcenter_user" + password: "vcenter_password" + insecure: true :pools: - name: '#{poolname}' alias: [ 'mockpool' ] @@ -66,8 +67,6 @@ EOT ) } - let(:credentials) { config[:vsphere] } - let(:connection_options) {{}} let(:connection) { mock_RbVmomi_VIM_Connection(connection_options) } let(:vmname) { 'vm1' } @@ -912,14 +911,14 @@ EOT it 'should call connect_to_vsphere to reconnect' do allow(metrics).to receive(:increment) - expect(subject).to receive(:connect_to_vsphere).with(credentials) + expect(subject).to receive(:connect_to_vsphere).with(no_args) subject.get_connection() end it 'should return a new connection' do new_connection = mock_RbVmomi_VIM_Connection(connection_options) - expect(subject).to receive(:connect_to_vsphere).with(credentials).and_return(new_connection) + expect(subject).to receive(:connect_to_vsphere).with(no_args).and_return(new_connection) result = subject.get_connection() @@ -933,6 +932,8 @@ EOT allow(RbVmomi::VIM).to receive(:connect).and_return(connection) end + let (:credentials) { config[:providers][:vsphere] } + context 'succesful connection' do it 'should use the supplied credentials' do expect(RbVmomi::VIM).to receive(:connect).with({ @@ -941,7 +942,7 @@ EOT :password => credentials['password'], :insecure => credentials['insecure'] }).and_return(connection) - subject.connect_to_vsphere(credentials) + subject.connect_to_vsphere end it 'should honor the insecure setting' do @@ -954,11 +955,11 @@ EOT :password => credentials['password'], :insecure => false, }).and_return(connection) - subject.connect_to_vsphere(credentials) + subject.connect_to_vsphere end it 'should default to an insecure connection' do - config[:vsphere][:insecure] = nil + config[:providers][:vsphere][:insecure] = nil expect(RbVmomi::VIM).to receive(:connect).with({ :host => credentials['server'], @@ -967,18 +968,18 @@ EOT :insecure => true }).and_return(connection) - subject.connect_to_vsphere(credentials) + subject.connect_to_vsphere 
end it 'should return the connection object' do - result = subject.connect_to_vsphere(credentials) + result = subject.connect_to_vsphere expect(result).to be(connection) end it 'should increment the connect.open counter' do expect(metrics).to receive(:increment).with('connect.open') - subject.connect_to_vsphere(credentials) + subject.connect_to_vsphere end end @@ -992,7 +993,7 @@ EOT end it 'should return the connection object' do - result = subject.connect_to_vsphere(credentials) + result = subject.connect_to_vsphere expect(result).to be(connection) end @@ -1000,7 +1001,7 @@ EOT it 'should increment the connect.fail and then connect.open counter' do expect(metrics).to receive(:increment).with('connect.fail').exactly(1).times expect(metrics).to receive(:increment).with('connect.open').exactly(1).times - subject.connect_to_vsphere(credentials) + subject.connect_to_vsphere end end @@ -1011,7 +1012,7 @@ EOT end it 'should raise an error' do - expect{subject.connect_to_vsphere(credentials)}.to raise_error(RuntimeError,'MockError') + expect{subject.connect_to_vsphere}.to raise_error(RuntimeError,'MockError') end it 'should retry the connection attempt config.max_tries times' do @@ -1020,7 +1021,7 @@ EOT begin # Swallow any errors - subject.connect_to_vsphere(credentials) + subject.connect_to_vsphere rescue end end @@ -1031,7 +1032,7 @@ EOT begin # Swallow any errors - subject.connect_to_vsphere(credentials) + subject.connect_to_vsphere rescue end end @@ -1051,7 +1052,7 @@ EOT begin # Swallow any errors - subject.connect_to_vsphere(credentials) + subject.connect_to_vsphere rescue end end diff --git a/vmpooler.yaml.example b/vmpooler.yaml.example index 2aab60c..b7d3ea6 100644 --- a/vmpooler.yaml.example +++ b/vmpooler.yaml.example @@ -1,9 +1,20 @@ --- +:providers: +# :providers: +# +# This section contains the VM providers for VMs and Pools +# The currently supported backing services are: +# - vsphere +# - dummy + # :vsphere: # # This section contains the server hostname and authentication credentials # needed for vmpooler to connect to VMware vSphere. # +# NOTE - To support older configuration files, a :vsphere: configuration section +# will be copied into :providers:/:vsphere: if one does not already exist. +# # Available configuration parameters: # # - server @@ -17,20 +28,16 @@ # - password # The password used to authenticate VMware vSphere. # (required) - +# +# - insecure +# Whether to ignore any HTTPS negotiation errors (e.g. untrusted self-signed certificates) +# (optional: default true) # Example: -:vsphere: - server: 'vsphere.company.com' - username: 'vmpooler' - password: 'swimsw1msw!m' - -:providers: -# :providers: -# -# This section contains the VM providers for VMs and Pools -# The currently supported backing services are: -# - dummy + :vsphere: + server: 'vsphere.company.com' + username: 'vmpooler' + password: 'swimsw1msw!m' # :dummy: # @@ -146,58 +153,58 @@ server: 'redis.company.com' - # :graphs: - # - # This section contains the server and prefix information for a graphite- - # compatible web front-end where graphs may be viewed. This is used by the - # vmpooler dashboard to retrieve statistics and graphs for a given instance. - # - # NOTE: This is not the endpoint for publishing metrics data. See `graphite:` - # and `statsd:` below. - # - # NOTE: If `graphs:` is not set, for legacy compatibility, `graphite:` will be - # consulted for `server`/`prefix` information to use in locating a - # graph server for our dashboard. 
`graphs:` is recommended over - # `graphite:` - # - # - # Available configuration parameters: - # - # - # - server - # The FQDN hostname of the statsd daemon. - # (required) - # - # - prefix - # The prefix to use while storing statsd data. - # (optional; default: 'vmpooler') +# :graphs: +# +# This section contains the server and prefix information for a graphite- +# compatible web front-end where graphs may be viewed. This is used by the +# vmpooler dashboard to retrieve statistics and graphs for a given instance. +# +# NOTE: This is not the endpoint for publishing metrics data. See `graphite:` +# and `statsd:` below. +# +# NOTE: If `graphs:` is not set, for legacy compatibility, `graphite:` will be +# consulted for `server`/`prefix` information to use in locating a +# graph server for our dashboard. `graphs:` is recommended over +# `graphite:` +# +# +# Available configuration parameters: +# +# +# - server +# The FQDN hostname of the statsd daemon. +# (required) +# +# - prefix +# The prefix to use while storing statsd data. +# (optional; default: 'vmpooler') - # :statsd: - # - # This section contains the connection information required to store - # historical data via statsd. This is mutually exclusive with graphite - # and takes precedence. - # - # Available configuration parameters: - # - # - server - # The FQDN hostname of the statsd daemon. - # (required) - # - # - prefix - # The prefix to use while storing statsd data. - # (optional; default: 'vmpooler') - # - # - port - # The UDP port to communicate with the statsd daemon. - # (optional; default: 8125) +# :statsd: +# +# This section contains the connection information required to store +# historical data via statsd. This is mutually exclusive with graphite +# and takes precedence. +# +# Available configuration parameters: +# +# - server +# The FQDN hostname of the statsd daemon. +# (required) +# +# - prefix +# The prefix to use while storing statsd data. +# (optional; default: 'vmpooler') +# +# - port +# The UDP port to communicate with the statsd daemon. +# (optional; default: 8125) - # Example: +# Example: - :statsd: - server: 'statsd.company.com' - prefix: 'vmpooler' - port: 8125 +:statsd: + server: 'statsd.company.com' + prefix: 'vmpooler' + port: 8125 # :graphite: # @@ -309,7 +316,7 @@ # (optional; default: same cluster/host as origin template) # # - task_limit -# The number of concurrent VMware vSphere tasks to perform. +# The number of concurrent VM creation tasks to perform. # (optional; default: '10') # # - timeout @@ -341,21 +348,23 @@ # # - migration_limit # When set to any value greater than 0 enable VM migration at checkout. -# When enabled this capability will evaluate a VM for migration when it is requested +# When enabled this capability will evaluate a VM for migration to a different host when it is requested # in an effort to maintain a more even distribution of load across compute resources. -# The migration_limit ensures that no more than n migrations will be evaluated at any one time +# The migration_limit ensures that no more than the specified migrations will be evaluated at any one time # and greatly reduces the possibilty of VMs ending up bunched together on a particular host. # -# - max_tries -# Set the max number of times a connection should retry in vsphere helper. -# This optional setting allows a user to dial in retry limits to -# suit your environment. +# - max_tries +# Set the max number of times a connection should retry in VM providers. 
+# This optional setting allows a user to dial in retry limits to +# suit your environment. +# (optional; default: 3) # -# - retry_factor -# When retrying, each attempt sleeps for the try count * retry_factor. -# Increase this number to lengthen the delay between retry attempts. -# This is particularly useful for instances with a large number of pools -# to prevent a thundering herd when retrying connections. +# - retry_factor +# When retrying, each attempt sleeps for the try count * retry_factor. +# Increase this number to lengthen the delay between retry attempts. +# This is particularly useful for instances with a large number of pools +# to prevent a thundering herd when retrying connections. +# (optional; default: 10) # Example: @@ -392,18 +401,15 @@ # The template or virtual machine target to spawn clones from. # (required) # -# - folder -# The vSphere 'folder' destination for spawned clones. -# (required) -# -# - datastore -# The vSphere 'datastore' destination for spawned clones. -# (required) -# # - size # The number of waiting VMs to keep in a pool. # (required) # +# - provider +# The name of the VM provider which manages this pool. This should match +# a name in the :providers: section above, e.g. vsphere +# (optional; defaults to vsphere for backwards compatibility) +# # - clone_target # Per-pool option to override the global 'clone_target' cluster. # (optional) @@ -415,8 +421,18 @@ # # - ready_ttl # How long (in minutes) to keep VMs in 'ready' queues before destroying. -# (optional) - +# (optional; default: no limit) +# +# Provider-specific pool settings +# vSphere provider +# - folder +# The vSphere 'folder' destination for spawned clones. +# (required) +# +# - datastore +# The vSphere 'datastore' destination for spawned clones. +# (required) +# # Example: :pools: @@ -428,6 +444,7 @@ size: 5 timeout: 15 ready_ttl: 1440 + provider: vsphere - name: 'debian-7-x86_64' alias: [ 'debian-7-64', 'debian-7-amd64' ] template: 'Templates/debian-7-x86_64' @@ -436,3 +453,4 @@ size: 5 timeout: 15 ready_ttl: 1440 + provider: vsphere From ba686e3c0af863b59882bc2d3181c05a740b366b Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Tue, 4 Apr 2017 17:08:56 -0700 Subject: [PATCH 27/32] (maint) Update VMPooler files with fixes for Rubocop violations This commit updates vmpooler.rb, api.rb and providers.rb with style changes to address Rubocop style violations. This commit also updates the rubocop configuration to always use LF line endings, even on Windows, as rubocop was expecting CRLF even though git is configured for LF.
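As an illustration of the kind of style change involved (a sketch only; the specific cop is not named in this commit, but the change matches Rubocop's preferred percent-literal delimiters), the require loops move from parentheses to square brackets, for example:

    # Before
    %w(base dummy vsphere).each do |lib|
      require "vmpooler/providers/#{lib}"
    end

    # After
    %w[base dummy vsphere].each do |lib|
      require "vmpooler/providers/#{lib}"
    end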
--- .rubocop.yml | 6 ++++++ lib/vmpooler.rb | 2 +- lib/vmpooler/api.rb | 2 +- lib/vmpooler/providers.rb | 2 +- 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/.rubocop.yml b/.rubocop.yml index c5cf24d..ee900f8 100644 --- a/.rubocop.yml +++ b/.rubocop.yml @@ -9,6 +9,8 @@ AllCops: - 'scripts/**/*' - 'spec/**/*' - 'vendor/**/*' + - Gemfile + - Rakefile Style/Documentation: Enabled: false @@ -58,3 +60,7 @@ Style/ConditionalAssignment: Next: Enabled: false +# Enforce LF line endings, even when on Windows +Style/EndOfLine: + EnforcedStyle: lf + diff --git a/lib/vmpooler.rb b/lib/vmpooler.rb index 953dac1..98949fb 100644 --- a/lib/vmpooler.rb +++ b/lib/vmpooler.rb @@ -12,7 +12,7 @@ module Vmpooler require 'yaml' require 'set' - %w(api graphite logger pool_manager vsphere_helper statsd dummy_statsd providers).each do |lib| + %w[api graphite logger pool_manager vsphere_helper statsd dummy_statsd providers].each do |lib| begin require "vmpooler/#{lib}" rescue LoadError diff --git a/lib/vmpooler/api.rb b/lib/vmpooler/api.rb index 0821d9d..25fa7f6 100644 --- a/lib/vmpooler/api.rb +++ b/lib/vmpooler/api.rb @@ -30,7 +30,7 @@ module Vmpooler use Vmpooler::Dashboard # Load API components - %w(helpers dashboard reroute v1).each do |lib| + %w[helpers dashboard reroute v1].each do |lib| begin require "api/#{lib}" rescue LoadError diff --git a/lib/vmpooler/providers.rb b/lib/vmpooler/providers.rb index c1c2071..9d2f0ae 100644 --- a/lib/vmpooler/providers.rb +++ b/lib/vmpooler/providers.rb @@ -1,4 +1,4 @@ -%w(base dummy vsphere).each do |lib| +%w[base dummy vsphere].each do |lib| begin require "vmpooler/providers/#{lib}" rescue LoadError From 0aa550f852f801c9bca987141b58ae9d3f18f839 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Tue, 4 Apr 2017 20:49:01 -0700 Subject: [PATCH 28/32] (POOLER-70) Remove vsphere_helper In previous commits all of the code from vsphere_helper was moved into the vSphere Provider. This commit removes the vsphere_helper.rb file and its spec tests, and stops it from being loaded by vmpooler itself.
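For anyone who was calling the helper directly, the rough equivalent now lives on the provider class. A sketch only: the pool and VM names are taken from the README example, the config/logger/metrics objects are stand-ins, and get_vm is assumed to be the closest provider-API analogue of the old find_vm:

    # Old (removed in this commit)
    helper = Vmpooler::VsphereHelper.new(config, metrics)
    vm = helper.find_vm('vm1')

    # New (via the provider API introduced earlier in this series)
    provider = Vmpooler::PoolManager::Provider::VSphere.new(config, logger, metrics, 'vsphere', {})
    vm = provider.get_vm('debian-7-i386', 'vm1')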
--- lib/vmpooler.rb | 2 +- lib/vmpooler/vsphere_helper.rb | 415 ------ spec/unit/vsphere_helper_spec.rb | 2104 ------------------------------ 3 files changed, 1 insertion(+), 2520 deletions(-) delete mode 100644 lib/vmpooler/vsphere_helper.rb delete mode 100644 spec/unit/vsphere_helper_spec.rb diff --git a/lib/vmpooler.rb b/lib/vmpooler.rb index 98949fb..590f26f 100644 --- a/lib/vmpooler.rb +++ b/lib/vmpooler.rb @@ -12,7 +12,7 @@ module Vmpooler require 'yaml' require 'set' - %w[api graphite logger pool_manager vsphere_helper statsd dummy_statsd providers].each do |lib| + %w[api graphite logger pool_manager statsd dummy_statsd providers].each do |lib| begin require "vmpooler/#{lib}" rescue LoadError diff --git a/lib/vmpooler/vsphere_helper.rb b/lib/vmpooler/vsphere_helper.rb deleted file mode 100644 index 3ad8843..0000000 --- a/lib/vmpooler/vsphere_helper.rb +++ /dev/null @@ -1,415 +0,0 @@ -require 'rubygems' unless defined?(Gem) - -module Vmpooler - class VsphereHelper - ADAPTER_TYPE = 'lsiLogic' - DISK_TYPE = 'thin' - DISK_MODE = 'persistent' - - def initialize(config, metrics) - @credentials = config[:vsphere] - @conf = config[:config] - @metrics = metrics - end - - def ensure_connected(connection, credentials) - connection.serviceInstance.CurrentTime - rescue - connect_to_vsphere @credentials - end - - def connect_to_vsphere(credentials) - max_tries = @conf['max_tries'] || 3 - retry_factor = @conf['retry_factor'] || 10 - try = 1 - begin - @connection = RbVmomi::VIM.connect host: credentials['server'], - user: credentials['username'], - password: credentials['password'], - insecure: credentials['insecure'] || true - @metrics.increment("connect.open") - rescue => err - try += 1 - @metrics.increment("connect.fail") - raise err if try == max_tries - sleep(try * retry_factor) - retry - end - end - - def add_disk(vm, size, datastore) - ensure_connected @connection, @credentials - - return false unless size.to_i > 0 - - vmdk_datastore = find_datastore(datastore) - vmdk_file_name = "#{vm['name']}/#{vm['name']}_#{find_vmdks(vm['name'], datastore).length + 1}.vmdk" - - controller = find_disk_controller(vm) - - vmdk_spec = RbVmomi::VIM::FileBackedVirtualDiskSpec( - capacityKb: size.to_i * 1024 * 1024, - adapterType: ADAPTER_TYPE, - diskType: DISK_TYPE - ) - - vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo( - datastore: vmdk_datastore, - diskMode: DISK_MODE, - fileName: "[#{vmdk_datastore.name}] #{vmdk_file_name}" - ) - - device = RbVmomi::VIM::VirtualDisk( - backing: vmdk_backing, - capacityInKB: size.to_i * 1024 * 1024, - controllerKey: controller.key, - key: -1, - unitNumber: find_disk_unit_number(vm, controller) - ) - - device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec( - device: device, - operation: RbVmomi::VIM::VirtualDeviceConfigSpecOperation('add') - ) - - vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec( - deviceChange: [device_config_spec] - ) - - @connection.serviceContent.virtualDiskManager.CreateVirtualDisk_Task( - datacenter: @connection.serviceInstance.find_datacenter, - name: "[#{vmdk_datastore.name}] #{vmdk_file_name}", - spec: vmdk_spec - ).wait_for_completion - - vm.ReconfigVM_Task(spec: vm_config_spec).wait_for_completion - - true - end - - def find_datastore(datastorename) - ensure_connected @connection, @credentials - - datacenter = @connection.serviceInstance.find_datacenter - datacenter.find_datastore(datastorename) - end - - def find_device(vm, deviceName) - ensure_connected @connection, @credentials - - vm.config.hardware.device.each do 
|device| - return device if device.deviceInfo.label == deviceName - end - - nil - end - - def find_disk_controller(vm) - ensure_connected @connection, @credentials - - devices = find_disk_devices(vm) - - devices.keys.sort.each do |device| - if devices[device]['children'].length < 15 - return find_device(vm, devices[device]['device'].deviceInfo.label) - end - end - - nil - end - - def find_disk_devices(vm) - ensure_connected @connection, @credentials - - devices = {} - - vm.config.hardware.device.each do |device| - if device.is_a? RbVmomi::VIM::VirtualSCSIController - if devices[device.controllerKey].nil? - devices[device.key] = {} - devices[device.key]['children'] = [] - end - - devices[device.key]['device'] = device - end - - if device.is_a? RbVmomi::VIM::VirtualDisk - if devices[device.controllerKey].nil? - devices[device.controllerKey] = {} - devices[device.controllerKey]['children'] = [] - end - - devices[device.controllerKey]['children'].push(device) - end - end - - devices - end - - def find_disk_unit_number(vm, controller) - ensure_connected @connection, @credentials - - used_unit_numbers = [] - available_unit_numbers = [] - - devices = find_disk_devices(vm) - - devices.keys.sort.each do |c| - next unless controller.key == devices[c]['device'].key - used_unit_numbers.push(devices[c]['device'].scsiCtlrUnitNumber) - devices[c]['children'].each do |disk| - used_unit_numbers.push(disk.unitNumber) - end - end - - (0..15).each do |scsi_id| - if used_unit_numbers.grep(scsi_id).length <= 0 - available_unit_numbers.push(scsi_id) - end - end - - available_unit_numbers.sort[0] - end - - def find_folder(foldername) - ensure_connected @connection, @credentials - - datacenter = @connection.serviceInstance.find_datacenter - base = datacenter.vmFolder - folders = foldername.split('/') - folders.each do |folder| - if base.is_a? RbVmomi::VIM::Folder - base = base.childEntity.find { |f| f.name == folder } - else - raise(RuntimeError, "Unexpected object type encountered (#{base.class}) while finding folder") - end - end - - base - end - - # Returns an array containing cumulative CPU and memory utilization of a host, and its object reference - # Params: - # +model+:: CPU arch version to match on - # +limit+:: Hard limit for CPU or memory utilization beyond which a host is excluded for deployments - def get_host_utilization(host, model=nil, limit=90) - if model - return nil unless host_has_cpu_model? 
host, model - end - return nil if host.runtime.inMaintenanceMode - return nil unless host.overallStatus == 'green' - - cpu_utilization = cpu_utilization_for host - memory_utilization = memory_utilization_for host - - return nil if cpu_utilization > limit - return nil if memory_utilization > limit - - [ cpu_utilization + memory_utilization, host ] - end - - def host_has_cpu_model?(host, model) - get_host_cpu_arch_version(host) == model - end - - def get_host_cpu_arch_version(host) - cpu_model = host.hardware.cpuPkg[0].description - cpu_model_parts = cpu_model.split() - arch_version = cpu_model_parts[4] - arch_version - end - - def cpu_utilization_for(host) - cpu_usage = host.summary.quickStats.overallCpuUsage - cpu_size = host.summary.hardware.cpuMhz * host.summary.hardware.numCpuCores - (cpu_usage.to_f / cpu_size.to_f) * 100 - end - - def memory_utilization_for(host) - memory_usage = host.summary.quickStats.overallMemoryUsage - memory_size = host.summary.hardware.memorySize / 1024 / 1024 - (memory_usage.to_f / memory_size.to_f) * 100 - end - - def find_least_used_host(cluster) - ensure_connected @connection, @credentials - - cluster_object = find_cluster(cluster) - target_hosts = get_cluster_host_utilization(cluster_object) - least_used_host = target_hosts.sort[0][1] - least_used_host - end - - def find_cluster(cluster) - datacenter = @connection.serviceInstance.find_datacenter - datacenter.hostFolder.children.find { |cluster_object| cluster_object.name == cluster } - end - - def get_cluster_host_utilization(cluster) - cluster_hosts = [] - cluster.host.each do |host| - host_usage = get_host_utilization(host) - cluster_hosts << host_usage if host_usage - end - cluster_hosts - end - - def find_least_used_compatible_host(vm) - ensure_connected @connection, @credentials - - source_host = vm.summary.runtime.host - model = get_host_cpu_arch_version(source_host) - cluster = source_host.parent - target_hosts = [] - cluster.host.each do |host| - host_usage = get_host_utilization(host, model) - target_hosts << host_usage if host_usage - end - target_host = target_hosts.sort[0][1] - [target_host, target_host.name] - end - - def find_pool(poolname) - ensure_connected @connection, @credentials - - datacenter = @connection.serviceInstance.find_datacenter - base = datacenter.hostFolder - pools = poolname.split('/') - pools.each do |pool| - case - when base.is_a?(RbVmomi::VIM::Folder) - base = base.childEntity.find { |f| f.name == pool } - when base.is_a?(RbVmomi::VIM::ClusterComputeResource) - base = base.resourcePool.resourcePool.find { |f| f.name == pool } - when base.is_a?(RbVmomi::VIM::ResourcePool) - base = base.resourcePool.find { |f| f.name == pool } - else - raise(RuntimeError, "Unexpected object type encountered (#{base.class}) while finding resource pool") - end - end - - base = base.resourcePool unless base.is_a?(RbVmomi::VIM::ResourcePool) && base.respond_to?(:resourcePool) - base - end - - def find_snapshot(vm, snapshotname) - if vm.snapshot - get_snapshot_list(vm.snapshot.rootSnapshotList, snapshotname) - end - end - - def find_vm(vmname) - ensure_connected @connection, @credentials - find_vm_light(vmname) || find_vm_heavy(vmname)[vmname] - end - - def find_vm_light(vmname) - ensure_connected @connection, @credentials - - @connection.searchIndex.FindByDnsName(vmSearch: true, dnsName: vmname) - end - - def find_vm_heavy(vmname) - ensure_connected @connection, @credentials - - vmname = vmname.is_a?(Array) ? 
vmname : [vmname] - containerView = get_base_vm_container_from @connection - propertyCollector = @connection.propertyCollector - - objectSet = [{ - obj: containerView, - skip: true, - selectSet: [RbVmomi::VIM::TraversalSpec.new( - name: 'gettingTheVMs', - path: 'view', - skip: false, - type: 'ContainerView' - )] - }] - - propSet = [{ - pathSet: ['name'], - type: 'VirtualMachine' - }] - - results = propertyCollector.RetrievePropertiesEx( - specSet: [{ - objectSet: objectSet, - propSet: propSet - }], - options: { maxObjects: nil } - ) - - vms = {} - results.objects.each do |result| - name = result.propSet.first.val - next unless vmname.include? name - vms[name] = result.obj - end - - while results.token - results = propertyCollector.ContinueRetrievePropertiesEx(token: results.token) - results.objects.each do |result| - name = result.propSet.first.val - next unless vmname.include? name - vms[name] = result.obj - end - end - - vms - end - - def find_vmdks(vmname, datastore) - ensure_connected @connection, @credentials - - disks = [] - - vmdk_datastore = find_datastore(datastore) - - vm_files = vmdk_datastore._connection.serviceContent.propertyCollector.collectMultiple vmdk_datastore.vm, 'layoutEx.file' - vm_files.keys.each do |f| - vm_files[f]['layoutEx.file'].each do |l| - if l.name.match(/^\[#{vmdk_datastore.name}\] #{vmname}\/#{vmname}_([0-9]+).vmdk/) - disks.push(l) - end - end - end - - disks - end - - def get_base_vm_container_from(connection) - ensure_connected @connection, @credentials - - viewManager = connection.serviceContent.viewManager - viewManager.CreateContainerView( - container: connection.serviceContent.rootFolder, - recursive: true, - type: ['VirtualMachine'] - ) - end - - def get_snapshot_list(tree, snapshotname) - snapshot = nil - - tree.each do |child| - if child.name == snapshotname - snapshot ||= child.snapshot - else - snapshot ||= get_snapshot_list(child.childSnapshotList, snapshotname) - end - end - - snapshot - end - - def migrate_vm_host(vm, host) - relospec = RbVmomi::VIM.VirtualMachineRelocateSpec(host: host) - vm.RelocateVM_Task(spec: relospec).wait_for_completion - end - - def close - @connection.close - end - end -end diff --git a/spec/unit/vsphere_helper_spec.rb b/spec/unit/vsphere_helper_spec.rb deleted file mode 100644 index 18a8915..0000000 --- a/spec/unit/vsphere_helper_spec.rb +++ /dev/null @@ -1,2104 +0,0 @@ -require 'spec_helper' - -RSpec::Matchers.define :relocation_spec_with_host do |value| - match { |actual| actual[:spec].host == value } -end - -RSpec::Matchers.define :create_virtual_disk_with_size do |value| - match { |actual| actual[:spec].capacityKb == value * 1024 * 1024 } -end - -describe 'Vmpooler::VsphereHelper' do - let(:metrics) { Vmpooler::DummyStatsd.new } - let(:config) { YAML.load(<<-EOT ---- -:config: - max_tries: 3 - retry_factor: 10 -:vsphere: - server: "vcenter.domain.local" - username: "vcenter_user" - password: "vcenter_password" - insecure: true -EOT - ) - } - subject { Vmpooler::VsphereHelper.new(config, metrics) } - - let(:credentials) { config[:vsphere] } - - let(:connection_options) {{}} - let(:connection) { mock_RbVmomi_VIM_Connection(connection_options) } - let(:vmname) { 'vm1' } - - describe '#ensure_connected' do - context 'when connection has ok' do - it 'should not attempt to reconnect' do - expect(subject).to receive(:connect_to_vsphere).exactly(0).times - - subject.ensure_connected(connection,credentials) - end - end - - context 'when connection has broken' do - before(:each) do - expect(connection.serviceInstance).to 
receive(:CurrentTime).and_raise(RuntimeError,'MockConnectionError') - end - - it 'should not increment the connect.open metric' do - # https://github.com/puppetlabs/vmpooler/issues/195 - expect(metrics).to receive(:increment).with('connect.open').exactly(0).times - allow(subject).to receive(:connect_to_vsphere) - - subject.ensure_connected(connection,credentials) - end - - it 'should call connect_to_vsphere to reconnect' do - allow(metrics).to receive(:increment) - allow(subject).to receive(:connect_to_vsphere).with(credentials) - - subject.ensure_connected(connection,credentials) - end - end - end - - describe '#connect_to_vsphere' do - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",nil) - - allow(RbVmomi::VIM).to receive(:connect).and_return(connection) - end - - context 'succesful connection' do - it 'should use the supplied credentials' do - expect(RbVmomi::VIM).to receive(:connect).with({ - :host => credentials['server'], - :user => credentials['username'], - :password => credentials['password'], - :insecure => credentials['insecure'] - }).and_return(connection) - subject.connect_to_vsphere(credentials) - end - - it 'should honor the insecure setting' do - pending('Resolution of issue https://github.com/puppetlabs/vmpooler/issues/207') - config[:vsphere][:insecure] = false - - expect(RbVmomi::VIM).to receive(:connect).with({ - :host => credentials['server'], - :user => credentials['username'], - :password => credentials['password'], - :insecure => false, - }).and_return(connection) - subject.connect_to_vsphere(credentials) - end - - it 'should default to an insecure connection' do - config[:vsphere][:insecure] = nil - - expect(RbVmomi::VIM).to receive(:connect).with({ - :host => credentials['server'], - :user => credentials['username'], - :password => credentials['password'], - :insecure => true - }).and_return(connection) - - subject.connect_to_vsphere(credentials) - end - - it 'should set the instance level connection object' do - # NOTE - Using instance_variable_get is a code smell of code that is not testable - expect(subject.instance_variable_get("@connection")).to be_nil - subject.connect_to_vsphere(credentials) - expect(subject.instance_variable_get("@connection")).to be(connection) - end - - it 'should increment the connect.open counter' do - expect(metrics).to receive(:increment).with('connect.open') - subject.connect_to_vsphere(credentials) - end - end - - context 'connection is initially unsuccessful' do - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",nil) - - # Simulate a failure and then success - expect(RbVmomi::VIM).to receive(:connect).and_raise(RuntimeError,'MockError').ordered - expect(RbVmomi::VIM).to receive(:connect).and_return(connection).ordered - - allow(subject).to receive(:sleep) - end - - it 'should set the instance level connection object' do - # NOTE - Using instance_variable_get is a code smell of code that is not testable - expect(subject.instance_variable_get("@connection")).to be_nil - subject.connect_to_vsphere(credentials) - expect(subject.instance_variable_get("@connection")).to be(connection) - end - - it 'should increment the connect.fail and then connect.open counter' do - expect(metrics).to receive(:increment).with('connect.fail').exactly(1).times - expect(metrics).to receive(:increment).with('connect.open').exactly(1).times - 
subject.connect_to_vsphere(credentials) - end - end - - context 'connection is always unsuccessful' do - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",nil) - - allow(RbVmomi::VIM).to receive(:connect).and_raise(RuntimeError,'MockError') - allow(subject).to receive(:sleep) - end - - it 'should raise an error' do - expect{subject.connect_to_vsphere(credentials)}.to raise_error(RuntimeError,'MockError') - end - - it 'should retry the connection attempt config.max_tries times' do - pending('Resolution of issue https://github.com/puppetlabs/vmpooler/issues/199') - expect(RbVmomi::VIM).to receive(:connect).exactly(config[:config]['max_tries']).times.and_raise(RuntimeError,'MockError') - - begin - # Swallow any errors - subject.connect_to_vsphere(credentials) - rescue - end - end - - it 'should increment the connect.fail counter config.max_tries times' do - pending('Resolution of issue https://github.com/puppetlabs/vmpooler/issues/199') - expect(metrics).to receive(:increment).with('connect.fail').exactly(config[:config]['max_tries']).times - - begin - # Swallow any errors - subject.connect_to_vsphere(credentials) - rescue - end - end - - [{:max_tries => 5, :retry_factor => 1}, - {:max_tries => 8, :retry_factor => 5}, - ].each do |testcase| - context "Configuration set for max_tries of #{testcase[:max_tries]} and retry_facter of #{testcase[:retry_factor]}" do - it "should sleep #{testcase[:max_tries] - 1} times between attempts with increasing timeout" do - pending('Resolution of issue https://github.com/puppetlabs/vmpooler/issues/199') - config[:config]['max_tries'] = testcase[:max_tries] - config[:config]['retry_factor'] = testcase[:retry_factor] - - (1..testcase[:max_tries] - 1).each do |try| - expect(subject).to receive(:sleep).with(testcase[:retry_factor] * try).ordered - end - - begin - # Swallow any errors - subject.connect_to_vsphere(credentials) - rescue - end - end - end - end - end - end - - describe '#add_disk' do - let(:datastorename) { 'datastore' } - let(:disk_size) { 30 } - let(:collectMultiple_response) { {} } - - let(:vm_scsi_controller) { mock_RbVmomi_VIM_VirtualSCSIController() } - - # Require at least one SCSI Controller - let(:vm_object) { - mock_vm = mock_RbVmomi_VIM_VirtualMachine({ - :name => vmname, - }) - mock_vm.config.hardware.device << vm_scsi_controller - - mock_vm - } - - # Require at least one DC with the requried datastore - let(:connection_options) {{ - :serviceContent => { - :datacenters => [ - { :name => 'MockDC', :datastores => [datastorename] } - ] - } - }} - - let(:create_virtual_disk_task) { mock_RbVmomi_VIM_Task() } - let(:reconfig_vm_task) { mock_RbVmomi_VIM_Task() } - - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",connection) - - # NOTE - This method should not be using `_connection`, instead it should be using `@conection` - # This should not be required once https://github.com/puppetlabs/vmpooler/issues/213 is resolved - mock_ds = subject.find_datastore(datastorename) - allow(mock_ds).to receive(:_connection).and_return(connection) unless mock_ds.nil? 
- - # Mocking for find_vmdks - allow(connection.serviceContent.propertyCollector).to receive(:collectMultiple).and_return(collectMultiple_response) - - # Mocking for creating the disk - allow(connection.serviceContent.virtualDiskManager).to receive(:CreateVirtualDisk_Task).and_return(create_virtual_disk_task) - allow(create_virtual_disk_task).to receive(:wait_for_completion).and_return(true) - - # Mocking for adding disk to the VM - allow(vm_object).to receive(:ReconfigVM_Task).and_return(reconfig_vm_task) - allow(reconfig_vm_task).to receive(:wait_for_completion).and_return(true) - end - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected).at_least(:once) - - subject.add_disk(vm_object,disk_size,datastorename) - end - - context 'Succesfully addding disk' do - it 'should return true' do - expect(subject.add_disk(vm_object,disk_size,datastorename)).to be true - end - - it 'should request a disk of appropriate size' do - expect(connection.serviceContent.virtualDiskManager).to receive(:CreateVirtualDisk_Task) - .with(create_virtual_disk_with_size(disk_size)) - .and_return(create_virtual_disk_task) - - - subject.add_disk(vm_object,disk_size,datastorename) - end - end - - context 'Requested disk size is 0' do - it 'should raise an error' do - expect(subject.add_disk(vm_object,0,datastorename)).to be false - end - end - - context 'No datastores or datastore missing' do - let(:connection_options) {{ - :serviceContent => { - :datacenters => [ - { :name => 'MockDC', :datastores => ['missing_datastore'] } - ] - } - }} - - it 'should return false' do - expect{ subject.add_disk(vm_object,disk_size,datastorename) }.to raise_error(NoMethodError) - end - end - - context 'VM does not have a SCSI Controller' do - let(:vm_object) { - mock_vm = mock_RbVmomi_VIM_VirtualMachine({ - :name => vmname, - }) - - mock_vm - } - - it 'should raise an error' do - expect{ subject.add_disk(vm_object,disk_size,datastorename) }.to raise_error(NoMethodError) - end - end - end - - describe '#find_datastore' do - let(:datastorename) { 'datastore' } - let(:datastore_list) { [] } - - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",connection) - end - - context 'No datastores in the datacenter' do - let(:connection_options) {{ - :serviceContent => { - :datacenters => [ - { :name => 'MockDC', :datastores => [] } - ] - } - }} - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected) - - subject.find_datastore(datastorename) - end - - it 'should return nil if the datastore is not found' do - result = subject.find_datastore(datastorename) - expect(result).to be_nil - end - end - - context 'Many datastores in the datacenter' do - let(:connection_options) {{ - :serviceContent => { - :datacenters => [ - { :name => 'MockDC', :datastores => ['ds1','ds2',datastorename,'ds3'] } - ] - } - }} - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected) - - subject.find_datastore(datastorename) - end - - it 'should return nil if the datastore is not found' do - result = subject.find_datastore('missing_datastore') - expect(result).to be_nil - end - - it 'should find the datastore in the datacenter' do - result = subject.find_datastore(datastorename) - - expect(result).to_not be_nil - expect(result.is_a?(RbVmomi::VIM::Datastore)).to be true - expect(result.name).to eq(datastorename) - end - end - end - - describe '#find_device' do - let(:devicename) { 
'device1' } - let(:vm_object) { - mock_vm = mock_RbVmomi_VIM_VirtualMachine() - mock_vm.config.hardware.device << mock_RbVmomi_VIM_VirtualMachineDevice({:label => 'device1'}) - mock_vm.config.hardware.device << mock_RbVmomi_VIM_VirtualMachineDevice({:label => 'device2'}) - - mock_vm - } - - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",connection) - end - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected) - - subject.find_device(vm_object,devicename) - end - - it 'should return a device if the device name matches' do - result = subject.find_device(vm_object,devicename) - - expect(result.deviceInfo.label).to eq(devicename) - end - - it 'should return nil if the device name does not match' do - result = subject.find_device(vm_object,'missing_device') - - expect(result).to be_nil - end - end - - describe '#find_disk_controller' do - let(:vm_object) { - mock_vm = mock_RbVmomi_VIM_VirtualMachine() - - mock_vm - } - - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",connection) - end - - it 'should ensure the connection' do - # TODO There's no reason for this as the connection is not used in this method - expect(subject).to receive(:ensure_connected).at_least(:once) - - result = subject.find_disk_controller(vm_object) - end - - it 'should return nil when there are no devices' do - result = subject.find_disk_controller(vm_object) - - expect(result).to be_nil - end - - [0,1,14].each do |testcase| - it "should return a device for a single VirtualSCSIController with #{testcase} attached disks" do - mock_scsi = mock_RbVmomi_VIM_VirtualSCSIController() - vm_object.config.hardware.device << mock_scsi - vm_object.config.hardware.device << mock_RbVmomi_VIM_VirtualMachineDevice({:label => 'device1'}) - vm_object.config.hardware.device << mock_RbVmomi_VIM_VirtualMachineDevice({:label => 'device2'}) - - # Add the disks - (1..testcase).each do - vm_object.config.hardware.device << mock_RbVmomi_VIM_VirtualDisk({ :controllerKey => mock_scsi.key }) - end - - result = subject.find_disk_controller(vm_object) - - expect(result).to eq(mock_scsi) - end - end - - [15].each do |testcase| - it "should return nil for a single VirtualSCSIController with #{testcase} attached disks" do - mock_scsi = mock_RbVmomi_VIM_VirtualSCSIController() - vm_object.config.hardware.device << mock_scsi - vm_object.config.hardware.device << mock_RbVmomi_VIM_VirtualMachineDevice({:label => 'device1'}) - vm_object.config.hardware.device << mock_RbVmomi_VIM_VirtualMachineDevice({:label => 'device2'}) - - # Add the disks - (1..testcase).each do - vm_object.config.hardware.device << mock_RbVmomi_VIM_VirtualDisk({ :controllerKey => mock_scsi.key }) - end - - result = subject.find_disk_controller(vm_object) - - expect(result).to be_nil - end - end - - it 'should raise if a VirtualDisk is missing a controller' do - # Note - Typically this is not possible as a VirtualDisk requires a controller (SCSI, PVSCSI or IDE) - mock_scsi = mock_RbVmomi_VIM_VirtualDisk() - vm_object.config.hardware.device << mock_scsi - vm_object.config.hardware.device << mock_RbVmomi_VIM_VirtualMachineDevice({:label => 'device1'}) - vm_object.config.hardware.device << mock_RbVmomi_VIM_VirtualMachineDevice({:label => 'device2'}) - - expect{subject.find_disk_controller(vm_object)}.to raise_error(NoMethodError) - end - end - - describe 
'#find_disk_devices' do - let(:vm_object) { - mock_vm = mock_RbVmomi_VIM_VirtualMachine() - - mock_vm - } - - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",connection) - end - - it 'should ensure the connection' do - # TODO There's no reason for this as the connection is not used in this method - expect(subject).to receive(:ensure_connected) - - result = subject.find_disk_devices(vm_object) - end - - it 'should return empty hash when there are no devices' do - result = subject.find_disk_devices(vm_object) - - expect(result).to eq({}) - end - - it 'should return empty hash when there are no VirtualSCSIController or VirtualDisk devices' do - vm_object.config.hardware.device << mock_RbVmomi_VIM_VirtualMachineDevice({:label => 'device1'}) - vm_object.config.hardware.device << mock_RbVmomi_VIM_VirtualMachineDevice({:label => 'device2'}) - - result = subject.find_disk_devices(vm_object) - - expect(result).to eq({}) - end - - it 'should return a device for a VirtualSCSIController device with no children' do - mock_scsi = mock_RbVmomi_VIM_VirtualSCSIController() - vm_object.config.hardware.device << mock_scsi - vm_object.config.hardware.device << mock_RbVmomi_VIM_VirtualMachineDevice({:label => 'device1'}) - - result = subject.find_disk_devices(vm_object) - - expect(result.count).to eq(1) - expect(result[mock_scsi.key]).to_not be_nil - expect(result[mock_scsi.key]['children']).to eq([]) - expect(result[mock_scsi.key]['device']).to eq(mock_scsi) - end - - it 'should return a device for a VirtualDisk device' do - mock_disk = mock_RbVmomi_VIM_VirtualDisk() - vm_object.config.hardware.device << mock_disk - vm_object.config.hardware.device << mock_RbVmomi_VIM_VirtualMachineDevice({:label => 'device1'}) - - result = subject.find_disk_devices(vm_object) - - expect(result.count).to eq(1) - expect(result[mock_disk.controllerKey]).to_not be_nil - expect(result[mock_disk.controllerKey]['children'][0]).to eq(mock_disk) - end - - it 'should return one device for many VirtualDisk devices on the same controller' do - controller1Key = rand(2000) - controller2Key = controller1Key + 1 - mock_disk1 = mock_RbVmomi_VIM_VirtualDisk({:controllerKey => controller1Key}) - mock_disk2 = mock_RbVmomi_VIM_VirtualDisk({:controllerKey => controller1Key}) - mock_disk3 = mock_RbVmomi_VIM_VirtualDisk({:controllerKey => controller2Key}) - - vm_object.config.hardware.device << mock_disk2 - vm_object.config.hardware.device << mock_disk1 - vm_object.config.hardware.device << mock_disk3 - - result = subject.find_disk_devices(vm_object) - - expect(result.count).to eq(2) - - expect(result[controller1Key]).to_not be_nil - expect(result[controller2Key]).to_not be_nil - - expect(result[controller1Key]['children']).to contain_exactly(mock_disk1,mock_disk2) - expect(result[controller2Key]['children']).to contain_exactly(mock_disk3) - end - end - - describe '#find_disk_unit_number' do - let(:vm_object) { - mock_vm = mock_RbVmomi_VIM_VirtualMachine() - - mock_vm - } - let(:controller) { mock_RbVmomi_VIM_VirtualSCSIController() } - - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",connection) - end - - it 'should ensure the connection' do - # TODO There's no reason for this as the connection is not used in this method - expect(subject).to receive(:ensure_connected).at_least(:once) - - result = subject.find_disk_unit_number(vm_object,controller) - 
end - - it 'should return 0 when there are no devices' do - result = subject.find_disk_unit_number(vm_object,controller) - - expect(result).to eq(0) - end - - context 'with a single SCSI Controller' do - before(:each) do - vm_object.config.hardware.device << controller - end - - it 'should return 1 when the host bus controller is at 0' do - controller.scsiCtlrUnitNumber = 0 - - result = subject.find_disk_unit_number(vm_object,controller) - - expect(result).to eq(1) - end - - it 'should return the next lowest id when disks are attached' do - expected_id = 9 - controller.scsiCtlrUnitNumber = 0 - - (1..expected_id-1).each do |disk_id| - mock_disk = mock_RbVmomi_VIM_VirtualDisk({ - :controllerKey => controller.key, - :unitNumber => disk_id, - }) - vm_object.config.hardware.device << mock_disk - end - result = subject.find_disk_unit_number(vm_object,controller) - - expect(result).to eq(expected_id) - end - - it 'should return nil when there are no spare units' do - controller.scsiCtlrUnitNumber = 0 - - (1..15).each do |disk_id| - mock_disk = mock_RbVmomi_VIM_VirtualDisk({ - :controllerKey => controller.key, - :unitNumber => disk_id, - }) - vm_object.config.hardware.device << mock_disk - end - result = subject.find_disk_unit_number(vm_object,controller) - - expect(result).to eq(nil) - end - end - end - - describe '#find_folder' do - let(:foldername) { 'folder'} - let(:missing_foldername) { 'missing_folder'} - - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",connection) - allow(connection.serviceInstance).to receive(:find_datacenter).and_return(datacenter_object) - end - - context 'with no folder hierarchy' do - let(:datacenter_object) { mock_RbVmomi_VIM_Datacenter() } - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected) - - subject.find_folder(foldername) - end - - it 'should return nil if the folder is not found' do - expect(subject.find_folder(missing_foldername)).to be_nil - end - end - - context 'with a single layer folder hierarchy' do - let(:datacenter_object) { mock_RbVmomi_VIM_Datacenter({ - :vmfolder_tree => { - 'folder1' => nil, - 'folder2' => nil, - foldername => nil, - 'folder3' => nil, - } - }) } - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected) - - subject.find_folder(foldername) - end - - it 'should return the folder when found' do - result = subject.find_folder(foldername) - expect(result).to_not be_nil - expect(result.name).to eq(foldername) - end - - it 'should return nil if the folder is not found' do - expect(subject.find_folder(missing_foldername)).to be_nil - end - end - - context 'with a VM with the same name as a folder in a single layer folder hierarchy' do - # The folder hierarchy should include a VM with same name as folder, and appear BEFORE the - # folder in the child list. - let(:datacenter_object) { mock_RbVmomi_VIM_Datacenter({ - :vmfolder_tree => { - 'folder1' => nil, - 'vm1' => { :object_type => 'vm', :name => foldername }, - foldername => nil, - 'folder3' => nil, - } - }) } - - it 'should not return a VM' do - pending('https://github.com/puppetlabs/vmpooler/issues/204') - result = subject.find_folder(foldername) - expect(result).to_not be_nil - expect(result.name).to eq(foldername) - expect(result.is_a? 
RbVmomi::VIM::VirtualMachine).to be false - end - end - - context 'with a multi layer folder hierarchy' do - let(:end_folder_name) { 'folder'} - let(:foldername) { 'folder2/folder4/' + end_folder_name} - let(:datacenter_object) { mock_RbVmomi_VIM_Datacenter({ - :vmfolder_tree => { - 'folder1' => nil, - 'folder2' => { - :children => { - 'folder3' => nil, - 'folder4' => { - :children => { - end_folder_name => nil, - }, - } - }, - }, - 'folder5' => nil, - } - }) } - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected) - - subject.find_folder(foldername) - end - - it 'should return the folder when found' do - result = subject.find_folder(foldername) - expect(result).to_not be_nil - expect(result.name).to eq(end_folder_name) - end - - it 'should return nil if the folder is not found' do - expect(subject.find_folder(missing_foldername)).to be_nil - end - end - - context 'with a VM with the same name as a folder in a multi layer folder hierarchy' do - # The folder hierarchy should include a VM with same name as folder mid-hierarchy (i.e. not at the end level) - # and appear BEFORE the folder in the child list. - let(:end_folder_name) { 'folder'} - let(:foldername) { 'folder2/folder4/' + end_folder_name} - let(:datacenter_object) { mock_RbVmomi_VIM_Datacenter({ - :vmfolder_tree => { - 'folder1' => nil, - 'folder2' => { - :children => { - 'folder3' => nil, - 'vm1' => { :object_type => 'vm', :name => 'folder4' }, - 'folder4' => { - :children => { - end_folder_name => nil, - }, - } - }, - }, - 'folder5' => nil, - } - }) } - - it 'should not return a VM' do - pending('https://github.com/puppetlabs/vmpooler/issues/204') - result = subject.find_folder(foldername) - expect(result).to_not be_nil - expect(result.name).to eq(foldername) - expect(result.is_a? 
RbVmomi::VIM::VirtualMachine).to be false - end - end - end - - describe '#get_host_utilization' do - let(:cpu_model) { 'vendor line type sku v4 speed' } - let(:model) { 'v4' } - let(:different_model) { 'different_model' } - let(:limit) { 80 } - let(:default_limit) { 90 } - - context "host with a different model" do - let(:host) { mock_RbVmomi_VIM_HostSystem() } - it 'should return nil' do - expect(subject.get_host_utilization(host,different_model,limit)).to be_nil - end - end - - context "host in maintenance mode" do - let(:host) { mock_RbVmomi_VIM_HostSystem({ - :maintenance_mode => true, - }) - } - it 'should return nil' do - host.runtime.inMaintenanceMode = true - - expect(subject.get_host_utilization(host,model,limit)).to be_nil - end - end - - context "host with status of not green" do - let(:host) { mock_RbVmomi_VIM_HostSystem({ - :overall_status => 'purple_alert', - }) - } - it 'should return nil' do - expect(subject.get_host_utilization(host,model,limit)).to be_nil - end - end - - # CPU utilization - context "host which exceeds limit in CPU utilization" do - let(:host) { mock_RbVmomi_VIM_HostSystem({ - :overall_cpu_usage => 100, - :overall_memory_usage => 1, - :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024, - }) - } - it 'should return nil' do - expect(subject.get_host_utilization(host,model,limit)).to be_nil - end - end - - context "host which exceeds default limit in CPU utilization" do - let(:host) { mock_RbVmomi_VIM_HostSystem({ - :overall_cpu_usage => default_limit + 1.0, - :overall_memory_usage => 1, - :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024, - }) - } - it 'should return nil' do - expect(subject.get_host_utilization(host,model)).to be_nil - end - end - - context "host which does not exceed default limit in CPU utilization" do - let(:host) { mock_RbVmomi_VIM_HostSystem({ - :overall_cpu_usage => default_limit, - :overall_memory_usage => 1, - :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024, - }) - } - it 'should not return nil' do - expect(subject.get_host_utilization(host,model)).to_not be_nil - end - end - - # Memory utilization - context "host which exceeds limit in Memory utilization" do - let(:host) { mock_RbVmomi_VIM_HostSystem({ - :overall_cpu_usage => 1, - :overall_memory_usage => 100, - :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024, - }) - } - it 'should return nil' do - # Set the Memory Usage to 100% - expect(subject.get_host_utilization(host,model,limit)).to be_nil - end - end - - context "host which exceeds default limit in Memory utilization" do - let(:host) { mock_RbVmomi_VIM_HostSystem({ - :overall_cpu_usage => 1, - :overall_memory_usage => default_limit + 1.0, - :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024, - }) - } - it 'should return nil' do - expect(subject.get_host_utilization(host,model)).to be_nil - end - end - - context "host which does not exceed default limit in Memory utilization" do - let(:host) { mock_RbVmomi_VIM_HostSystem({ - :overall_cpu_usage => 1, - :overall_memory_usage => default_limit, - :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024, - }) - } - it 'should not return nil' do - expect(subject.get_host_utilization(host,model)).to_not be_nil - end - end - - context "host which does not exceed limits" do - # Set CPU to 10% - # Set Memory to 20% - 
let(:host) { mock_RbVmomi_VIM_HostSystem({ - :overall_cpu_usage => 10, - :overall_memory_usage => 20, - :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024, - }) - } - it 'should return the sum of CPU and Memory utilization' do - expect(subject.get_host_utilization(host,model,limit)[0]).to eq(10 + 20) - end - - it 'should return the host' do - expect(subject.get_host_utilization(host,model,limit)[1]).to eq(host) - end - end - end - - describe '#host_has_cpu_model?' do - let(:cpu_model) { 'vendor line type sku v4 speed' } - let(:model) { 'v4' } - let(:different_model) { 'different_model' } - let(:host) { mock_RbVmomi_VIM_HostSystem({ - :cpu_model => cpu_model, - }) - } - - it 'should return true if the model matches' do - expect(subject.host_has_cpu_model?(host,model)).to eq(true) - end - - it 'should return false if the model is different' do - expect(subject.host_has_cpu_model?(host,different_model)).to eq(false) - end - end - - describe '#get_host_cpu_arch_version' do - let(:cpu_model) { 'vendor line type sku v4 speed' } - let(:model) { 'v4' } - let(:different_model) { 'different_model' } - let(:host) { mock_RbVmomi_VIM_HostSystem({ - :cpu_model => cpu_model, - :num_cpu => 2, - }) - } - - it 'should return the fifth element in the string delimited by spaces' do - expect(subject.get_host_cpu_arch_version(host)).to eq(model) - end - - it 'should use the description of the first CPU' do - host.hardware.cpuPkg[0].description = 'vendor line type sku v6 speed' - expect(subject.get_host_cpu_arch_version(host)).to eq('v6') - end - end - - describe '#cpu_utilization_for' do - [{ :cpu_usage => 10.0, - :core_speed => 10.0, - :num_cores => 2, - :expected_value => 50.0, - }, - { :cpu_usage => 10.0, - :core_speed => 10.0, - :num_cores => 4, - :expected_value => 25.0, - }, - { :cpu_usage => 14.0, - :core_speed => 12.0, - :num_cores => 5, - :expected_value => 23.0 + 1.0/3.0, - }, - ].each do |testcase| - context "CPU Usage of #{testcase[:cpu_usage]}MHz with #{testcase[:num_cores]} x #{testcase[:core_speed]}MHz cores" do - it "should be #{testcase[:expected_value]}%" do - host = mock_RbVmomi_VIM_HostSystem({ - :num_cores_per_cpu => testcase[:num_cores], - :cpu_speed => testcase[:core_speed], - :overall_cpu_usage => testcase[:cpu_usage], - }) - - expect(subject.cpu_utilization_for(host)).to eq(testcase[:expected_value]) - end - end - end - end - - describe '#memory_utilization_for' do - [{ :memory_usage_gigbytes => 10.0, - :memory_size_bytes => 10.0 * 1024 * 1024, - :expected_value => 100.0, - }, - { :memory_usage_gigbytes => 15.0, - :memory_size_bytes => 25.0 * 1024 * 1024, - :expected_value => 60.0, - }, - { :memory_usage_gigbytes => 9.0, - :memory_size_bytes => 31.0 * 1024 * 1024, - :expected_value => 29.03225806451613, - }, - ].each do |testcase| - context "Memory Usage of #{testcase[:memory_usage_gigbytes]}GBytes with #{testcase[:memory_size_bytes]}Bytes of total memory" do - it "should be #{testcase[:expected_value]}%" do - host = mock_RbVmomi_VIM_HostSystem({ - :memory_size => testcase[:memory_size_bytes], - :overall_memory_usage => testcase[:memory_usage_gigbytes], - }) - - expect(subject.memory_utilization_for(host)).to eq(testcase[:expected_value]) - end - end - end - end - - describe '#find_least_used_host' do - let(:cluster_name) { 'cluster' } - let(:missing_cluster_name) { 'missing_cluster' } - let(:datacenter_object) { mock_RbVmomi_VIM_Datacenter() } - - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not 
testable - subject.instance_variable_set("@connection",connection) - - # This mocking is a little fragile but hard to do without a real vCenter instance - allow(connection.serviceInstance).to receive(:find_datacenter).and_return(datacenter_object) - datacenter_object.hostFolder.childEntity = [cluster_object] - end - - context 'missing cluster' do - let(:cluster_object) { mock_RbVmomi_VIM_ComputeResource({ - :name => cluster_name, - :hosts => [{ - :name => cluster_name, - }]})} - let(:expected_host) { cluster_object.host[0] } - - it 'should raise an error' do - expect{subject.find_least_used_host(missing_cluster_name)}.to raise_error(NoMethodError,/undefined method/) - end - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected) - - expect{subject.find_least_used_host(missing_cluster_name)}.to raise_error(NoMethodError) - end - end - - context 'standalone host within limits' do - let(:cluster_object) { mock_RbVmomi_VIM_ComputeResource({ - :name => cluster_name, - :hosts => [{ - :name => cluster_name, - }]})} - let(:expected_host) { cluster_object.host[0] } - - it 'should return the standalone host' do - result = subject.find_least_used_host(cluster_name) - - expect(result).to be(expected_host) - end - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected) - - result = subject.find_least_used_host(cluster_name) - end - end - - context 'standalone host outside the limits' do - let(:cluster_object) { mock_RbVmomi_VIM_ComputeResource({ - :name => cluster_name, - :hosts => [{ - :name => cluster_name, - :overall_cpu_usage => 100, :overall_memory_usage => 100, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024, - }]})} - let(:expected_host) { cluster_object.host[0] } - - it 'should raise an error' do - expect{subject.find_least_used_host(missing_cluster_name)}.to raise_error(NoMethodError,/undefined method/) - end - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected) - - expect{subject.find_least_used_host(missing_cluster_name)}.to raise_error(NoMethodError) - end - end - - context 'cluster of 3 hosts within limits' do - let(:cluster_object) { mock_RbVmomi_VIM_ComputeResource({ - :name => cluster_name, - :hosts => [ - { :overall_cpu_usage => 11, :overall_memory_usage => 11, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 1, :overall_memory_usage => 1, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 21, :overall_memory_usage => 21, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - ]}) } - let(:expected_host) { cluster_object.host[1] } - - it 'should return the standalone host' do - result = subject.find_least_used_host(cluster_name) - - expect(result).to be(expected_host) - end - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected) - - result = subject.find_least_used_host(cluster_name) - end - end - - context 'cluster of 3 hosts all outside of the limits' do - let(:cluster_object) { mock_RbVmomi_VIM_ComputeResource({ - :name => cluster_name, - :hosts => [ - { :overall_cpu_usage => 100, :overall_memory_usage => 100, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 100, :overall_memory_usage => 100, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 
1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 100, :overall_memory_usage => 100, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - ]}) } - let(:expected_host) { cluster_object.host[1] } - - it 'should raise an error' do - expect{subject.find_least_used_host(missing_cluster_name)}.to raise_error(NoMethodError,/undefined method/) - end - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected) - - expect{subject.find_least_used_host(missing_cluster_name)}.to raise_error(NoMethodError) - end - end - - context 'cluster of 5 hosts of which one is out of limits and one has wrong CPU type' do - let(:cluster_object) { mock_RbVmomi_VIM_ComputeResource({ - :name => cluster_name, - :hosts => [ - { :overall_cpu_usage => 31, :overall_memory_usage => 31, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :cpu_model => 'different cpu model', :overall_cpu_usage => 1, :overall_memory_usage => 1, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 11, :overall_memory_usage => 11, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 100, :overall_memory_usage => 100, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 21, :overall_memory_usage => 21, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - ]}) } - let(:expected_host) { cluster_object.host[1] } - - it 'should return the standalone host' do - result = subject.find_least_used_host(cluster_name) - - expect(result).to be(expected_host) - end - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected) - - result = subject.find_least_used_host(cluster_name) - end - end - - context 'cluster of 3 hosts all outside of the limits' do - let(:cluster_object) { mock_RbVmomi_VIM_ComputeResource({ - :name => cluster_name, - :hosts => [ - { :overall_cpu_usage => 10, :overall_memory_usage => 10, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 10, :overall_memory_usage => 10, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 10, :overall_memory_usage => 10, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - ]}) } - let(:expected_host) { cluster_object.host[1] } - - it 'should return a host' do - pending('https://github.com/puppetlabs/vmpooler/issues/206') - result = subject.find_least_used_host(missing_cluster_name) - expect(result).to_not be_nil - end - - it 'should ensure the connection' do - pending('https://github.com/puppetlabs/vmpooler/issues/206') - expect(subject).to receive(:ensure_connected) - - result = subject.find_least_used_host(cluster_name) - end - end - end - - describe '#find_cluster' do - let(:cluster) {'cluster'} - let(:missing_cluster) {'missing_cluster'} - - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",connection) - allow(connection.serviceInstance).to receive(:find_datacenter).and_return(datacenter_object) - end - - context 'no clusters in the datacenter' do - let(:datacenter_object) { 
mock_RbVmomi_VIM_Datacenter() } - - before(:each) do - end - - it 'should return nil if the cluster is not found' do - expect(subject.find_cluster(missing_cluster)).to be_nil - end - end - - context 'with a single layer folder hierarchy' do - let(:datacenter_object) { mock_RbVmomi_VIM_Datacenter({ - :hostfolder_tree => { - 'cluster1' => {:object_type => 'compute_resource'}, - 'cluster2' => {:object_type => 'compute_resource'}, - cluster => {:object_type => 'compute_resource'}, - 'cluster3' => {:object_type => 'compute_resource'}, - } - }) } - - it 'should return the cluster when found' do - result = subject.find_cluster(cluster) - - expect(result).to_not be_nil - expect(result.name).to eq(cluster) - end - - it 'should return nil if the cluster is not found' do - expect(subject.find_cluster(missing_cluster)).to be_nil - end - end - - context 'with a multi layer folder hierarchy' do - let(:datacenter_object) { mock_RbVmomi_VIM_Datacenter({ - :hostfolder_tree => { - 'cluster1' => {:object_type => 'compute_resource'}, - 'folder2' => { - :children => { - cluster => {:object_type => 'compute_resource'}, - } - }, - 'cluster3' => {:object_type => 'compute_resource'}, - } - }) } - - it 'should return the cluster when found' do - pending('https://github.com/puppetlabs/vmpooler/issues/205') - result = subject.find_cluster(cluster) - - expect(result).to_not be_nil - expect(result.name).to eq(cluster) - end - - it 'should return nil if the cluster is not found' do - expect(subject.find_cluster(missing_cluster)).to be_nil - end - end - end - - describe '#get_cluster_host_utilization' do - context 'standalone host within limits' do - let(:cluster_object) { mock_RbVmomi_VIM_ComputeResource({:hosts => [{}]}) } - - it 'should return array with one element' do - result = subject.get_cluster_host_utilization(cluster_object) - expect(result).to_not be_nil - expect(result.count).to eq(1) - end - end - - context 'standalone host which is out the limits' do - let(:cluster_object) { mock_RbVmomi_VIM_ComputeResource({:hosts => [ - { :overall_cpu_usage => 100, :overall_memory_usage => 100, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - ]}) } - - it 'should return array with 0 elements' do - result = subject.get_cluster_host_utilization(cluster_object) - expect(result).to_not be_nil - expect(result.count).to eq(0) - end - end - - context 'cluster with 3 hosts within limits' do - let(:cluster_object) { mock_RbVmomi_VIM_ComputeResource({:hosts => [ - { :overall_cpu_usage => 1, :overall_memory_usage => 1, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 11, :overall_memory_usage => 11, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 21, :overall_memory_usage => 21, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - ]}) } - - it 'should return array with 3 elements' do - result = subject.get_cluster_host_utilization(cluster_object) - expect(result).to_not be_nil - expect(result.count).to eq(3) - end - end - - context 'cluster with 5 hosts of which 3 within limits' do - let(:cluster_object) { mock_RbVmomi_VIM_ComputeResource({:hosts => [ - { :overall_cpu_usage => 1, :overall_memory_usage => 1, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 100, :overall_memory_usage => 100, :cpu_speed => 100, 
:num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 11, :overall_memory_usage => 11, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 21, :overall_memory_usage => 21, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 100, :overall_memory_usage => 100, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - ]}) } - - it 'should return array with 3 elements' do - result = subject.get_cluster_host_utilization(cluster_object) - expect(result).to_not be_nil - expect(result.count).to eq(3) - end - end - - context 'cluster with 3 hosts of which none are within the limits' do - let(:cluster_object) { mock_RbVmomi_VIM_ComputeResource({:hosts => [ - { :overall_cpu_usage => 100, :overall_memory_usage => 100, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 100, :overall_memory_usage => 100, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 100, :overall_memory_usage => 100, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - ]}) } - - it 'should return array with 0 elements' do - result = subject.get_cluster_host_utilization(cluster_object) - expect(result).to_not be_nil - expect(result.count).to eq(0) - end - end - end - - describe '#find_least_used_compatible_host' do - let(:vm) { mock_RbVmomi_VIM_VirtualMachine() } - - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",connection) - end - - context 'standalone host within limits' do - let(:cluster_object) { mock_RbVmomi_VIM_ComputeResource({:hosts => [{}]}) } - let(:standalone_host) { cluster_object.host[0] } - - before(:each) do - # This mocking is a little fragile but hard to do without a real vCenter instance - vm.summary.runtime.host = standalone_host - end - - it 'should return the standalone host' do - result = subject.find_least_used_compatible_host(vm) - - expect(result).to_not be_nil - expect(result[0]).to be(standalone_host) - expect(result[1]).to eq(standalone_host.name) - end - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected) - - result = subject.find_least_used_compatible_host(vm) - end - end - - context 'standalone host outside of limits' do - let(:cluster_object) { mock_RbVmomi_VIM_ComputeResource({:hosts => [ - { :overall_cpu_usage => 100, :overall_memory_usage => 100, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - ]}) } - let(:standalone_host) { cluster_object.host[0] } - - before(:each) do - # This mocking is a little fragile but hard to do without a real vCenter instance - vm.summary.runtime.host = standalone_host - end - - it 'should raise error' do - expect{subject.find_least_used_compatible_host(vm)}.to raise_error(NoMethodError,/undefined method/) - end - end - - context 'cluster of 3 hosts within limits' do - let(:cluster_object) { mock_RbVmomi_VIM_ComputeResource({:hosts => [ - { :overall_cpu_usage => 11, :overall_memory_usage => 11, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 1, :overall_memory_usage => 1, :cpu_speed => 100, 
:num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 21, :overall_memory_usage => 21, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - ]}) } - let(:expected_host) { cluster_object.host[1] } - - before(:each) do - # This mocking is a little fragile but hard to do without a real vCenter instance - vm.summary.runtime.host = expected_host - end - - it 'should return the least used host' do - result = subject.find_least_used_compatible_host(vm) - - expect(result).to_not be_nil - expect(result[0]).to be(expected_host) - expect(result[1]).to eq(expected_host.name) - end - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected) - - result = subject.find_least_used_compatible_host(vm) - end - end - - context 'cluster of 3 hosts all outside of the limits' do - let(:cluster_object) { mock_RbVmomi_VIM_ComputeResource({:hosts => [ - { :overall_cpu_usage => 100, :overall_memory_usage => 100, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 100, :overall_memory_usage => 100, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 100, :overall_memory_usage => 100, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - ]}) } - let(:expected_host) { cluster_object.host[1] } - - before(:each) do - # This mocking is a little fragile but hard to do without a real vCenter instance - vm.summary.runtime.host = expected_host - end - - it 'should raise error' do - expect{subject.find_least_used_compatible_host(vm)}.to raise_error(NoMethodError,/undefined method/) - end - end - - context 'cluster of 5 hosts of which one is out of limits and one has wrong CPU type' do - let(:cluster_object) { mock_RbVmomi_VIM_ComputeResource({:hosts => [ - { :overall_cpu_usage => 31, :overall_memory_usage => 31, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :cpu_model => 'different cpu model', :overall_cpu_usage => 1, :overall_memory_usage => 1, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 11, :overall_memory_usage => 11, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 100, :overall_memory_usage => 100, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 21, :overall_memory_usage => 21, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - ]}) } - let(:expected_host) { cluster_object.host[2] } - - before(:each) do - # This mocking is a little fragile but hard to do without a real vCenter instance - vm.summary.runtime.host = expected_host - end - - it 'should return the least used host' do - result = subject.find_least_used_compatible_host(vm) - - expect(result).to_not be_nil - expect(result[0]).to be(expected_host) - expect(result[1]).to eq(expected_host.name) - end - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected) - - result = subject.find_least_used_compatible_host(vm) - end - end - - context 'cluster of 3 hosts all with the same utilisation' do - let(:cluster_object) { mock_RbVmomi_VIM_ComputeResource({:hosts => [ - { :overall_cpu_usage => 10, 
:overall_memory_usage => 10, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 10, :overall_memory_usage => 10, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - { :overall_cpu_usage => 10, :overall_memory_usage => 10, :cpu_speed => 100, :num_cores_per_cpu => 1, :num_cpu => 1, :memory_size => 100.0 * 1024 * 1024 }, - ]}) } - let(:expected_host) { cluster_object.host[1] } - - before(:each) do - # This mocking is a little fragile but hard to do without a real vCenter instance - vm.summary.runtime.host = expected_host - end - - it 'should return a host' do - pending('https://github.com/puppetlabs/vmpooler/issues/206 is fixed') - result = subject.find_least_used_compatible_host(vm) - - expect(result).to_not be_nil - end - - it 'should ensure the connection' do - pending('https://github.com/puppetlabs/vmpooler/issues/206 is fixed') - expect(subject).to receive(:ensure_connected) - - result = subject.find_least_used_compatible_host(vm) - end - end - end - - describe '#find_pool' do - let(:poolname) { 'pool'} - let(:missing_poolname) { 'missing_pool'} - - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",connection) - allow(connection.serviceInstance).to receive(:find_datacenter).and_return(datacenter_object) - end - - context 'with empty folder hierarchy' do - let(:datacenter_object) { mock_RbVmomi_VIM_Datacenter() } - - it 'should ensure the connection' do - pending('https://github.com/puppetlabs/vmpooler/issues/209') - expect(subject).to receive(:ensure_connected) - - subject.find_pool(poolname) - end - - it 'should return nil if the pool is not found' do - pending('https://github.com/puppetlabs/vmpooler/issues/209') - expect(subject.find_pool(missing_poolname)).to be_nil - end - end - - [ - # Single layer Host folder hierarchy - { - :context => 'single layer folder hierarchy with a resource pool', - :poolpath => 'pool', - :poolname => 'pool', - :hostfolder_tree => { - 'folder1' => nil, - 'folder2' => nil, - 'pool' => {:object_type => 'resource_pool'}, - 'folder3' => nil, - }, - }, - { - :context => 'single layer folder hierarchy with a child resource pool', - :poolpath => 'parentpool/pool', - :poolname => 'pool', - :hostfolder_tree => { - 'folder1' => nil, - 'folder2' => nil, - 'parentpool' => {:object_type => 'resource_pool', :children => { - 'pool' => {:object_type => 'resource_pool'}, - }}, - 'folder3' => nil, - }, - }, - { - :context => 'single layer folder hierarchy with a resource pool within a cluster', - :poolpath => 'cluster/pool', - :poolname => 'pool', - :hostfolder_tree => { - 'folder1' => nil, - 'folder2' => nil, - 'cluster' => {:object_type => 'cluster_compute_resource', :children => { - 'pool' => {:object_type => 'resource_pool'}, - }}, - 'folder3' => nil, - }, - }, - # Multi layer Host folder hierarchy - { - :context => 'multi layer folder hierarchy with a resource pool', - :poolpath => 'folder2/folder4/pool', - :poolname => 'pool', - :hostfolder_tree => { - 'folder1' => nil, - 'folder2' => { :children => { - 'folder3' => nil, - 'folder4' => { :children => { - 'pool' => {:object_type => 'resource_pool'}, - }}, - }}, - 'folder5' => nil, - }, - }, - { - :context => 'multi layer folder hierarchy with a child resource pool', - :poolpath => 'folder2/folder4/parentpool/pool', - :poolname => 'pool', - :hostfolder_tree => { - 'folder1' => nil, - 'folder2' => { 
:children => { - 'folder3' => nil, - 'folder4' => { :children => { - 'parentpool' => {:object_type => 'resource_pool', :children => { - 'pool' => {:object_type => 'resource_pool'}, - }}, - }}, - }}, - 'folder5' => nil, - }, - }, - { - :context => 'multi layer folder hierarchy with a resource pool within a cluster', - :poolpath => 'folder2/folder4/cluster/pool', - :poolname => 'pool', - :hostfolder_tree => { - 'folder1' => nil, - 'folder2' => { :children => { - 'folder3' => nil, - 'folder4' => { :children => { - 'cluster' => {:object_type => 'cluster_compute_resource', :children => { - 'pool' => {:object_type => 'resource_pool'}, - }}, - }}, - }}, - 'folder5' => nil, - }, - }, - ].each do |testcase| - context testcase[:context] do - let(:datacenter_object) { mock_RbVmomi_VIM_Datacenter({ :hostfolder_tree => testcase[:hostfolder_tree]}) } - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected) - - subject.find_pool(testcase[:poolpath]) - end - - it 'should return the pool when found' do - result = subject.find_pool(testcase[:poolpath]) - - expect(result).to_not be_nil - expect(result.name).to eq(testcase[:poolname]) - expect(result.is_a?(RbVmomi::VIM::ResourcePool)).to be true - end - - it 'should return nil if the poolname is not found' do - pending('https://github.com/puppetlabs/vmpooler/issues/209') - expect(subject.find_pool(missing_poolname)).to be_nil - end - end - end - - # Tests for issue https://github.com/puppetlabs/vmpooler/issues/210 - [ - { - :context => 'multi layer folder hierarchy with a resource pool the same name as a folder', - :poolpath => 'folder2/folder4/cluster/pool', - :poolname => 'pool', - :hostfolder_tree => { - 'folder1' => nil, - 'folder2' => { :children => { - 'folder3' => nil, - 'bad_pool' => {:object_type => 'resource_pool', :name => 'folder4'}, - 'folder4' => { :children => { - 'cluster' => {:object_type => 'cluster_compute_resource', :children => { - 'pool' => {:object_type => 'resource_pool'}, - }}, - }}, - }}, - 'folder5' => nil, - }, - }, - { - :context => 'multi layer folder hierarchy with a cluster the same name as a folder', - :poolpath => 'folder2/folder4/cluster/pool', - :poolname => 'pool', - :hostfolder_tree => { - 'folder1' => nil, - 'folder2' => { :children => { - 'folder3' => nil, - 'bad_cluster' => {:object_type => 'cluster_compute_resource', :name => 'folder4'}, - 'folder4' => { :children => { - 'cluster' => {:object_type => 'cluster_compute_resource', :children => { - 'pool' => {:object_type => 'resource_pool'}, - }}, - }}, - }}, - 'folder5' => nil, - }, - }, - ].each do |testcase| - context testcase[:context] do - let(:datacenter_object) { mock_RbVmomi_VIM_Datacenter({ :hostfolder_tree => testcase[:hostfolder_tree]}) } - - it 'should ensure the connection' do - pending('https://github.com/puppetlabs/vmpooler/issues/210') - expect(subject).to receive(:ensure_connected) - - subject.find_pool(testcase[:poolpath]) - end - - it 'should return the pool when found' do - pending('https://github.com/puppetlabs/vmpooler/issues/210') - result = subject.find_pool(testcase[:poolpath]) - - expect(result).to_not be_nil - expect(result.name).to eq(testcase[:poolname]) - expect(result.is_a?(RbVmomi::VIM::ResourcePool)).to be true - end - end - end - end - - describe '#find_snapshot' do - let(:snapshot_name) {'snapshot'} - let(:missing_snapshot_name) {'missing_snapshot'} - let(:vm) { mock_RbVmomi_VIM_VirtualMachine(mock_options) } - let(:snapshot_object) { mock_RbVmomi_VIM_VirtualMachine() } - - context 'VM with no snapshots' 
do - let(:mock_options) {{ :snapshot_tree => nil }} - it 'should return nil' do - expect(subject.find_snapshot(vm,snapshot_name)).to be_nil - end - end - - context 'VM with a single layer of snapshots' do - let(:mock_options) {{ - :snapshot_tree => { - 'snapshot1' => nil, - 'snapshot2' => nil, - 'snapshot3' => nil, - 'snapshot4' => nil, - snapshot_name => { :ref => snapshot_object}, - } - }} - - it 'should return snapshot which matches the name' do - result = subject.find_snapshot(vm,snapshot_name) - expect(result).to be(snapshot_object) - end - - it 'should return nil which no matches are found' do - result = subject.find_snapshot(vm,missing_snapshot_name) - expect(result).to be_nil - end - end - - context 'VM with a nested layers of snapshots' do - let(:mock_options) {{ - :snapshot_tree => { - 'snapshot1' => nil, - 'snapshot2' => nil, - 'snapshot3' => { :children => { - 'snapshot4' => nil, - 'snapshot5' => { :children => { - snapshot_name => { :ref => snapshot_object}, - }}, - }}, - 'snapshot6' => nil, - } - }} - - it 'should return snapshot which matches the name' do - result = subject.find_snapshot(vm,snapshot_name) - expect(result).to be(snapshot_object) - end - - it 'should return nil which no matches are found' do - result = subject.find_snapshot(vm,missing_snapshot_name) - expect(result).to be_nil - end - end - end - - describe '#find_vm' do - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",connection) - - allow(subject).to receive(:find_vm_light).and_return('vmlight') - allow(subject).to receive(:find_vm_heavy).and_return( { vmname => 'vmheavy' }) - end - - it 'should ensure the connection' do - # TODO This seems like overkill as we immediately call vm_light and heavy which - # does the same thing. 
Also the connection isn't actually used in this method - expect(subject).to receive(:ensure_connected) - - subject.find_vm(vmname) - end - - it 'should call find_vm_light' do - expect(subject).to receive(:find_vm_light).and_return('vmlight') - - expect(subject.find_vm(vmname)).to eq('vmlight') - end - - it 'should not call find_vm_heavy if find_vm_light finds the VM' do - expect(subject).to receive(:find_vm_light).and_return('vmlight') - expect(subject).to receive(:find_vm_heavy).exactly(0).times - - expect(subject.find_vm(vmname)).to eq('vmlight') - end - - it 'should call find_vm_heavy when find_vm_light returns nil' do - expect(subject).to receive(:find_vm_light).and_return(nil) - expect(subject).to receive(:find_vm_heavy).and_return( { vmname => 'vmheavy' }) - - expect(subject.find_vm(vmname)).to eq('vmheavy') - end - end - - describe '#find_vm_light' do - let(:missing_vm) { 'missing_vm' } - - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",connection) - - allow(connection.searchIndex).to receive(:FindByDnsName).and_return(nil) - end - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected) - - subject.find_vm_light(vmname) - end - - it 'should call FindByDnsName with the correct parameters' do - expect(connection.searchIndex).to receive(:FindByDnsName).with({ - :vmSearch => true, - dnsName: vmname, - }) - - subject.find_vm_light(vmname) - end - - it 'should return the VM object when found' do - vm_object = mock_RbVmomi_VIM_VirtualMachine() - expect(connection.searchIndex).to receive(:FindByDnsName).with({ - :vmSearch => true, - dnsName: vmname, - }).and_return(vm_object) - - expect(subject.find_vm_light(vmname)).to be(vm_object) - end - - it 'should return nil if the VM is not found' do - expect(connection.searchIndex).to receive(:FindByDnsName).with({ - :vmSearch => true, - dnsName: missing_vm, - }).and_return(nil) - - expect(subject.find_vm_light(missing_vm)).to be_nil - end - end - - describe '#find_vm_heavy' do - let(:missing_vm) { 'missing_vm' } - # Return an empty result by default - let(:retrieve_result) {{}} - - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",connection) - - allow(connection.propertyCollector).to receive(:RetrievePropertiesEx).and_return(mock_RbVmomi_VIM_RetrieveResult(retrieve_result)) - end - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected).at_least(:once) - - subject.find_vm_heavy(vmname) - end - - context 'Search result is empty' do - it 'should return empty hash' do - expect(subject.find_vm_heavy(vmname)).to eq({}) - end - end - - context 'Search result contains VMs but no matches' do - let(:retrieve_result) { - { :response => [ - { 'name' => 'no_match001'}, - { 'name' => 'no_match002'}, - { 'name' => 'no_match003'}, - { 'name' => 'no_match004'}, - ] - } - } - - it 'should return empty hash' do - expect(subject.find_vm_heavy(vmname)).to eq({}) - end - end - - context 'Search contains a single match' do - let(:vm_object) { mock_RbVmomi_VIM_VirtualMachine({ :name => vmname })} - let(:retrieve_result) { - { :response => [ - { 'name' => 'no_match001'}, - { 'name' => 'no_match002'}, - { 'name' => vmname, :object => vm_object }, - { 'name' => 'no_match003'}, - { 'name' => 'no_match004'}, - ] - } - } - - it 'should return single result' do - result = subject.find_vm_heavy(vmname) - 
expect(result.keys.count).to eq(1) - end - - it 'should return the matching VM Object' do - result = subject.find_vm_heavy(vmname) - expect(result[vmname]).to be(vm_object) - end - end - - context 'Search contains two matches' do - let(:vm_object1) { mock_RbVmomi_VIM_VirtualMachine({ :name => vmname })} - let(:vm_object2) { mock_RbVmomi_VIM_VirtualMachine({ :name => vmname })} - let(:retrieve_result) { - { :response => [ - { 'name' => 'no_match001'}, - { 'name' => 'no_match002'}, - { 'name' => vmname, :object => vm_object1 }, - { 'name' => 'no_match003'}, - { 'name' => 'no_match004'}, - { 'name' => vmname, :object => vm_object2 }, - ] - } - } - - it 'should return one result' do - result = subject.find_vm_heavy(vmname) - expect(result.keys.count).to eq(1) - end - - it 'should return the last matching VM Object' do - result = subject.find_vm_heavy(vmname) - expect(result[vmname]).to be(vm_object2) - end - end - end - - describe '#find_vmdks' do - let(:datastorename) { 'datastore' } - let(:connection_options) {{ - :serviceContent => { - :datacenters => [ - { :name => 'MockDC', :datastores => [datastorename] } - ] - } - }} - - let(:collectMultiple_response) { {} } - - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",connection) - - # NOTE - This method should not be using `_connection`; instead it should be using `@connection` - mock_ds = subject.find_datastore(datastorename) - allow(mock_ds).to receive(:_connection).and_return(connection) - allow(connection.serviceContent.propertyCollector).to receive(:collectMultiple).and_return(collectMultiple_response) - end - - it 'should not use _connection to get the underlying connection object' do - pending('https://github.com/puppetlabs/vmpooler/issues/213') - - mock_ds = subject.find_datastore(datastorename) - expect(mock_ds).to receive(:_connection).exactly(0).times - - begin - # ignore all errors. 
What's important is that it doesn't call _connection - subject.find_vmdks(vmname,datastorename) - rescue - end - end - - it 'should ensure the connection' do - expect(subject).to receive(:ensure_connected).at_least(:once) - - subject.find_vmdks(vmname,datastorename) - end - - context 'Searching all files for all VMs on a Datastore' do - # This is fairly fragile mocking - let(:collectMultiple_response) { { - 'FakeVMObject1' => { 'layoutEx.file' => - [ - mock_RbVmomi_VIM_VirtualMachineFileLayoutExFileInfo({ :key => 101, :name => "[#{datastorename}] mock1/mock1_0.vmdk"}) - ]}, - vmname => { 'layoutEx.file' => - [ - # VMDKs which should match - mock_RbVmomi_VIM_VirtualMachineFileLayoutExFileInfo({ :key => 1, :name => "[#{datastorename}] #{vmname}/#{vmname}_0.vmdk"}), - mock_RbVmomi_VIM_VirtualMachineFileLayoutExFileInfo({ :key => 2, :name => "[#{datastorename}] #{vmname}/#{vmname}_1.vmdk"}), - # VMDKs which should not match - mock_RbVmomi_VIM_VirtualMachineFileLayoutExFileInfo({ :key => 102, :name => "[otherdatastore] #{vmname}/#{vmname}_0.vmdk"}), - mock_RbVmomi_VIM_VirtualMachineFileLayoutExFileInfo({ :key => 103, :name => "[otherdatastore] #{vmname}/#{vmname}.vmdk"}), - mock_RbVmomi_VIM_VirtualMachineFileLayoutExFileInfo({ :key => 104, :name => "[otherdatastore] #{vmname}/#{vmname}_abc.vmdk"}), - ]}, - } } - - it 'should return empty array if no VMDKs match the VM name' do - expect(subject.find_vmdks('missing_vm_name',datastorename)).to eq([]) - end - - it 'should return matching VMDKs for the VM' do - result = subject.find_vmdks(vmname,datastorename) - expect(result).to_not be_nil - expect(result.count).to eq(2) - # The keys for each VMDK should be less that 100 as per the mocks - result.each do |fileinfo| - expect(fileinfo.key).to be < 100 - end - end - end - end - - describe '#get_base_vm_container_from' do - let(:local_connection) { mock_RbVmomi_VIM_Connection() } - - before(:each) do - allow(subject).to receive(:ensure_connected) - end - - it 'should ensure the connection' do - pending('https://github.com/puppetlabs/vmpooler/issues/212') - expect(subject).to receive(:ensure_connected).with(local_connection,credentials) - - subject.get_base_vm_container_from(local_connection) - end - - it 'should return a recursive view of type VirtualMachine' do - result = subject.get_base_vm_container_from(local_connection) - - expect(result.recursive).to be true - expect(result.type).to eq(['VirtualMachine']) - end - end - - describe '#get_snapshot_list' do - let(:snapshot_name) {'snapshot'} - let(:snapshot_tree) { mock_RbVmomi_VIM_VirtualMachine(mock_options).snapshot.rootSnapshotList } - let(:snapshot_object) { mock_RbVmomi_VIM_VirtualMachine() } - - it 'should raise if the snapshot tree is nil' do - expect{ subject.get_snapshot_list(nil,snapshot_name)}.to raise_error(NoMethodError) - end - - context 'VM with a single layer of snapshots' do - let(:mock_options) {{ - :snapshot_tree => { - 'snapshot1' => nil, - 'snapshot2' => nil, - 'snapshot3' => nil, - 'snapshot4' => nil, - snapshot_name => { :ref => snapshot_object}, - } - }} - - it 'should return snapshot which matches the name' do - result = subject.get_snapshot_list(snapshot_tree,snapshot_name) - expect(result).to be(snapshot_object) - end - end - - context 'VM with a nested layers of snapshots' do - let(:mock_options) {{ - :snapshot_tree => { - 'snapshot1' => nil, - 'snapshot2' => nil, - 'snapshot3' => { :children => { - 'snapshot4' => nil, - 'snapshot5' => { :children => { - snapshot_name => { :ref => snapshot_object}, - }}, - }}, - 
'snapshot6' => nil, - } - }} - - it 'should return snapshot which matches the name' do - result = subject.get_snapshot_list(snapshot_tree,snapshot_name) - expect(result).to be(snapshot_object) - end - end - end - - describe '#migrate_vm_host' do - let(:vm_object) { mock_RbVmomi_VIM_VirtualMachine({ :name => vmname })} - let(:host_object) { mock_RbVmomi_VIM_HostSystem({ :name => 'HOST' })} - let(:relocate_task) { mock_RbVmomi_VIM_Task() } - - before(:each) do - allow(vm_object).to receive(:RelocateVM_Task).and_return(relocate_task) - allow(relocate_task).to receive(:wait_for_completion) - end - - it 'should call RelocateVM_Task' do - expect(vm_object).to receive(:RelocateVM_Task).and_return(relocate_task) - - subject.migrate_vm_host(vm_object,host_object) - end - - it 'should use a Relocation Spec object with correct host' do - expect(vm_object).to receive(:RelocateVM_Task).with(relocation_spec_with_host(host_object)) - - subject.migrate_vm_host(vm_object,host_object) - end - - it 'should wait for the relocation to complete' do - expect(relocate_task).to receive(:wait_for_completion) - - subject.migrate_vm_host(vm_object,host_object) - end - - it 'should return the result of the relocation' do - expect(relocate_task).to receive(:wait_for_completion).and_return('RELOCATE_RESULT') - - expect(subject.migrate_vm_host(vm_object,host_object)).to eq('RELOCATE_RESULT') - end - end - - describe '#close' do - context 'no connection has been made' do - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",nil) - end - - it 'should not error' do - pending('https://github.com/puppetlabs/vmpooler/issues/211') - subject.close - end - end - - context 'on an open connection' do - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",connection) - end - - it 'should close the underlying connection object' do - expect(connection).to receive(:close) - subject.close - end - end - end -end From 888ffc4afc1ff6e81c90cc40e89ff7c050bc967e Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Wed, 5 Apr 2017 10:54:58 -0700 Subject: [PATCH 29/32] (POOLER-52) Add a generic connection pool Previously VMPooler had no concept of a connection pooler. While there is an up-to-date connection pool Gem (connection_pool) that supports MRI and jRuby, it lacks metrics, which are useful for diagnosing errors and judging pool size. This commit: - Brings in the connection_pool gem. - Creates a new class, GenericConnectionPool (generic_connection_pool.rb), which inherits from the ConnectionPool class in the connection_pool gem. - Extends the connection pool object with a new method called `with_metrics`. This copies the code from the original `with` method but also emits metrics for how long it took to get an object from the pool and how many objects are left in the pool, sent via VMPooler's metrics object. Extending the class was chosen over overriding because it was not possible to inject into the existing method, and monkey patching did not seem the correct approach. To use the metrics, GenericConnectionPool modifies the initialize method to accept :metrics and :metric_prefix options. - Also adds tests for the GenericConnectionPool class to cover the new methods. Note that functionality inherited unchanged from ConnectionPool is not tested in VMPooler. 
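As a rough usage sketch (illustration only, not part of this patch: the requires assume vmpooler's lib directory is on the load path, and the metrics object, prefix, and the hash standing in for a real connection are arbitrary examples), a caller constructs the pool with a metrics object and borrows connections through `with_metrics`:

  require 'vmpooler/dummy_statsd'
  require 'vmpooler/generic_connection_pool'

  # Any object responding to #gauge and #timing will do; DummyStatsd is a no-op sink.
  metrics = Vmpooler::DummyStatsd.new

  pool = Vmpooler::PoolManager::GenericConnectionPool.new(
    metrics: metrics,                          # pass nil (or omit) to disable metric emission
    metric_prefix: 'example_connection_pool',  # falls back to 'connectionpool' when nil or ''
    size: 2,                                   # :size and :timeout are handled by ConnectionPool itself
    timeout: 10
  ) do
    # Connection factory block, exactly as with ConnectionPool.new; a plain hash
    # stands in for a real client object here.
    { created: Time.now }
  end

  pool.with_metrics do |conn|
    # Around this block the pool emits '<prefix>.waited' (ms spent waiting for a
    # checkout) and '<prefix>.available' (free slots, on checkout and on checkin).
    puts "borrowed connection created at #{conn[:created]}"
  end
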
--- Gemfile | 1 + lib/vmpooler.rb | 2 +- lib/vmpooler/generic_connection_pool.rb | 53 +++++++++++++ spec/unit/generic_connection_pool_spec.rb | 91 +++++++++++++++++++++++ 4 files changed, 146 insertions(+), 1 deletion(-) create mode 100644 lib/vmpooler/generic_connection_pool.rb create mode 100644 spec/unit/generic_connection_pool_spec.rb diff --git a/Gemfile b/Gemfile index 550b983..26f420f 100644 --- a/Gemfile +++ b/Gemfile @@ -16,6 +16,7 @@ gem 'redis', '>= 3.2' gem 'sinatra', '>= 1.4' gem 'net-ldap', '<= 0.12.1' # keep compatibility w/ jruby & mri-1.9.3 gem 'statsd-ruby', '>= 1.3.0', :require => 'statsd' +gem 'connection_pool', '>= 2.2.1' # Test deps group :test do diff --git a/lib/vmpooler.rb b/lib/vmpooler.rb index 590f26f..919ddf9 100644 --- a/lib/vmpooler.rb +++ b/lib/vmpooler.rb @@ -12,7 +12,7 @@ module Vmpooler require 'yaml' require 'set' - %w[api graphite logger pool_manager statsd dummy_statsd providers].each do |lib| + %w[api graphite logger pool_manager statsd dummy_statsd generic_connection_pool providers].each do |lib| begin require "vmpooler/#{lib}" rescue LoadError diff --git a/lib/vmpooler/generic_connection_pool.rb b/lib/vmpooler/generic_connection_pool.rb new file mode 100644 index 0000000..ca18576 --- /dev/null +++ b/lib/vmpooler/generic_connection_pool.rb @@ -0,0 +1,53 @@ +require 'connection_pool' + +module Vmpooler + class PoolManager + class GenericConnectionPool < ConnectionPool + # Extend the ConnectionPool class with instrumentation + # https://github.com/mperham/connection_pool/blob/master/lib/connection_pool.rb + + def initialize(options = {}, &block) + super(options, &block) + @metrics = options[:metrics] + @metric_prefix = options[:metric_prefix] + @metric_prefix = 'connectionpool' if @metric_prefix.nil? || @metric_prefix == '' + end + + if Thread.respond_to?(:handle_interrupt) + # MRI + def with_metrics(options = {}) + Thread.handle_interrupt(Exception => :never) do + start = Time.now + conn = checkout(options) + timespan_ms = ((Time.now - start) * 1000).to_i + @metrics.gauge(@metric_prefix + '.available', @available.length) unless @metrics.nil? + @metrics.timing(@metric_prefix + '.waited', timespan_ms) unless @metrics.nil? + begin + Thread.handle_interrupt(Exception => :immediate) do + yield conn + end + ensure + checkin + @metrics.gauge(@metric_prefix + '.available', @available.length) unless @metrics.nil? + end + end + end + else + # jruby 1.7.x + def with_metrics(options = {}) + start = Time.now + conn = checkout(options) + timespan_ms = ((Time.now - start) * 1000).to_i + @metrics.gauge(@metric_prefix + '.available', @available.length) unless @metrics.nil? + @metrics.timing(@metric_prefix + '.waited', timespan_ms) unless @metrics.nil? + begin + yield conn + ensure + checkin + @metrics.gauge(@metric_prefix + '.available', @available.length) unless @metrics.nil? 
+ end + end + end + end + end +end diff --git a/spec/unit/generic_connection_pool_spec.rb b/spec/unit/generic_connection_pool_spec.rb new file mode 100644 index 0000000..fac7478 --- /dev/null +++ b/spec/unit/generic_connection_pool_spec.rb @@ -0,0 +1,91 @@ +require 'spec_helper' + +describe 'GenericConnectionPool' do + let(:metrics) { Vmpooler::DummyStatsd.new } + let(:metric_prefix) { 'prefix' } + let(:default_metric_prefix) { 'connectionpool' } + let(:connection_object) { double('connection') } + let(:pool_size) { 1 } + let(:pool_timeout) { 1 } + + subject { Vmpooler::PoolManager::GenericConnectionPool.new( + metrics: metrics, + metric_prefix: metric_prefix, + size: pool_size, + timeout: pool_timeout + ) { connection_object } + } + + + describe "#with_metrics" do + before(:each) do + expect(subject).not_to be_nil + end + + context 'When metrics are configured' do + it 'should emit a gauge metric when the connection is grabbed and released' do + expect(metrics).to receive(:gauge).with(/\.available/,Integer).exactly(2).times + + subject.with_metrics do |conn1| + # do nothing + end + end + + it 'should emit a timing metric when the connection is grabbed' do + expect(metrics).to receive(:timing).with(/\.waited/,Integer).exactly(1).times + + subject.with_metrics do |conn1| + # do nothing + end + end + + it 'should emit metrics with the specified prefix' do + expect(metrics).to receive(:gauge).with(/#{metric_prefix}\./,Integer).at_least(1).times + expect(metrics).to receive(:timing).with(/#{metric_prefix}\./,Integer).at_least(1).times + + subject.with_metrics do |conn1| + # do nothing + end + end + + context 'Metric prefix is missing' do + let(:metric_prefix) { nil } + + it 'should emit metrics with default prefix' do + expect(metrics).to receive(:gauge).with(/#{default_metric_prefix}\./,Integer).at_least(1).times + expect(metrics).to receive(:timing).with(/#{default_metric_prefix}\./,Integer).at_least(1).times + + subject.with_metrics do |conn1| + # do nothing + end + end + end + + context 'Metric prefix is empty' do + let(:metric_prefix) { '' } + + it 'should emit metrics with default prefix' do + expect(metrics).to receive(:gauge).with(/#{default_metric_prefix}\./,Integer).at_least(1).times + expect(metrics).to receive(:timing).with(/#{default_metric_prefix}\./,Integer).at_least(1).times + + subject.with_metrics do |conn1| + # do nothing + end + end + end + end + + context 'When metrics are not configured' do + let(:metrics) { nil } + + it 'should not emit any metrics' do + # if any metrics were called it would result in a NoMethodError on nil. + + subject.with_metrics do |conn1| + # do nothing + end + end + end + + end +end From 2f37c1e9b54b99d978e99124d86dd5b244e88ff9 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Wed, 5 Apr 2017 10:55:49 -0700 Subject: [PATCH 30/32] (POOLER-52) Modify dummy provider to use a connection pool Previously a generic connection pool class was added. This commit modifies the Dummy VM Provider to use a connection pool. While the Dummy provider strictly speaking does not use connections, this allows testing of what happens when connection pools are stressed or exhausted. 
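In outline (a condensed, illustrative sketch of the pattern the diff below applies, not additional code to merge), the provider builds a pool of mock connection hashes sized by two new optional settings, and every public provider method borrows a connection for its body even though the dummy logic never touches it:

  require 'vmpooler/generic_connection_pool'

  # 'connection_pool_size' and 'connection_pool_timeout' come from the provider
  # configuration and default to 1 and 10 when unset.
  pool = Vmpooler::PoolManager::GenericConnectionPool.new(size: 1, timeout: 10) do
    { create_timestamp: Time.now, conn_id: rand(2048).to_s }   # mock "connection"
  end

  pool.with_metrics do |_conn|
    # the hashtable-backed dummy work runs here; _conn is deliberately unused
  end
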
This commit: - Modifies functions to use a connection pool object for the public API functions - Modifies the VMPooler YAML with new settings for connection pool size and timeout --- lib/vmpooler/providers/dummy.rb | 382 +++++++++++++++++------------- spec/unit/providers/dummy_spec.rb | 4 +- vmpooler.yaml.example | 8 + 3 files changed, 222 insertions(+), 172 deletions(-) diff --git a/lib/vmpooler/providers/dummy.rb b/lib/vmpooler/providers/dummy.rb index 2b124a3..1173511 100644 --- a/lib/vmpooler/providers/dummy.rb +++ b/lib/vmpooler/providers/dummy.rb @@ -19,6 +19,22 @@ module Vmpooler # duplicate actions to put the @dummylist hashtable into a bad state, for example; # Deleting a VM while it's in the middle of adding a disk. @write_lock = Mutex.new + + # Create a dummy connection pool + connpool_size = provider_config['connection_pool_size'].nil? ? 1 : provider_config['connection_pool_size'].to_i + connpool_timeout = provider_config['connection_pool_timeout'].nil? ? 10 : provider_config['connection_pool_timeout'].to_i + logger.log('d', "[#{name}] ConnPool - Creating a connection pool of size #{connpool_size} with timeout #{connpool_timeout}") + @connection_pool = Vmpooler::PoolManager::GenericConnectionPool.new( + metrics: metrics, + metric_prefix: "#{name}_provider_connection_pool", + size: connpool_size, + timeout: connpool_timeout + ) do + # Create a mock connection object + new_conn = { create_timestamp: Time.now, conn_id: rand(2048).to_s } + logger.log('d', "[#{name}] ConnPool - Creating a connection object ID #{new_conn[:conn_id]}") + new_conn + end end def name @@ -27,21 +43,30 @@ module Vmpooler def vms_in_pool(pool_name) vmlist = [] - get_dummy_pool_object(pool_name).each do |vm| - vmlist << { 'name' => vm['name'] } + + @connection_pool.with_metrics do |_conn| + get_dummy_pool_object(pool_name).each do |vm| + vmlist << { 'name' => vm['name'] } + end end vmlist end def get_vm_host(pool_name, vm_name) - current_vm = get_dummy_vm(pool_name, vm_name) + current_vm = nil + @connection_pool.with_metrics do |_conn| + current_vm = get_dummy_vm(pool_name, vm_name) + end current_vm.nil? ? raise("VM #{vm_name} does not exist") : current_vm['vm_host'] end def find_least_used_compatible_host(pool_name, vm_name) - current_vm = get_dummy_vm(pool_name, vm_name) + current_vm = nil + @connection_pool.with_metrics do |_conn| + current_vm = get_dummy_vm(pool_name, vm_name) + end # Unless migratevm_couldmove_percent is specified, don't migrate return current_vm['vm_host'] if provider_config['migratevm_couldmove_percent'].nil? @@ -56,64 +81,68 @@ module Vmpooler end def migrate_vm_to_host(pool_name, vm_name, dest_host_name) - current_vm = get_dummy_vm(pool_name, vm_name) - - # Inject migration delay - unless provider_config['migratevm_max_time'].nil? - migrate_time = 1 + rand(provider_config['migratevm_max_time']) - sleep(migrate_time) - end - - # Inject clone failure - unless provider_config['migratevm_fail_percent'].nil? - raise('Dummy Failure for migratevm_fail_percent') if 1 + rand(100) <= provider_config['migratevm_fail_percent'] - end - - @write_lock.synchronize do + @connection_pool.with_metrics do |_conn| current_vm = get_dummy_vm(pool_name, vm_name) - current_vm['vm_host'] = dest_host_name - write_backing_file + + # Inject migration delay + unless provider_config['migratevm_max_time'].nil? + migrate_time = 1 + rand(provider_config['migratevm_max_time']) + sleep(migrate_time) + end + + # Inject clone failure + unless provider_config['migratevm_fail_percent'].nil? 
+ raise('Dummy Failure for migratevm_fail_percent') if 1 + rand(100) <= provider_config['migratevm_fail_percent'] + end + + @write_lock.synchronize do + current_vm = get_dummy_vm(pool_name, vm_name) + current_vm['vm_host'] = dest_host_name + write_backing_file + end end true end def get_vm(pool_name, vm_name) - dummy = get_dummy_vm(pool_name, vm_name) - return nil if dummy.nil? - - # Randomly power off the VM - unless dummy['powerstate'] != 'PoweredOn' || provider_config['getvm_poweroff_percent'].nil? - if 1 + rand(100) <= provider_config['getvm_poweroff_percent'] - @write_lock.synchronize do - dummy = get_dummy_vm(pool_name, vm_name) - dummy['powerstate'] = 'PoweredOff' - write_backing_file - end - logger.log('d', "[ ] [#{dummy['poolname']}] '#{dummy['name']}' is being Dummy Powered Off") - end - end - - # Randomly rename the host - unless dummy['hostname'] != dummy['name'] || provider_config['getvm_rename_percent'].nil? - if 1 + rand(100) <= provider_config['getvm_rename_percent'] - @write_lock.synchronize do - dummy = get_dummy_vm(pool_name, vm_name) - dummy['hostname'] = 'DUMMY' + dummy['name'] - write_backing_file - end - logger.log('d', "[ ] [#{dummy['poolname']}] '#{dummy['name']}' is being Dummy renamed") - end - end - obj = {} - obj['name'] = dummy['name'] - obj['hostname'] = dummy['hostname'] - obj['boottime'] = dummy['boottime'] - obj['template'] = dummy['template'] - obj['poolname'] = dummy['poolname'] - obj['powerstate'] = dummy['powerstate'] - obj['snapshots'] = dummy['snapshots'] + @connection_pool.with_metrics do |_conn| + dummy = get_dummy_vm(pool_name, vm_name) + return nil if dummy.nil? + + # Randomly power off the VM + unless dummy['powerstate'] != 'PoweredOn' || provider_config['getvm_poweroff_percent'].nil? + if 1 + rand(100) <= provider_config['getvm_poweroff_percent'] + @write_lock.synchronize do + dummy = get_dummy_vm(pool_name, vm_name) + dummy['powerstate'] = 'PoweredOff' + write_backing_file + end + logger.log('d', "[ ] [#{dummy['poolname']}] '#{dummy['name']}' is being Dummy Powered Off") + end + end + + # Randomly rename the host + unless dummy['hostname'] != dummy['name'] || provider_config['getvm_rename_percent'].nil? + if 1 + rand(100) <= provider_config['getvm_rename_percent'] + @write_lock.synchronize do + dummy = get_dummy_vm(pool_name, vm_name) + dummy['hostname'] = 'DUMMY' + dummy['name'] + write_backing_file + end + logger.log('d', "[ ] [#{dummy['poolname']}] '#{dummy['name']}' is being Dummy renamed") + end + end + + obj['name'] = dummy['name'] + obj['hostname'] = dummy['hostname'] + obj['boottime'] = dummy['boottime'] + obj['template'] = dummy['template'] + obj['poolname'] = dummy['poolname'] + obj['powerstate'] = dummy['powerstate'] + obj['snapshots'] = dummy['snapshots'] + end obj end @@ -150,172 +179,185 @@ module Vmpooler logger.log('d', "[ ] [#{pool_name}] '#{dummy_hostname}' is being cloned from '#{template_name}'") - # Inject clone time delay - unless provider_config['createvm_max_time'].nil? - @write_lock.synchronize do - vm['dummy_state'] = 'CLONING' - write_backing_file - end - clone_time = 1 + rand(provider_config['createvm_max_time']) - sleep(clone_time) - end - - begin - # Inject clone failure - unless provider_config['createvm_fail_percent'].nil? - raise('Dummy Failure for createvm_fail_percent') if 1 + rand(100) <= provider_config['createvm_fail_percent'] + @connection_pool.with_metrics do |_conn| + # Inject clone time delay + unless provider_config['createvm_max_time'].nil? 
+ @write_lock.synchronize do + vm['dummy_state'] = 'CLONING' + write_backing_file + end + clone_time = 1 + rand(provider_config['createvm_max_time']) + sleep(clone_time) end - # Assert the VM is ready for use - @write_lock.synchronize do - vm['dummy_state'] = 'RUNNING' - write_backing_file + begin + # Inject clone failure + unless provider_config['createvm_fail_percent'].nil? + raise('Dummy Failure for createvm_fail_percent') if 1 + rand(100) <= provider_config['createvm_fail_percent'] + end + + # Assert the VM is ready for use + @write_lock.synchronize do + vm['dummy_state'] = 'RUNNING' + write_backing_file + end + rescue => _err + @write_lock.synchronize do + remove_dummy_vm(pool_name, dummy_hostname) + write_backing_file + end + raise end - rescue => _err - @write_lock.synchronize do - remove_dummy_vm(pool_name, dummy_hostname) - write_backing_file - end - raise end get_vm(pool_name, dummy_hostname) end def create_disk(pool_name, vm_name, disk_size) - vm_object = get_dummy_vm(pool_name, vm_name) - raise("VM #{vm_name} does not exist in Pool #{pool_name} for the provider #{name}") if vm_object.nil? - - # Inject create time delay - unless provider_config['createdisk_max_time'].nil? - delay = 1 + rand(provider_config['createdisk_max_time']) - sleep(delay) - end - - # Inject create failure - unless provider_config['createdisk_fail_percent'].nil? - raise('Dummy Failure for createdisk_fail_percent') if 1 + rand(100) <= provider_config['createdisk_fail_percent'] - end - - @write_lock.synchronize do + @connection_pool.with_metrics do |_conn| vm_object = get_dummy_vm(pool_name, vm_name) - vm_object['disks'] << disk_size - write_backing_file + raise("VM #{vm_name} does not exist in Pool #{pool_name} for the provider #{name}") if vm_object.nil? + + # Inject create time delay + unless provider_config['createdisk_max_time'].nil? + delay = 1 + rand(provider_config['createdisk_max_time']) + sleep(delay) + end + + # Inject create failure + unless provider_config['createdisk_fail_percent'].nil? + raise('Dummy Failure for createdisk_fail_percent') if 1 + rand(100) <= provider_config['createdisk_fail_percent'] + end + + @write_lock.synchronize do + vm_object = get_dummy_vm(pool_name, vm_name) + vm_object['disks'] << disk_size + write_backing_file + end end true end def create_snapshot(pool_name, vm_name, snapshot_name) - vm_object = get_dummy_vm(pool_name, vm_name) - raise("VM #{vm_name} does not exist in Pool #{pool_name} for the provider #{name}") if vm_object.nil? - - # Inject create time delay - unless provider_config['createsnapshot_max_time'].nil? - delay = 1 + rand(provider_config['createsnapshot_max_time']) - sleep(delay) - end - - # Inject create failure - unless provider_config['createsnapshot_fail_percent'].nil? - raise('Dummy Failure for createsnapshot_fail_percent') if 1 + rand(100) <= provider_config['createsnapshot_fail_percent'] - end - - @write_lock.synchronize do + @connection_pool.with_metrics do |_conn| vm_object = get_dummy_vm(pool_name, vm_name) - vm_object['snapshots'] << snapshot_name - write_backing_file + raise("VM #{vm_name} does not exist in Pool #{pool_name} for the provider #{name}") if vm_object.nil? + + # Inject create time delay + unless provider_config['createsnapshot_max_time'].nil? + delay = 1 + rand(provider_config['createsnapshot_max_time']) + sleep(delay) + end + + # Inject create failure + unless provider_config['createsnapshot_fail_percent'].nil? 
+ raise('Dummy Failure for createsnapshot_fail_percent') if 1 + rand(100) <= provider_config['createsnapshot_fail_percent'] + end + + @write_lock.synchronize do + vm_object = get_dummy_vm(pool_name, vm_name) + vm_object['snapshots'] << snapshot_name + write_backing_file + end end true end def revert_snapshot(pool_name, vm_name, snapshot_name) - vm_object = get_dummy_vm(pool_name, vm_name) - raise("VM #{vm_name} does not exist in Pool #{pool_name} for the provider #{name}") if vm_object.nil? + vm_object = nil + @connection_pool.with_metrics do |_conn| + vm_object = get_dummy_vm(pool_name, vm_name) + raise("VM #{vm_name} does not exist in Pool #{pool_name} for the provider #{name}") if vm_object.nil? - # Inject create time delay - unless provider_config['revertsnapshot_max_time'].nil? - delay = 1 + rand(provider_config['revertsnapshot_max_time']) - sleep(delay) - end + # Inject create time delay + unless provider_config['revertsnapshot_max_time'].nil? + delay = 1 + rand(provider_config['revertsnapshot_max_time']) + sleep(delay) + end - # Inject create failure - unless provider_config['revertsnapshot_fail_percent'].nil? - raise('Dummy Failure for revertsnapshot_fail_percent') if 1 + rand(100) <= provider_config['revertsnapshot_fail_percent'] + # Inject create failure + unless provider_config['revertsnapshot_fail_percent'].nil? + raise('Dummy Failure for revertsnapshot_fail_percent') if 1 + rand(100) <= provider_config['revertsnapshot_fail_percent'] + end end vm_object['snapshots'].include?(snapshot_name) end def destroy_vm(pool_name, vm_name) - vm = get_dummy_vm(pool_name, vm_name) - return false if vm.nil? - return false if vm['poolname'] != pool_name + @connection_pool.with_metrics do |_conn| + vm = get_dummy_vm(pool_name, vm_name) + return false if vm.nil? + return false if vm['poolname'] != pool_name - # Shutdown down the VM if it's poweredOn - if vm['powerstate'] == 'PoweredOn' - logger.log('d', "[ ] [#{pool_name}] '#{vm_name}' is being shut down") + # Shutdown down the VM if it's poweredOn + if vm['powerstate'] == 'PoweredOn' + logger.log('d', "[ ] [#{pool_name}] '#{vm_name}' is being shut down") - # Inject shutdown delay time - unless provider_config['destroyvm_max_shutdown_time'].nil? - shutdown_time = 1 + rand(provider_config['destroyvm_max_shutdown_time']) - sleep(shutdown_time) + # Inject shutdown delay time + unless provider_config['destroyvm_max_shutdown_time'].nil? + shutdown_time = 1 + rand(provider_config['destroyvm_max_shutdown_time']) + sleep(shutdown_time) + end + + @write_lock.synchronize do + vm = get_dummy_vm(pool_name, vm_name) + vm['powerstate'] = 'PoweredOff' + write_backing_file + end end + # Inject destroy VM delay + unless provider_config['destroyvm_max_time'].nil? + destroy_time = 1 + rand(provider_config['destroyvm_max_time']) + sleep(destroy_time) + end + + # Inject destroy VM failure + unless provider_config['destroyvm_fail_percent'].nil? + raise('Dummy Failure for migratevm_fail_percent') if 1 + rand(100) <= provider_config['destroyvm_fail_percent'] + end + + # 'Destroy' the VM @write_lock.synchronize do - vm = get_dummy_vm(pool_name, vm_name) - vm['powerstate'] = 'PoweredOff' + remove_dummy_vm(pool_name, vm_name) write_backing_file end end - # Inject destroy VM delay - unless provider_config['destroyvm_max_time'].nil? - destroy_time = 1 + rand(provider_config['destroyvm_max_time']) - sleep(destroy_time) - end - - # Inject destroy VM failure - unless provider_config['destroyvm_fail_percent'].nil? 
-            raise('Dummy Failure for migratevm_fail_percent') if 1 + rand(100) <= provider_config['destroyvm_fail_percent']
-          end
-
-          # 'Destroy' the VM
-          @write_lock.synchronize do
-            remove_dummy_vm(pool_name, vm_name)
-            write_backing_file
-          end
-
           true
         end
 
         def vm_ready?(pool_name, vm_name)
-          vm_object = get_dummy_vm(pool_name, vm_name)
-          return false if vm_object.nil?
-          return false if vm_object['poolname'] != pool_name
-          return true if vm_object['ready']
+          @connection_pool.with_metrics do |_conn|
+            vm_object = get_dummy_vm(pool_name, vm_name)
+            return false if vm_object.nil?
+            return false if vm_object['poolname'] != pool_name
+            return true if vm_object['ready']
 
-          timeout = provider_config['is_ready_timeout'] || 5
+            timeout = provider_config['is_ready_timeout'] || 5
 
-          Timeout.timeout(timeout) do
-            while vm_object['dummy_state'] != 'RUNNING'
-              sleep(2)
-              vm_object = get_dummy_vm(pool_name, vm_name)
+            Timeout.timeout(timeout) do
+              while vm_object['dummy_state'] != 'RUNNING'
+                sleep(2)
+                vm_object = get_dummy_vm(pool_name, vm_name)
+              end
             end
-          end
 
-          # Simulate how long it takes from a VM being powered on until
-          # it's ready to receive a connection
-          sleep(2)
+            # Simulate how long it takes from a VM being powered on until
+            # it's ready to receive a connection
+            sleep(2)
 
-          unless provider_config['vmready_fail_percent'].nil?
-            raise('Dummy Failure for vmready_fail_percent') if 1 + rand(100) <= provider_config['vmready_fail_percent']
-          end
+            unless provider_config['vmready_fail_percent'].nil?
+              raise('Dummy Failure for vmready_fail_percent') if 1 + rand(100) <= provider_config['vmready_fail_percent']
+            end
 
-          @write_lock.synchronize do
-            vm_object['ready'] = true
-            write_backing_file
+            @write_lock.synchronize do
+              vm_object['ready'] = true
+              write_backing_file
+            end
           end
 
           true
diff --git a/spec/unit/providers/dummy_spec.rb b/spec/unit/providers/dummy_spec.rb
index 5baf092..f0156f9 100644
--- a/spec/unit/providers/dummy_spec.rb
+++ b/spec/unit/providers/dummy_spec.rb
@@ -76,11 +76,11 @@ describe 'Vmpooler::PoolManager::Provider::Dummy' do
   let(:config) { YAML.load(<<-EOT
 ---
 :config:
-  max_tries: 3
-  retry_factor: 10
 :providers:
   :dummy:
     key1: 'value1'
+    # Drop the connection pool timeout way down for spec tests so they fail fast
+    connection_pool_timeout: 1
 :pools:
   - name: '#{pool_name}'
     size: 5
diff --git a/vmpooler.yaml.example b/vmpooler.yaml.example
index b7d3ea6..86fd852 100644
--- a/vmpooler.yaml.example
+++ b/vmpooler.yaml.example
@@ -50,6 +50,14 @@
 #      The filename used to store the backing text file. If this is not specified the VM state is only
 #      kept in memory, and is lost when the Provider is shutdown
 #
+#   - connection_pool_size (Optional)
+#      The size of the dummy connection pool. This can be used to simulate constrained provider resources e.g. 200 pools sharing one connection
+#      (optional; default 1)
+#
+#   - connection_pool_timeout (Optional)
+#      The number of seconds to wait for a connection object from the pool. If the timeout is exceeded an error is raised
+#      (optional; default 10 seconds)
+#
 #   - migratevm_couldmove_percent
 #      Percent chance that a VM could be moved to another host
 #      (optional; default 0%)

From df783f0ed061c7f0b2acff4f4350e0aee6a6ebad Mon Sep 17 00:00:00 2001
From: Glenn Sarti
Date: Tue, 18 Apr 2017 16:21:34 -0700
Subject: [PATCH 31/32] (POOLER-52) Use a Connection Pooler for vSphere connections

Previously the vSphere Provider would share a single vSphere connection for
all pools under management.
This would cause issues in large environments, with errors being thrown or
operations slowing down.

This commit modifies the vSphere Provider to use a connection pool when
communicating with the vSphere API:

- Uses the GenericConnectionPool object to manage the connection pool
- Uses a default connection pool size of whatever is biggest from:
  - How many pools this provider services
  - Maximum number of cloning tasks allowed
  - Need at least 2 connections so that a pool can have inventory functions
    performed while cloning etc.
- A large connection_pool_timeout is used as a connection object is consumed
  during a VM clone, which can take up to 2 minutes
- Removes the `get_connection` method as it is now obsolete due to the
  connection pool
- Removes the `close` method as it is now obsolete
- Modifies the spec tests slightly to stop mocking get_connection, as it no
  longer exists, and sets a very low pool timeout so that a failing test fails
  quickly instead of taking the default time of 60+ seconds
---
 lib/vmpooler/providers/vsphere.rb   | 317 ++++++++++++++--------------
 spec/unit/providers/vsphere_spec.rb | 117 ++--------
 2 files changed, 185 insertions(+), 249 deletions(-)

diff --git a/lib/vmpooler/providers/vsphere.rb b/lib/vmpooler/providers/vsphere.rb
index 6dc9819..3e1401a 100644
--- a/lib/vmpooler/providers/vsphere.rb
+++ b/lib/vmpooler/providers/vsphere.rb
@@ -2,159 +2,188 @@ module Vmpooler
   class PoolManager
     class Provider
       class VSphere < Vmpooler::PoolManager::Provider::Base
+        def initialize(config, logger, metrics, name, options)
+          super(config, logger, metrics, name, options)
+
+          task_limit = global_config[:config].nil? || global_config[:config]['task_limit'].nil? ? 10 : global_config[:config]['task_limit'].to_i
+          # The default connection pool size is:
+          # Whatever is biggest from:
+          #   - How many pools this provider services
+          #   - Maximum number of cloning tasks allowed
+          #   - Need at least 2 connections so that a pool can have inventory functions performed while cloning etc.
+          default_connpool_size = [provided_pools.count, task_limit, 2].max
+          connpool_size = provider_config['connection_pool_size'].nil? ? default_connpool_size : provider_config['connection_pool_size'].to_i
+          # The default connection pool timeout should be quite large - 60 seconds
+          connpool_timeout = provider_config['connection_pool_timeout'].nil? ? 60 : provider_config['connection_pool_timeout'].to_i
+          logger.log('d', "[#{name}] ConnPool - Creating a connection pool of size #{connpool_size} with timeout #{connpool_timeout}")
+          @connection_pool = Vmpooler::PoolManager::GenericConnectionPool.new(
+            metrics: metrics,
+            metric_prefix: "#{name}_provider_connection_pool",
+            size: connpool_size,
+            timeout: connpool_timeout
+          ) do
+            logger.log('d', "[#{name}] Connection Pool - Creating a connection object")
+            new_conn = connect_to_vsphere
+
+            new_conn
+          end
+        end
+
         def name
           'vsphere'
         end
 
         def vms_in_pool(pool_name)
-          connection = get_connection
-
-          foldername = pool_config(pool_name)['folder']
-          folder_object = find_folder(foldername, connection)
           vms = []
+          @connection_pool.with_metrics do |connection|
+            foldername = pool_config(pool_name)['folder']
+            folder_object = find_folder(foldername, connection)
 
-          return vms if folder_object.nil?
- folder_object.childEntity.each do |vm| - vms << { 'name' => vm.name } + folder_object.childEntity.each do |vm| + vms << { 'name' => vm.name } + end end - vms end def get_vm_host(_pool_name, vm_name) - connection = get_connection - - vm_object = find_vm(vm_name, connection) - return nil if vm_object.nil? - host_name = nil - host_name = vm_object.summary.runtime.host.name if vm_object.summary && vm_object.summary.runtime && vm_object.summary.runtime.host + @connection_pool.with_metrics do |connection| + vm_object = find_vm(vm_name, connection) + return host_name if vm_object.nil? + + host_name = vm_object.summary.runtime.host.name if vm_object.summary && vm_object.summary.runtime && vm_object.summary.runtime.host + end host_name end def find_least_used_compatible_host(_pool_name, vm_name) - connection = get_connection + hostname = nil + @connection_pool.with_metrics do |connection| + vm_object = find_vm(vm_name, connection) - vm_object = find_vm(vm_name, connection) + return hostname if vm_object.nil? + host_object = find_least_used_vpshere_compatible_host(vm_object) - return nil if vm_object.nil? - host_object = find_least_used_vpshere_compatible_host(vm_object) - - return nil if host_object.nil? - host_object[0].name + return hostname if host_object.nil? + hostname = host_object[0].name + end + hostname end def migrate_vm_to_host(pool_name, vm_name, dest_host_name) pool = pool_config(pool_name) raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil? - connection = get_connection + @connection_pool.with_metrics do |connection| + vm_object = find_vm(vm_name, connection) + raise("VM #{vm_name} does not exist in Pool #{pool_name} for the provider #{name}") if vm_object.nil? - vm_object = find_vm(vm_name, connection) - raise("VM #{vm_name} does not exist in Pool #{pool_name} for the provider #{name}") if vm_object.nil? + target_cluster_name = get_target_cluster_from_config(pool_name) + cluster = find_cluster(target_cluster_name, connection) + raise("Pool #{pool_name} specifies cluster #{target_cluster_name} which does not exist for the provider #{name}") if cluster.nil? - target_cluster_name = get_target_cluster_from_config(pool_name) - cluster = find_cluster(target_cluster_name, connection) - raise("Pool #{pool_name} specifies cluster #{target_cluster_name} which does not exist for the provider #{name}") if cluster.nil? - - # Go through each host and initiate a migration when the correct host name is found - cluster.host.each do |host| - if host.name == dest_host_name - migrate_vm_host(vm_object, host) - return true + # Go through each host and initiate a migration when the correct host name is found + cluster.host.each do |host| + if host.name == dest_host_name + migrate_vm_host(vm_object, host) + return true + end end end - false end def get_vm(_pool_name, vm_name) - connection = get_connection + vm_hash = nil + @connection_pool.with_metrics do |connection| + vm_object = find_vm(vm_name, connection) + return vm_hash if vm_object.nil? - vm_object = find_vm(vm_name, connection) - return nil if vm_object.nil? 
- - vm_folder_path = get_vm_folder_path(vm_object) - # Find the pool name based on the folder path - pool_name = nil - template_name = nil - global_config[:pools].each do |pool| - if pool['folder'] == vm_folder_path - pool_name = pool['name'] - template_name = pool['template'] + vm_folder_path = get_vm_folder_path(vm_object) + # Find the pool name based on the folder path + pool_name = nil + template_name = nil + global_config[:pools].each do |pool| + if pool['folder'] == vm_folder_path + pool_name = pool['name'] + template_name = pool['template'] + end end - end - generate_vm_hash(vm_object, template_name, pool_name) + vm_hash = generate_vm_hash(vm_object, template_name, pool_name) + end + vm_hash end def create_vm(pool_name, new_vmname) pool = pool_config(pool_name) raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil? + vm_hash = nil + @connection_pool.with_metrics do |connection| + # Assume all pool config is valid i.e. not missing + template_path = pool['template'] + target_folder_path = pool['folder'] + target_datastore = pool['datastore'] + target_cluster_name = get_target_cluster_from_config(pool_name) - connection = get_connection + # Extract the template VM name from the full path + raise("Pool #{pool_name} did specify a full path for the template for the provider #{name}") unless template_path =~ /\// + templatefolders = template_path.split('/') + template_name = templatefolders.pop - # Assume all pool config is valid i.e. not missing - template_path = pool['template'] - target_folder_path = pool['folder'] - target_datastore = pool['datastore'] - target_cluster_name = get_target_cluster_from_config(pool_name) + # Get the actual objects from vSphere + template_folder_object = find_folder(templatefolders.join('/'), connection) + raise("Pool #{pool_name} specifies a template folder of #{templatefolders.join('/')} which does not exist for the provider #{name}") if template_folder_object.nil? - # Extract the template VM name from the full path - raise("Pool #{pool_name} did specify a full path for the template for the provider #{name}") unless template_path =~ /\// - templatefolders = template_path.split('/') - template_name = templatefolders.pop + template_vm_object = template_folder_object.find(template_name) + raise("Pool #{pool_name} specifies a template VM of #{template_name} which does not exist for the provider #{name}") if template_vm_object.nil? - # Get the actual objects from vSphere - template_folder_object = find_folder(templatefolders.join('/'), connection) - raise("Pool #{pool_name} specifies a template folder of #{templatefolders.join('/')} which does not exist for the provider #{name}") if template_folder_object.nil? + # Annotate with creation time, origin template, etc. + # Add extraconfig options that can be queried by vmtools + config_spec = RbVmomi::VIM.VirtualMachineConfigSpec( + annotation: JSON.pretty_generate( + name: new_vmname, + created_by: provider_config['username'], + base_template: template_path, + creation_timestamp: Time.now.utc + ), + extraConfig: [ + { key: 'guestinfo.hostname', value: new_vmname } + ] + ) - template_vm_object = template_folder_object.find(template_name) - raise("Pool #{pool_name} specifies a template VM of #{template_name} which does not exist for the provider #{name}") if template_vm_object.nil? + # Choose a cluster/host to place the new VM on + target_host_object = find_least_used_host(target_cluster_name, connection) - # Annotate with creation time, origin template, etc. 
- # Add extraconfig options that can be queried by vmtools - config_spec = RbVmomi::VIM.VirtualMachineConfigSpec( - annotation: JSON.pretty_generate( + # Put the VM in the specified folder and resource pool + relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec( + datastore: find_datastore(target_datastore, connection), + host: target_host_object, + diskMoveType: :moveChildMostDiskBacking + ) + + # Create a clone spec + clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec( + location: relocate_spec, + config: config_spec, + powerOn: true, + template: false + ) + + # Create the new VM + new_vm_object = template_vm_object.CloneVM_Task( + folder: find_folder(target_folder_path, connection), name: new_vmname, - created_by: provider_config['username'], - base_template: template_path, - creation_timestamp: Time.now.utc - ), - extraConfig: [ - { key: 'guestinfo.hostname', value: new_vmname } - ] - ) + spec: clone_spec + ).wait_for_completion - # Choose a cluster/host to place the new VM on - target_host_object = find_least_used_host(target_cluster_name, connection) - - # Put the VM in the specified folder and resource pool - relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec( - datastore: find_datastore(target_datastore, connection), - host: target_host_object, - diskMoveType: :moveChildMostDiskBacking - ) - - # Create a clone spec - clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec( - location: relocate_spec, - config: config_spec, - powerOn: true, - template: false - ) - - # Create the new VM - new_vm_object = template_vm_object.CloneVM_Task( - folder: find_folder(target_folder_path, connection), - name: new_vmname, - spec: clone_spec - ).wait_for_completion - - generate_vm_hash(new_vm_object, template_path, pool_name) + vm_hash = generate_vm_hash(new_vm_object, template_path, pool_name) + end + vm_hash end def create_disk(pool_name, vm_name, disk_size) @@ -164,62 +193,58 @@ module Vmpooler datastore_name = pool['datastore'] raise("Pool #{pool_name} does not have a datastore defined for the provider #{name}") if datastore_name.nil? - connection = get_connection - - vm_object = find_vm(vm_name, connection) - raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil? - - add_disk(vm_object, disk_size, datastore_name, connection) + @connection_pool.with_metrics do |connection| + vm_object = find_vm(vm_name, connection) + raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil? + add_disk(vm_object, disk_size, datastore_name, connection) + end true end def create_snapshot(pool_name, vm_name, new_snapshot_name) - connection = get_connection + @connection_pool.with_metrics do |connection| + vm_object = find_vm(vm_name, connection) + raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil? - vm_object = find_vm(vm_name, connection) - raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil? - - old_snap = find_snapshot(vm_object, new_snapshot_name) - raise("Snapshot #{new_snapshot_name} for VM #{vm_name} in pool #{pool_name} already exists for the provider #{name}") unless old_snap.nil? - - vm_object.CreateSnapshot_Task( - name: new_snapshot_name, - description: 'vmpooler', - memory: true, - quiesce: true - ).wait_for_completion + old_snap = find_snapshot(vm_object, new_snapshot_name) + raise("Snapshot #{new_snapshot_name} for VM #{vm_name} in pool #{pool_name} already exists for the provider #{name}") unless old_snap.nil? 
+ vm_object.CreateSnapshot_Task( + name: new_snapshot_name, + description: 'vmpooler', + memory: true, + quiesce: true + ).wait_for_completion + end true end def revert_snapshot(pool_name, vm_name, snapshot_name) - connection = get_connection + @connection_pool.with_metrics do |connection| + vm_object = find_vm(vm_name, connection) + raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil? - vm_object = find_vm(vm_name, connection) - raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil? - - snapshot_object = find_snapshot(vm_object, snapshot_name) - raise("Snapshot #{snapshot_name} for VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if snapshot_object.nil? - - snapshot_object.RevertToSnapshot_Task.wait_for_completion + snapshot_object = find_snapshot(vm_object, snapshot_name) + raise("Snapshot #{snapshot_name} for VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if snapshot_object.nil? + snapshot_object.RevertToSnapshot_Task.wait_for_completion + end true end def destroy_vm(_pool_name, vm_name) - connection = get_connection + @connection_pool.with_metrics do |connection| + vm_object = find_vm(vm_name, connection) + # If a VM doesn't exist then it is effectively deleted + return true if vm_object.nil? - vm_object = find_vm(vm_name, connection) - # If a VM doesn't exist then it is effectively deleted - return true if vm_object.nil? - - # Poweroff the VM if it's running - vm_object.PowerOffVM_Task.wait_for_completion if vm_object.runtime && vm_object.runtime.powerState && vm_object.runtime.powerState == 'poweredOn' - - # Kill it with fire - vm_object.Destroy_Task.wait_for_completion + # Poweroff the VM if it's running + vm_object.PowerOffVM_Task.wait_for_completion if vm_object.runtime && vm_object.runtime.powerState && vm_object.runtime.powerState == 'poweredOn' + # Kill it with fire + vm_object.Destroy_Task.wait_for_completion + end true end @@ -263,16 +288,6 @@ module Vmpooler DISK_TYPE = 'thin'.freeze DISK_MODE = 'persistent'.freeze - def get_connection - begin - @connection.serviceInstance.CurrentTime - rescue - @connection = connect_to_vsphere - end - - @connection - end - def connect_to_vsphere max_tries = global_config[:config]['max_tries'] || 3 retry_factor = global_config[:config]['retry_factor'] || 10 @@ -667,10 +682,6 @@ module Vmpooler relospec = RbVmomi::VIM.VirtualMachineRelocateSpec(host: host) vm.RelocateVM_Task(spec: relospec).wait_for_completion end - - def close - @connection.close - end end end end diff --git a/spec/unit/providers/vsphere_spec.rb b/spec/unit/providers/vsphere_spec.rb index 948e6da..30b1d09 100644 --- a/spec/unit/providers/vsphere_spec.rb +++ b/spec/unit/providers/vsphere_spec.rb @@ -53,6 +53,8 @@ describe 'Vmpooler::PoolManager::Provider::VSphere' do username: "vcenter_user" password: "vcenter_password" insecure: true + # Drop the connection pool timeout way down for spec tests so they fail fast + connection_pool_timeout: 1 :pools: - name: '#{poolname}' alias: [ 'mockpool' ] @@ -84,7 +86,7 @@ EOT let(:pool_config) { config[:pools][0] } before(:each) do - allow(subject).to receive(:get_connection).and_return(connection) + allow(subject).to receive(:connect_to_vsphere).and_return(connection) end context 'Given a pool folder that is missing' do @@ -93,7 +95,7 @@ EOT end it 'should get a connection' do - expect(subject).to receive(:get_connection).and_return(connection) + expect(subject).to 
receive(:connect_to_vsphere).and_return(connection) subject.vms_in_pool(poolname) end @@ -111,7 +113,7 @@ EOT end it 'should get a connection' do - expect(subject).to receive(:get_connection).and_return(connection) + expect(subject).to receive(:connect_to_vsphere).and_return(connection) subject.vms_in_pool(poolname) end @@ -140,7 +142,7 @@ EOT end it 'should get a connection' do - expect(subject).to receive(:get_connection).and_return(connection) + expect(subject).to receive(:connect_to_vsphere).and_return(connection) subject.vms_in_pool(poolname) end @@ -155,7 +157,7 @@ EOT describe '#get_vm_host' do before(:each) do - allow(subject).to receive(:get_connection).and_return(connection) + allow(subject).to receive(:connect_to_vsphere).and_return(connection) expect(subject).to receive(:find_vm).with(vmname,connection).and_return(vm_object) end @@ -163,7 +165,7 @@ EOT let(:vm_object) { nil } it 'should get a connection' do - expect(subject).to receive(:get_connection).and_return(connection) + expect(subject).to receive(:connect_to_vsphere).and_return(connection) subject.get_vm_host(poolname,vmname) end @@ -185,7 +187,7 @@ EOT end it 'should get a connection' do - expect(subject).to receive(:get_connection).and_return(connection) + expect(subject).to receive(:connect_to_vsphere).and_return(connection) subject.get_vm_host(poolname,vmname) end @@ -208,7 +210,7 @@ EOT end it 'should get a connection' do - expect(subject).to receive(:get_connection).and_return(connection) + expect(subject).to receive(:connect_to_vsphere).and_return(connection) subject.get_vm_host(poolname,vmname) end @@ -223,7 +225,7 @@ EOT let(:vm_object) { nil } before(:each) do - allow(subject).to receive(:get_connection).and_return(connection) + allow(subject).to receive(:connect_to_vsphere).and_return(connection) expect(subject).to receive(:find_vm).with(vmname,connection).and_return(vm_object) end @@ -231,7 +233,7 @@ EOT let(:vm_object) { nil } it 'should get a connection' do - expect(subject).to receive(:get_connection).and_return(connection) + expect(subject).to receive(:connect_to_vsphere).and_return(connection) subject.find_least_used_compatible_host(poolname,vmname) end @@ -250,7 +252,7 @@ EOT end it 'should get a connection' do - expect(subject).to receive(:get_connection).and_return(connection) + expect(subject).to receive(:connect_to_vsphere).and_return(connection) subject.find_least_used_compatible_host(poolname,vmname) end @@ -272,7 +274,7 @@ EOT end it 'should get a connection' do - expect(subject).to receive(:get_connection).and_return(connection) + expect(subject).to receive(:connect_to_vsphere).and_return(connection) subject.find_least_used_compatible_host(poolname,vmname) end @@ -293,7 +295,7 @@ EOT before(:each) do config[:pools][0]['clone_target'] = cluster_name - allow(subject).to receive(:get_connection).and_return(connection) + allow(subject).to receive(:connect_to_vsphere).and_return(connection) allow(subject).to receive(:find_vm).and_return(vm_object) end @@ -389,7 +391,7 @@ EOT describe '#get_vm' do let(:vm_object) { nil } before(:each) do - allow(subject).to receive(:get_connection).and_return(connection) + allow(subject).to receive(:connect_to_vsphere).and_return(connection) expect(subject).to receive(:find_vm).with(vmname,connection).and_return(vm_object) end @@ -510,7 +512,7 @@ EOT let(:new_vm_object) { mock_RbVmomi_VIM_VirtualMachine({ :name => vmname }) } before(:each) do - allow(subject).to receive(:get_connection).and_return(connection) + allow(subject).to 
receive(:connect_to_vsphere).and_return(connection) allow(connection.serviceInstance).to receive(:find_datacenter).and_return(datacenter_object) end @@ -585,7 +587,7 @@ EOT let(:datastorename) { 'datastore0' } let(:disk_size) { 10 } before(:each) do - allow(subject).to receive(:get_connection).and_return(connection) + allow(subject).to receive(:connect_to_vsphere).and_return(connection) allow(subject).to receive(:find_vm).with(vmname, connection).and_return(vm_object) end @@ -643,7 +645,7 @@ EOT let(:vm_object) { mock_RbVmomi_VIM_VirtualMachine({ :name => vmname, :snapshot_tree => snapshot_tree }) } before(:each) do - allow(subject).to receive(:get_connection).and_return(connection) + allow(subject).to receive(:connect_to_vsphere).and_return(connection) allow(subject).to receive(:find_vm).with(vmname,connection).and_return(vm_object) end @@ -698,7 +700,7 @@ EOT let(:vm_object) { mock_RbVmomi_VIM_VirtualMachine({ :name => vmname, :snapshot_tree => snapshot_tree }) } before(:each) do - allow(subject).to receive(:get_connection).and_return(connection) + allow(subject).to receive(:connect_to_vsphere).and_return(connection) allow(subject).to receive(:find_vm).with(vmname,connection).and_return(vm_object) end @@ -747,7 +749,7 @@ EOT let(:destroy_task) { mock_RbVmomi_VIM_Task() } before(:each) do - allow(subject).to receive(:get_connection).and_return(connection) + allow(subject).to receive(:connect_to_vsphere).and_return(connection) end context 'Given a missing VM name' do @@ -876,57 +878,6 @@ EOT end # vSphere helper methods - describe '#get_connection' do - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",connection) - end - - context 'when connection is ok' do - it 'should not attempt to reconnect' do - expect(subject).to receive(:connect_to_vsphere).exactly(0).times - - subject.get_connection() - end - - it 'should return a connection' do - result = subject.get_connection() - - expect(result).to be(connection) - end - end - - context 'when connection has broken' do - before(:each) do - expect(connection.serviceInstance).to receive(:CurrentTime).and_raise(RuntimeError,'MockConnectionError') - end - - it 'should not increment the connect.open metric' do - # https://github.com/puppetlabs/vmpooler/issues/195 - expect(metrics).to receive(:increment).with('connect.open').exactly(0).times - allow(subject).to receive(:connect_to_vsphere) - - subject.get_connection() - end - - it 'should call connect_to_vsphere to reconnect' do - allow(metrics).to receive(:increment) - expect(subject).to receive(:connect_to_vsphere).with(no_args) - - subject.get_connection() - end - - it 'should return a new connection' do - new_connection = mock_RbVmomi_VIM_Connection(connection_options) - expect(subject).to receive(:connect_to_vsphere).with(no_args).and_return(new_connection) - - result = subject.get_connection() - - expect(result).to be(new_connection) - end - end - end - describe '#connect_to_vsphere' do before(:each) do allow(RbVmomi::VIM).to receive(:connect).and_return(connection) @@ -2828,30 +2779,4 @@ EOT expect(subject.migrate_vm_host(vm_object,host_object)).to eq('RELOCATE_RESULT') end end - - describe '#close' do - context 'no connection has been made' do - before(:each) do - # NOTE - Using instance_variable_set is a code smell of code that is not testable - subject.instance_variable_set("@connection",nil) - end - - it 'should not error' do - pending('https://github.com/puppetlabs/vmpooler/issues/211') - 
-        subject.close
-      end
-    end
-
-    context 'on an open connection' do
-      before(:each) do
-        # NOTE - Using instance_variable_set is a code smell of code that is not testable
-        subject.instance_variable_set("@connection",connection)
-      end
-
-      it 'should close the underlying connection object' do
-        expect(connection).to receive(:close)
-        subject.close
-      end
-    end
-  end
 end

From 85b0f035aa2b1c524ab7bc9b970d6b97da0af5bf Mon Sep 17 00:00:00 2001
From: Glenn Sarti
Date: Fri, 12 May 2017 14:54:30 -0700
Subject: [PATCH 32/32] (POOLER-52) Add recovery to vSphere connections

The generic connection pooler is only responsible for managing the connection
objects, however the providers themselves are responsible for ensuring that
the connection is alive/healthy etc.

Previously, the older vSphere helper would reconnect however this was lost
when the connection pooler was introduced.

This commit adds a method that checks the connection before use, and then
reconnects if the connection is in a bad state.
---
 lib/vmpooler/providers/vsphere.rb         | 52 ++++++++++++++----
 spec/unit/generic_connection_pool_spec.rb | 41 ++++++++++++++
 spec/unit/providers/vsphere_spec.rb       | 67 +++++++++++++++++++++++
 3 files changed, 148 insertions(+), 12 deletions(-)

diff --git a/lib/vmpooler/providers/vsphere.rb b/lib/vmpooler/providers/vsphere.rb
index 3e1401a..ee6b3bc 100644
--- a/lib/vmpooler/providers/vsphere.rb
+++ b/lib/vmpooler/providers/vsphere.rb
@@ -2,6 +2,9 @@ module Vmpooler
   class PoolManager
     class Provider
       class VSphere < Vmpooler::PoolManager::Provider::Base
+        # The connection_pool method is normally used only for testing
+        attr_reader :connection_pool
+
         def initialize(config, logger, metrics, name, options)
           super(config, logger, metrics, name, options)
 
@@ -23,9 +26,12 @@ module Vmpooler
             timeout: connpool_timeout
           ) do
             logger.log('d', "[#{name}] Connection Pool - Creating a connection object")
+            # Need to wrap the vSphere connection object in another object. The generic connection pooler will preserve
+            # the object reference for the connection, which means it cannot "reconnect" by creating an entirely new connection
+            # object. Instead by wrapping it in a Hash, the Hash object reference itself never changes but the content of the
+            # Hash can change, and is preserved across invocations.
             new_conn = connect_to_vsphere
-
-            new_conn
+            { connection: new_conn }
           end
         end
 
@@ -35,7 +41,8 @@ module Vmpooler
 
         def vms_in_pool(pool_name)
           vms = []
-          @connection_pool.with_metrics do |connection|
+          @connection_pool.with_metrics do |pool_object|
+            connection = ensured_vsphere_connection(pool_object)
             foldername = pool_config(pool_name)['folder']
             folder_object = find_folder(foldername, connection)
 
@@ -51,7 +58,8 @@ module Vmpooler
 
         def get_vm_host(_pool_name, vm_name)
           host_name = nil
-          @connection_pool.with_metrics do |connection|
+          @connection_pool.with_metrics do |pool_object|
+            connection = ensured_vsphere_connection(pool_object)
             vm_object = find_vm(vm_name, connection)
             return host_name if vm_object.nil?
 
@@ -62,7 +70,8 @@ module Vmpooler
 
         def find_least_used_compatible_host(_pool_name, vm_name)
           hostname = nil
-          @connection_pool.with_metrics do |connection|
+          @connection_pool.with_metrics do |pool_object|
+            connection = ensured_vsphere_connection(pool_object)
             vm_object = find_vm(vm_name, connection)
             return hostname if vm_object.nil?
 
@@ -78,7 +87,8 @@ module Vmpooler
           pool = pool_config(pool_name)
           raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?
- @connection_pool.with_metrics do |connection| + @connection_pool.with_metrics do |pool_object| + connection = ensured_vsphere_connection(pool_object) vm_object = find_vm(vm_name, connection) raise("VM #{vm_name} does not exist in Pool #{pool_name} for the provider #{name}") if vm_object.nil? @@ -99,7 +109,8 @@ module Vmpooler def get_vm(_pool_name, vm_name) vm_hash = nil - @connection_pool.with_metrics do |connection| + @connection_pool.with_metrics do |pool_object| + connection = ensured_vsphere_connection(pool_object) vm_object = find_vm(vm_name, connection) return vm_hash if vm_object.nil? @@ -123,7 +134,8 @@ module Vmpooler pool = pool_config(pool_name) raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil? vm_hash = nil - @connection_pool.with_metrics do |connection| + @connection_pool.with_metrics do |pool_object| + connection = ensured_vsphere_connection(pool_object) # Assume all pool config is valid i.e. not missing template_path = pool['template'] target_folder_path = pool['folder'] @@ -193,7 +205,8 @@ module Vmpooler datastore_name = pool['datastore'] raise("Pool #{pool_name} does not have a datastore defined for the provider #{name}") if datastore_name.nil? - @connection_pool.with_metrics do |connection| + @connection_pool.with_metrics do |pool_object| + connection = ensured_vsphere_connection(pool_object) vm_object = find_vm(vm_name, connection) raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil? @@ -203,7 +216,8 @@ module Vmpooler end def create_snapshot(pool_name, vm_name, new_snapshot_name) - @connection_pool.with_metrics do |connection| + @connection_pool.with_metrics do |pool_object| + connection = ensured_vsphere_connection(pool_object) vm_object = find_vm(vm_name, connection) raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil? @@ -221,7 +235,8 @@ module Vmpooler end def revert_snapshot(pool_name, vm_name, snapshot_name) - @connection_pool.with_metrics do |connection| + @connection_pool.with_metrics do |pool_object| + connection = ensured_vsphere_connection(pool_object) vm_object = find_vm(vm_name, connection) raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil? @@ -234,7 +249,8 @@ module Vmpooler end def destroy_vm(_pool_name, vm_name) - @connection_pool.with_metrics do |connection| + @connection_pool.with_metrics do |pool_object| + connection = ensured_vsphere_connection(pool_object) vm_object = find_vm(vm_name, connection) # If a VM doesn't exist then it is effectively deleted return true if vm_object.nil? 
@@ -288,6 +304,18 @@ module Vmpooler DISK_TYPE = 'thin'.freeze DISK_MODE = 'persistent'.freeze + def ensured_vsphere_connection(connection_pool_object) + connection_pool_object[:connection] = connect_to_vsphere unless vsphere_connection_ok?(connection_pool_object[:connection]) + connection_pool_object[:connection] + end + + def vsphere_connection_ok?(connection) + _result = connection.serviceInstance.CurrentTime + return true + rescue + return false + end + def connect_to_vsphere max_tries = global_config[:config]['max_tries'] || 3 retry_factor = global_config[:config]['retry_factor'] || 10 diff --git a/spec/unit/generic_connection_pool_spec.rb b/spec/unit/generic_connection_pool_spec.rb index fac7478..ff57472 100644 --- a/spec/unit/generic_connection_pool_spec.rb +++ b/spec/unit/generic_connection_pool_spec.rb @@ -16,6 +16,47 @@ describe 'GenericConnectionPool' do ) { connection_object } } + describe "When consuming a pool object" do + let(:pool_size) { 1 } + let(:pool_timeout) { 1 } + let(:connection_object) {{ + connection: 'connection' + }} + + it 'should return a connection object when grabbing one from the pool' do + subject.with_metrics do |conn_pool_object| + expect(conn_pool_object).to be(connection_object) + end + end + + it 'should return the same connection object when calling the pool multiple times' do + subject.with_metrics do |conn_pool_object| + expect(conn_pool_object).to be(connection_object) + end + subject.with_metrics do |conn_pool_object| + expect(conn_pool_object).to be(connection_object) + end + subject.with_metrics do |conn_pool_object| + expect(conn_pool_object).to be(connection_object) + end + end + + it 'should preserve connection state across mulitple pool calls' do + new_connection = 'new_connection' + # Ensure the connection is not modified + subject.with_metrics do |conn_pool_object| + expect(conn_pool_object).to be(connection_object) + expect(conn_pool_object[:connection]).to_not eq(new_connection) + # Change the connection + conn_pool_object[:connection] = new_connection + end + # Ensure the connection is modified + subject.with_metrics do |conn_pool_object| + expect(conn_pool_object).to be(connection_object) + expect(conn_pool_object[:connection]).to eq(new_connection) + end + end + end describe "#with_metrics" do before(:each) do diff --git a/spec/unit/providers/vsphere_spec.rb b/spec/unit/providers/vsphere_spec.rb index 30b1d09..74ccc66 100644 --- a/spec/unit/providers/vsphere_spec.rb +++ b/spec/unit/providers/vsphere_spec.rb @@ -75,6 +75,10 @@ EOT subject { Vmpooler::PoolManager::Provider::VSphere.new(config, logger, metrics, 'vsphere', provider_options) } + before(:each) do + allow(subject).to receive(:vsphere_connection_ok?).and_return(true) + end + describe '#name' do it 'should be vsphere' do expect(subject.name).to eq('vsphere') @@ -878,6 +882,69 @@ EOT end # vSphere helper methods + describe '#ensured_vsphere_connection' do + let(:config) { YAML.load(<<-EOT +--- +:config: +:providers: + :vsphere: + # Drop the connection pool timeout way down for spec tests so they fail fast + connection_pool_timeout: 1 + connection_pool_size: 1 +:pools: +EOT + ) + } + let(:connection1) { mock_RbVmomi_VIM_Connection(connection_options) } + let(:connection2) { mock_RbVmomi_VIM_Connection(connection_options) } + + before(:each) do + allow(subject).to receive(:connect_to_vsphere).and_return(connection1) + end + + # This is to ensure that the pool_size of 1 is in effect + it 'should return the same connection object when calling the pool multiple times' do + 
subject.connection_pool.with_metrics do |pool_object| + expect(pool_object[:connection]).to be(connection1) + end + subject.connection_pool.with_metrics do |pool_object| + expect(pool_object[:connection]).to be(connection1) + end + subject.connection_pool.with_metrics do |pool_object| + expect(pool_object[:connection]).to be(connection1) + end + end + + context 'when the connection breaks' do + before(:each) do + # Emulate the connection state being good, then bad, then good again + expect(subject).to receive(:vsphere_connection_ok?).and_return(true, false, true) + expect(subject).to receive(:connect_to_vsphere).and_return(connection1, connection2) + end + + it 'should restore the connection' do + subject.connection_pool.with_metrics do |pool_object| + # This line needs to be added to all instances of the connection_pool allocation + connection = subject.ensured_vsphere_connection(pool_object) + + expect(connection).to be(connection1) + end + + subject.connection_pool.with_metrics do |pool_object| + connection = subject.ensured_vsphere_connection(pool_object) + # The second connection would have failed. This test ensures that a + # new connection object was created. + expect(connection).to be(connection2) + end + + subject.connection_pool.with_metrics do |pool_object| + connection = subject.ensured_vsphere_connection(pool_object) + expect(connection).to be(connection2) + end + end + end + end + describe '#connect_to_vsphere' do before(:each) do allow(RbVmomi::VIM).to receive(:connect).and_return(connection)