From c502f92cd342e867299ea84150142ca00c1bfef1 Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Wed, 1 Mar 2017 20:49:09 -0800 Subject: [PATCH 1/3] (POOLER-70) Add initial VM Provider service Previously all of the VM provisioning code was intertwined with the VM lifecycle code e.g. The VSphere specific code is mixed with Redis code. This makes it impossible to add aditional providers or disable VSphere integration. This commit begins the process to refactor the VSphere code out of the lifecycle code by introducing the concept of VM Providers. A Provider will contain the logic/ code to manage VMs i.e. create/destroy/inquire. Therefore the Pool Manager can query a strict interface into one or more Providers. Initially only a VSphere provider will be available. This commit adds the base class for all providers and describes the API or contract that the Pool Manager will use to manage VMs. --- lib/vmpooler.rb | 2 +- lib/vmpooler/providers.rb | 7 ++ lib/vmpooler/providers/base.rb | 107 +++++++++++++++++++++++++++++++ spec/unit/providers/base_spec.rb | 92 ++++++++++++++++++++++++++ 4 files changed, 207 insertions(+), 1 deletion(-) create mode 100644 lib/vmpooler/providers.rb create mode 100644 lib/vmpooler/providers/base.rb create mode 100644 spec/unit/providers/base_spec.rb diff --git a/lib/vmpooler.rb b/lib/vmpooler.rb index 26c3faf..953dac1 100644 --- a/lib/vmpooler.rb +++ b/lib/vmpooler.rb @@ -12,7 +12,7 @@ module Vmpooler require 'yaml' require 'set' - %w(api graphite logger pool_manager vsphere_helper statsd dummy_statsd).each do |lib| + %w(api graphite logger pool_manager vsphere_helper statsd dummy_statsd providers).each do |lib| begin require "vmpooler/#{lib}" rescue LoadError diff --git a/lib/vmpooler/providers.rb b/lib/vmpooler/providers.rb new file mode 100644 index 0000000..eb35436 --- /dev/null +++ b/lib/vmpooler/providers.rb @@ -0,0 +1,7 @@ +%w( base vsphere ).each do |lib| + begin + require "vmpooler/providers/#{lib}" + rescue LoadError + require File.expand_path(File.join(File.dirname(__FILE__), 'providers', lib)) + end +end diff --git a/lib/vmpooler/providers/base.rb b/lib/vmpooler/providers/base.rb new file mode 100644 index 0000000..4e7877b --- /dev/null +++ b/lib/vmpooler/providers/base.rb @@ -0,0 +1,107 @@ +module Vmpooler + class PoolManager + class Provider + class Base + # These defs must be overidden in child classes + + def initialize(options) + @provider_options = options + end + + # returns + # [String] Name of the provider service + def name + 'base' + end + + # inputs + # pool : hashtable from config file + # returns + # hashtable + # name : name of the device <---- TODO is this all? + def vms_in_pool(pool) + fail "#{self.class.name} does not implement vms_in_pool" + end + + # inputs + # vm_name: string + # returns + # [String] hostname = Name of the host computer running the vm. If this is not a Virtual Machine, it returns the vm_name + def get_vm_host(vm_name) + fail "#{self.class.name} does not implement get_vm_host" + end + + # inputs + # vm_name: string + # returns + # [String] hostname = Name of the most appropriate host computer to run this VM. 
Useful for load balancing VMs in a cluster + # If this is not a Virtual Machine, it returns the vm_name + def find_least_used_compatible_host(vm_name) + fail "#{self.class.name} does not implement find_least_used_compatible_host" + end + + # inputs + # vm_name: string + # dest_host_name: string (Name of the host to migrate `vm_name` to) + # returns + # [Boolean] Returns true on success or false on failure + def migrate_vm_to_host(vm_name, dest_host_name) + fail "#{self.class.name} does not implement migrate_vm_to_host" + end + + # inputs + # vm_name: string + # returns + # nil if it doesn't exist + # Hashtable of the VM + # [String] name = Name of the VM + # [String] hostname = Name reported by VMware Tools (host.summary.guest.hostName) + # [String] template = This is the name of the template exposed by the API. It must _match_ the poolname + # [String] poolname = Name of the pool the VM is located in + # [Time] boottime = Time when the VM was created/booted + # [String] powerstate = Current power state of a VM. Valid values (as per vCenter API) + # - 'PoweredOn','PoweredOff' + def get_vm(vm_name) + fail "#{self.class.name} does not implement get_vm" + end + + # inputs + # pool : hashtable from config file + # new_vmname : string Name the new VM should use + # returns + # Hashtable of the VM as per get_vm + def create_vm(pool,new_vmname) + fail "#{self.class.name} does not implement create_vm" + end + + # inputs + # vm_name: string + # pool: string + # returns + # boolean : true if success, false on error + def destroy_vm(vm_name,pool) + fail "#{self.class.name} does not implement destroy_vm" + end + + # inputs + # vm : string + # pool: string + # timeout: int (Seconds) + # returns + # result: boolean + def is_vm_ready?(vm,pool,timeout) + fail "#{self.class.name} does not implement is_vm_ready?" + end + + # inputs + # vm : string + # returns + # result: boolean + def vm_exists?(vm) + !get_vm(vm).nil?
+ end + + end + end + end +end diff --git a/spec/unit/providers/base_spec.rb b/spec/unit/providers/base_spec.rb new file mode 100644 index 0000000..24f2b7c --- /dev/null +++ b/spec/unit/providers/base_spec.rb @@ -0,0 +1,92 @@ +require 'spec_helper' + +# This spec does not really exercise code paths but is merely used +# to enforce that certain methods are defined in the base classes + +describe 'Vmpooler::PoolManager::Provider::Base' do + let(:config) { {} } + let(:fake_vm) { + fake_vm = {} + fake_vm['name'] = 'vm1' + fake_vm['hostname'] = 'vm1' + fake_vm['template'] = 'pool1' + fake_vm['boottime'] = Time.now + fake_vm['powerstate'] = 'PoweredOn' + + fake_vm + } + + subject { Vmpooler::PoolManager::Provider::Base.new(config) } + + describe '#name' do + it 'should be base' do + expect(subject.name).to eq('base') + end + end + + describe '#vms_in_pool' do + it 'should raise error' do + expect{subject.vms_in_pool('pool')}.to raise_error(/does not implement vms_in_pool/) + end + end + + describe '#get_vm_host' do + it 'should raise error' do + expect{subject.get_vm_host('vm')}.to raise_error(/does not implement get_vm_host/) + end + end + + describe '#find_least_used_compatible_host' do + it 'should raise error' do + expect{subject.find_least_used_compatible_host('vm')}.to raise_error(/does not implement find_least_used_compatible_host/) + end + end + + describe '#migrate_vm_to_host' do + it 'should raise error' do + expect{subject.migrate_vm_to_host('vm','host')}.to raise_error(/does not implement migrate_vm_to_host/) + end + end + + describe '#get_vm' do + it 'should raise error' do + expect{subject.get_vm('vm')}.to raise_error(/does not implement get_vm/) + end + end + + describe '#create_vm' do + it 'should raise error' do + expect{subject.create_vm('pool','newname')}.to raise_error(/does not implement create_vm/) + end + end + + describe '#destroy_vm' do + it 'should raise error' do + expect{subject.destroy_vm('vm','pool')}.to raise_error(/does not implement destroy_vm/) + end + end + + describe '#is_vm_ready?' do + it 'should raise error' do + expect{subject.is_vm_ready?('vm','pool','timeout')}.to raise_error(/does not implement is_vm_ready?/) + end + end + + describe '#vm_exists?' do + it 'should raise error' do + expect{subject.vm_exists?('vm')}.to raise_error(/does not implement/) + end + + it 'should return true when get_vm returns an object' do + allow(subject).to receive(:get_vm).with('vm').and_return(fake_vm) + + expect(subject.vm_exists?('vm')).to eq(true) + end + + it 'should return false when get_vm returns nil' do + allow(subject).to receive(:get_vm).with('vm').and_return(nil) + + expect(subject.vm_exists?('vm')).to eq(false) + end + end +end From ac8a34de8640b974b1904e5f3372a65ec8ba882f Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Wed, 1 Mar 2017 21:05:35 -0800 Subject: [PATCH 2/3] (POOLER-70) Rename the use of vsphere to provider VM provisioning will be handled by VM Providers. This commit renames the use of vsphere to provider where appropriate and changes the per-pool helper from vsphere to providers to more accurately represent it's intended use. 
--- lib/vmpooler/pool_manager.rb | 126 ++++---- spec/unit/pool_manager_spec.rb | 556 ++++++++++++++++----------------- 2 files changed, 339 insertions(+), 343 deletions(-) diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb index 079e31c..c8c033b 100644 --- a/lib/vmpooler/pool_manager.rb +++ b/lib/vmpooler/pool_manager.rb @@ -12,17 +12,17 @@ module Vmpooler # Connect to Redis $redis = redis - # vSphere object - $vsphere = {} + # VM Provider objects + $providers = {} # Our thread-tracker object $threads = {} end # Check the state of a VM - def check_pending_vm(vm, pool, timeout, vsphere) + def check_pending_vm(vm, pool, timeout, provider) Thread.new do - _check_pending_vm(vm, pool, timeout, vsphere) + _check_pending_vm(vm, pool, timeout, provider) end end @@ -39,8 +39,8 @@ module Vmpooler end end - def _check_pending_vm(vm, pool, timeout, vsphere) - host = vsphere.find_vm(vm) + def _check_pending_vm(vm, pool, timeout, provider) + host = provider.find_vm(vm) if ! host fail_pending_vm(vm, pool, timeout, false) @@ -95,7 +95,7 @@ module Vmpooler end end - def check_ready_vm(vm, pool, ttl, vsphere) + def check_ready_vm(vm, pool, ttl, provider) Thread.new do if ttl > 0 if (((Time.now - host.runtime.bootTime) / 60).to_s[/^\d+\.\d{1}/].to_f) > ttl @@ -114,7 +114,7 @@ module Vmpooler $redis.hset('vmpooler__vm__' + vm, 'check', Time.now) - host = vsphere.find_vm(vm) + host = provider.find_vm(vm) if host if @@ -158,14 +158,14 @@ module Vmpooler end end - def check_running_vm(vm, pool, ttl, vsphere) + def check_running_vm(vm, pool, ttl, provider) Thread.new do - _check_running_vm(vm, pool, ttl, vsphere) + _check_running_vm(vm, pool, ttl, provider) end end - def _check_running_vm(vm, pool, ttl, vsphere) - host = vsphere.find_vm(vm) + def _check_running_vm(vm, pool, ttl, provider) + host = provider.find_vm(vm) if host queue_from, queue_to = 'running', 'completed' @@ -189,10 +189,10 @@ module Vmpooler end # Clone a VM - def clone_vm(pool, vsphere) + def clone_vm(pool, provider) Thread.new do begin - _clone_vm(pool, vsphere) + _clone_vm(pool, provider) rescue => err $logger.log('s', "[!] 
[#{pool['name']}] failed while cloning VM with an error: #{err}") raise @@ -200,7 +200,7 @@ module Vmpooler end end - def _clone_vm(pool, vsphere) + def _clone_vm(pool, provider) template = pool['template'] folder = pool['folder'] datastore = pool['datastore'] @@ -213,7 +213,7 @@ module Vmpooler end if templatefolders - vm[vm['template']] = vsphere.find_folder(templatefolders.join('/')).find(vm['template']) + vm[vm['template']] = provider.find_folder(templatefolders.join('/')).find(vm['template']) else fail 'Please provide a full path to the template' end @@ -249,14 +249,14 @@ module Vmpooler # Choose a clone target if target - $clone_target = vsphere.find_least_used_host(target) + $clone_target = provider.find_least_used_host(target) elsif $config[:config]['clone_target'] - $clone_target = vsphere.find_least_used_host($config[:config]['clone_target']) + $clone_target = provider.find_least_used_host($config[:config]['clone_target']) end # Put the VM in the specified folder and resource pool relocateSpec = RbVmomi::VIM.VirtualMachineRelocateSpec( - datastore: vsphere.find_datastore(datastore), + datastore: provider.find_datastore(datastore), host: $clone_target, diskMoveType: :moveChildMostDiskBacking ) @@ -275,7 +275,7 @@ module Vmpooler begin start = Time.now vm[vm['template']].CloneVM_Task( - folder: vsphere.find_folder(folder), + folder: provider.find_folder(folder), name: vm['hostname'], spec: spec ).wait_for_completion @@ -297,7 +297,7 @@ module Vmpooler end # Destroy a VM - def destroy_vm(vm, pool, vsphere) + def destroy_vm(vm, pool, provider) Thread.new do $redis.srem('vmpooler__completed__' + pool, vm) $redis.hdel('vmpooler__active__' + pool, vm) @@ -306,7 +306,7 @@ module Vmpooler # Auto-expire metadata key $redis.expire('vmpooler__vm__' + vm, ($config[:redis]['data_ttl'].to_i * 60 * 60)) - host = vsphere.find_vm(vm) + host = provider.find_vm(vm) if host start = Time.now @@ -329,14 +329,14 @@ module Vmpooler end end - def create_vm_disk(vm, disk_size, vsphere) + def create_vm_disk(vm, disk_size, provider) Thread.new do - _create_vm_disk(vm, disk_size, vsphere) + _create_vm_disk(vm, disk_size, provider) end end - def _create_vm_disk(vm, disk_size, vsphere) - host = vsphere.find_vm(vm) + def _create_vm_disk(vm, disk_size, provider) + host = provider.find_vm(vm) if (host) && ((! disk_size.nil?) && (! disk_size.empty?) && (disk_size.to_i > 0)) $logger.log('s', "[ ] [disk_manager] '#{vm}' is attaching a #{disk_size}gb disk") @@ -353,7 +353,7 @@ module Vmpooler end if ((! datastore.nil?) && (! datastore.empty?)) - vsphere.add_disk(host, disk_size, datastore) + provider.add_disk(host, disk_size, datastore) rdisks = $redis.hget('vmpooler__vm__' + vm, 'disk') disks = rdisks ? rdisks.split(':') : [] @@ -369,14 +369,14 @@ module Vmpooler end end - def create_vm_snapshot(vm, snapshot_name, vsphere) + def create_vm_snapshot(vm, snapshot_name, provider) Thread.new do - _create_vm_snapshot(vm, snapshot_name, vsphere) + _create_vm_snapshot(vm, snapshot_name, provider) end end - def _create_vm_snapshot(vm, snapshot_name, vsphere) - host = vsphere.find_vm(vm) + def _create_vm_snapshot(vm, snapshot_name, provider) + host = provider.find_vm(vm) if (host) && ((! snapshot_name.nil?) && (! 
snapshot_name.empty?)) $logger.log('s', "[ ] [snapshot_manager] '#{vm}' is being snapshotted") @@ -398,17 +398,17 @@ module Vmpooler end end - def revert_vm_snapshot(vm, snapshot_name, vsphere) + def revert_vm_snapshot(vm, snapshot_name, provider) Thread.new do - _revert_vm_snapshot(vm, snapshot_name, vsphere) + _revert_vm_snapshot(vm, snapshot_name, provider) end end - def _revert_vm_snapshot(vm, snapshot_name, vsphere) - host = vsphere.find_vm(vm) + def _revert_vm_snapshot(vm, snapshot_name, provider) + host = provider.find_vm(vm) if host - snapshot = vsphere.find_snapshot(host, snapshot_name) + snapshot = provider.find_snapshot(host, snapshot_name) if snapshot $logger.log('s', "[ ] [snapshot_manager] '#{vm}' is being reverted to snapshot '#{snapshot_name}'") @@ -427,11 +427,11 @@ module Vmpooler def check_disk_queue(maxloop = 0, loop_delay = 5) $logger.log('d', "[*] [disk_manager] starting worker thread") - $vsphere['disk_manager'] ||= Vmpooler::VsphereHelper.new $config, $metrics + $providers['disk_manager'] ||= Vmpooler::VsphereHelper.new $config, $metrics $threads['disk_manager'] = Thread.new do loop_count = 1 loop do - _check_disk_queue $vsphere['disk_manager'] + _check_disk_queue $providers['disk_manager'] sleep(loop_delay) unless maxloop.zero? @@ -442,13 +442,13 @@ module Vmpooler end end - def _check_disk_queue(vsphere) + def _check_disk_queue(provider) vm = $redis.spop('vmpooler__tasks__disk') unless vm.nil? begin vm_name, disk_size = vm.split(':') - create_vm_disk(vm_name, disk_size, vsphere) + create_vm_disk(vm_name, disk_size, provider) rescue $logger.log('s', "[!] [disk_manager] disk creation appears to have failed") end @@ -458,12 +458,12 @@ module Vmpooler def check_snapshot_queue(maxloop = 0, loop_delay = 5) $logger.log('d', "[*] [snapshot_manager] starting worker thread") - $vsphere['snapshot_manager'] ||= Vmpooler::VsphereHelper.new $config, $metrics + $providers['snapshot_manager'] ||= Vmpooler::VsphereHelper.new $config, $metrics $threads['snapshot_manager'] = Thread.new do loop_count = 1 loop do - _check_snapshot_queue $vsphere['snapshot_manager'] + _check_snapshot_queue $providers['snapshot_manager'] sleep(loop_delay) unless maxloop.zero? @@ -474,13 +474,13 @@ module Vmpooler end end - def _check_snapshot_queue(vsphere) + def _check_snapshot_queue(provider) vm = $redis.spop('vmpooler__tasks__snapshot') unless vm.nil? begin vm_name, snapshot_name = vm.split(':') - create_vm_snapshot(vm_name, snapshot_name, vsphere) + create_vm_snapshot(vm_name, snapshot_name, provider) rescue $logger.log('s', "[!] [snapshot_manager] snapshot appears to have failed") end @@ -491,7 +491,7 @@ module Vmpooler unless vm.nil? begin vm_name, snapshot_name = vm.split(':') - revert_vm_snapshot(vm_name, snapshot_name, vsphere) + revert_vm_snapshot(vm_name, snapshot_name, provider) rescue $logger.log('s', "[!] 
[snapshot_manager] snapshot revert appears to have failed") end @@ -504,16 +504,16 @@ module Vmpooler migration_limit if migration_limit >= 1 end - def migrate_vm(vm, pool, vsphere) + def migrate_vm(vm, pool, provider) Thread.new do - _migrate_vm(vm, pool, vsphere) + _migrate_vm(vm, pool, provider) end end - def _migrate_vm(vm, pool, vsphere) + def _migrate_vm(vm, pool, provider) begin $redis.srem('vmpooler__migrating__' + pool, vm) - vm_object = vsphere.find_vm(vm) + vm_object = provider.find_vm(vm) parent_host, parent_host_name = get_vm_host_info(vm_object) migration_limit = migration_limit $config[:config]['migration_limit'] migration_count = $redis.scard('vmpooler__migration') @@ -527,11 +527,11 @@ module Vmpooler return else $redis.sadd('vmpooler__migration', vm) - host, host_name = vsphere.find_least_used_compatible_host(vm_object) + host, host_name = provider.find_least_used_compatible_host(vm_object) if host == parent_host $logger.log('s', "[ ] [#{pool}] No migration required for '#{vm}' running on #{parent_host_name}") else - finish = migrate_vm_and_record_timing(vm_object, vm, pool, host, parent_host_name, host_name, vsphere) + finish = migrate_vm_and_record_timing(vm_object, vm, pool, host, parent_host_name, host_name, provider) $logger.log('s', "[>] [#{pool}] '#{vm}' migrated from #{parent_host_name} to #{host_name} in #{finish} seconds") end remove_vmpooler_migration_vm(pool, vm) @@ -556,9 +556,9 @@ module Vmpooler end end - def migrate_vm_and_record_timing(vm_object, vm_name, pool, host, source_host_name, dest_host_name, vsphere) + def migrate_vm_and_record_timing(vm_object, vm_name, pool, host, source_host_name, dest_host_name, provider) start = Time.now - vsphere.migrate_vm_host(vm_object, host) + provider.migrate_vm_host(vm_object, host) finish = '%.2f' % (Time.now - start) $metrics.timing("migrate.#{pool}", finish) $metrics.increment("migrate_from.#{source_host_name}") @@ -572,12 +572,12 @@ module Vmpooler def check_pool(pool, maxloop = 0, loop_delay = 5) $logger.log('d', "[*] [#{pool['name']}] starting worker thread") - $vsphere[pool['name']] ||= Vmpooler::VsphereHelper.new $config, $metrics + $providers[pool['name']] ||= Vmpooler::VsphereHelper.new $config, $metrics $threads[pool['name']] = Thread.new do loop_count = 1 loop do - _check_pool(pool, $vsphere[pool['name']]) + _check_pool(pool, $providers[pool['name']]) sleep(loop_delay) unless maxloop.zero? @@ -588,11 +588,11 @@ module Vmpooler end end - def _check_pool(pool, vsphere) + def _check_pool(pool, provider) # INVENTORY inventory = {} begin - base = vsphere.find_folder(pool['folder']) + base = provider.find_folder(pool['folder']) base.childEntity.each do |vm| if @@ -619,7 +619,7 @@ module Vmpooler if inventory[vm] begin vm_lifetime = $redis.hget('vmpooler__vm__' + vm, 'lifetime') || $config[:config]['vm_lifetime'] || 12 - check_running_vm(vm, pool['name'], vm_lifetime, vsphere) + check_running_vm(vm, pool['name'], vm_lifetime, provider) rescue => err $logger.log('d', "[!] [#{pool['name']}] _check_pool with an error while evaluating running VMs: #{err}") end @@ -630,7 +630,7 @@ module Vmpooler $redis.smembers("vmpooler__ready__#{pool['name']}").each do |vm| if inventory[vm] begin - check_ready_vm(vm, pool['name'], pool['ready_ttl'] || 0, vsphere) + check_ready_vm(vm, pool['name'], pool['ready_ttl'] || 0, provider) rescue => err $logger.log('d', "[!] 
[#{pool['name']}] _check_pool failed with an error while evaluating ready VMs: #{err}") end @@ -642,7 +642,7 @@ module Vmpooler pool_timeout = pool['timeout'] || $config[:config]['timeout'] || 15 if inventory[vm] begin - check_pending_vm(vm, pool['name'], pool_timeout, vsphere) + check_pending_vm(vm, pool['name'], pool_timeout, provider) rescue => err $logger.log('d', "[!] [#{pool['name']}] _check_pool failed with an error while evaluating pending VMs: #{err}") end @@ -655,7 +655,7 @@ module Vmpooler $redis.smembers("vmpooler__completed__#{pool['name']}").each do |vm| if inventory[vm] begin - destroy_vm(vm, pool['name'], vsphere) + destroy_vm(vm, pool['name'], provider) rescue => err $redis.srem("vmpooler__completed__#{pool['name']}", vm) $redis.hdel("vmpooler__active__#{pool['name']}", vm) @@ -692,7 +692,7 @@ module Vmpooler $redis.smembers("vmpooler__migrating__#{pool['name']}").each do |vm| if inventory[vm] begin - migrate_vm(vm, pool['name'], vsphere) + migrate_vm(vm, pool['name'], provider) rescue => err $logger.log('s', "[x] [#{pool['name']}] '#{vm}' failed to migrate: #{err}") end @@ -722,7 +722,7 @@ module Vmpooler if $redis.get('vmpooler__tasks__clone').to_i < $config[:config]['task_limit'].to_i begin $redis.incr('vmpooler__tasks__clone') - clone_vm(pool, vsphere) + clone_vm(pool, provider) rescue => err $logger.log('s', "[!] [#{pool['name']}] clone failed during check_pool with an error: #{err}") $redis.decr('vmpooler__tasks__clone') diff --git a/spec/unit/pool_manager_spec.rb b/spec/unit/pool_manager_spec.rb index 0c2a619..125f357 100644 --- a/spec/unit/pool_manager_spec.rb +++ b/spec/unit/pool_manager_spec.rb @@ -23,7 +23,7 @@ describe 'Pool Manager' do subject { Vmpooler::PoolManager.new(config, logger, redis, metrics) } describe '#check_pending_vm' do - let(:vsphere) { double('vsphere') } + let(:provider) { double('provider') } before do expect(subject).not_to be_nil @@ -31,9 +31,9 @@ describe 'Pool Manager' do it 'calls _check_pending_vm' do expect(Thread).to receive(:new).and_yield - expect(subject).to receive(:_check_pending_vm).with(vm,pool,timeout,vsphere) + expect(subject).to receive(:_check_pending_vm).with(vm,pool,timeout,provider) - subject.check_pending_vm(vm, pool, timeout, vsphere) + subject.check_pending_vm(vm, pool, timeout, provider) end end @@ -94,7 +94,7 @@ describe 'Pool Manager' do end describe '#_check_pending_vm' do - let(:vsphere) { double('vsphere') } + let(:provider) { double('provider') } before do expect(subject).not_to be_nil @@ -102,28 +102,28 @@ describe 'Pool Manager' do context 'host does not exist or not in pool' do it 'calls fail_pending_vm' do - expect(vsphere).to receive(:find_vm).and_return(nil) + expect(provider).to receive(:find_vm).and_return(nil) expect(subject).to receive(:fail_pending_vm).with(vm, pool, timeout, false) - subject._check_pending_vm(vm, pool, timeout, vsphere) + subject._check_pending_vm(vm, pool, timeout, provider) end end context 'host is in pool' do it 'calls move_pending_vm_to_ready if host is ready' do - expect(vsphere).to receive(:find_vm).and_return(host) + expect(provider).to receive(:find_vm).and_return(host) expect(subject).to receive(:open_socket).and_return(nil) expect(subject).to receive(:move_pending_vm_to_ready).with(vm, pool, host) - subject._check_pending_vm(vm, pool, timeout, vsphere) + subject._check_pending_vm(vm, pool, timeout, provider) end it 'calls fail_pending_vm if an error is raised' do - expect(vsphere).to receive(:find_vm).and_return(host) + expect(provider).to 
receive(:find_vm).and_return(host) expect(subject).to receive(:open_socket).and_raise(SocketError,'getaddrinfo: No such host is known') expect(subject).to receive(:fail_pending_vm).with(vm, pool, timeout) - subject._check_pending_vm(vm, pool, timeout, vsphere) + subject._check_pending_vm(vm, pool, timeout, provider) end end end @@ -266,7 +266,7 @@ describe 'Pool Manager' do end describe '#check_ready_vm' do - let(:vsphere) { double('vsphere') } + let(:provider) { double('provider') } let(:ttl) { 0 } let(:config) { @@ -285,38 +285,38 @@ EOT end it 'should raise an error if a TTL above zero is specified' do - expect { subject.check_ready_vm(vm,pool,5,vsphere) }.to raise_error(NameError) # This is an implementation bug + expect { subject.check_ready_vm(vm,pool,5,provider) }.to raise_error(NameError) # This is an implementation bug end context 'a VM that does not need to be checked' do it 'should do nothing' do redis.hset("vmpooler__vm__#{vm}", 'check',Time.now.to_s) - subject.check_ready_vm(vm, pool, ttl, vsphere) + subject.check_ready_vm(vm, pool, ttl, provider) end end context 'a VM that does not exist' do before do - allow(vsphere).to receive(:find_vm).and_return(nil) + allow(provider).to receive(:find_vm).and_return(nil) end it 'should set the current check timestamp' do allow(subject).to receive(:open_socket) expect(redis.hget("vmpooler__vm__#{vm}", 'check')).to be_nil - subject.check_ready_vm(vm, pool, ttl, vsphere) + subject.check_ready_vm(vm, pool, ttl, provider) expect(redis.hget("vmpooler__vm__#{vm}", 'check')).to_not be_nil end it 'should log a message' do expect(logger).to receive(:log).with('s', "[!] [#{pool}] '#{vm}' not found in vCenter inventory, removed from 'ready' queue") allow(subject).to receive(:open_socket) - subject.check_ready_vm(vm, pool, ttl, vsphere) + subject.check_ready_vm(vm, pool, ttl, provider) end it 'should remove the VM from the ready queue' do allow(subject).to receive(:open_socket) expect(redis.sismember("vmpooler__ready__#{pool}", vm)).to be(true) - subject.check_ready_vm(vm, pool, ttl, vsphere) + subject.check_ready_vm(vm, pool, ttl, provider) expect(redis.sismember("vmpooler__ready__#{pool}", vm)).to be(false) end end @@ -329,7 +329,7 @@ EOT allow(host).to receive_message_chain(:summary, :guest).and_return( double('guest') ) allow(host).to receive_message_chain(:summary, :guest, :hostName).and_return (vm) - allow(vsphere).to receive(:find_vm).and_return(host) + allow(provider).to receive(:find_vm).and_return(host) end context 'and is ready' do @@ -341,7 +341,7 @@ EOT end it 'should only set the next check interval' do - subject.check_ready_vm(vm, pool, ttl, vsphere) + subject.check_ready_vm(vm, pool, ttl, provider) end end @@ -356,13 +356,13 @@ EOT it 'should move the VM to the completed queue' do expect(redis).to receive(:smove).with("vmpooler__ready__#{pool}", "vmpooler__completed__#{pool}", vm) - subject.check_ready_vm(vm, pool, ttl, vsphere) + subject.check_ready_vm(vm, pool, ttl, provider) end it 'should move the VM to the completed queue in Redis' do expect(redis.sismember("vmpooler__ready__#{pool}", vm)).to be(true) expect(redis.sismember("vmpooler__completed__#{pool}", vm)).to be(false) - subject.check_ready_vm(vm, pool, ttl, vsphere) + subject.check_ready_vm(vm, pool, ttl, provider) expect(redis.sismember("vmpooler__ready__#{pool}", vm)).to be(false) expect(redis.sismember("vmpooler__completed__#{pool}", vm)).to be(true) end @@ -370,7 +370,7 @@ EOT it 'should log messages about being powered off' do expect(logger).to receive(:log).with('d', 
"[!] [#{pool}] '#{vm}' appears to be powered off, removed from 'ready' queue") - subject.check_ready_vm(vm, pool, ttl, vsphere) + subject.check_ready_vm(vm, pool, ttl, provider) end end @@ -385,13 +385,13 @@ EOT it 'should move the VM to the completed queue' do expect(redis).to receive(:smove).with("vmpooler__ready__#{pool}", "vmpooler__completed__#{pool}", vm) - subject.check_ready_vm(vm, pool, ttl, vsphere) + subject.check_ready_vm(vm, pool, ttl, provider) end it 'should move the VM to the completed queue in Redis' do expect(redis.sismember("vmpooler__ready__#{pool}", vm)).to be(true) expect(redis.sismember("vmpooler__completed__#{pool}", vm)).to be(false) - subject.check_ready_vm(vm, pool, ttl, vsphere) + subject.check_ready_vm(vm, pool, ttl, provider) expect(redis.sismember("vmpooler__ready__#{pool}", vm)).to be(false) expect(redis.sismember("vmpooler__completed__#{pool}", vm)).to be(true) end @@ -399,7 +399,7 @@ EOT it 'should log messages about being misnamed' do expect(logger).to receive(:log).with('d', "[!] [#{pool}] '#{vm}' has mismatched hostname, removed from 'ready' queue") - subject.check_ready_vm(vm, pool, ttl, vsphere) + subject.check_ready_vm(vm, pool, ttl, provider) end end @@ -414,13 +414,13 @@ EOT it 'should move the VM to the completed queue' do expect(redis).to receive(:smove).with("vmpooler__ready__#{pool}", "vmpooler__completed__#{pool}", vm) - subject.check_ready_vm(vm, pool, ttl, vsphere) + subject.check_ready_vm(vm, pool, ttl, provider) end it 'should move the VM to the completed queue in Redis' do expect(redis.sismember("vmpooler__ready__#{pool}", vm)).to be(true) expect(redis.sismember("vmpooler__completed__#{pool}", vm)).to be(false) - subject.check_ready_vm(vm, pool, ttl, vsphere) + subject.check_ready_vm(vm, pool, ttl, provider) expect(redis.sismember("vmpooler__ready__#{pool}", vm)).to be(false) expect(redis.sismember("vmpooler__completed__#{pool}", vm)).to be(true) end @@ -428,14 +428,14 @@ EOT it 'should log messages about being unreachable' do expect(logger).to receive(:log).with('d', "[!] [#{pool}] '#{vm}' is unreachable, removed from 'ready' queue") - subject.check_ready_vm(vm, pool, ttl, vsphere) + subject.check_ready_vm(vm, pool, ttl, provider) end end end end describe '#check_running_vm' do - let(:vsphere) { double('vsphere') } + let(:provider) { double('provider') } let (:ttl) { 5 } before do @@ -444,14 +444,14 @@ EOT it 'calls _check_running_vm' do expect(Thread).to receive(:new).and_yield - expect(subject).to receive(:_check_running_vm).with(vm, pool, ttl, vsphere) + expect(subject).to receive(:_check_running_vm).with(vm, pool, ttl, provider) - subject.check_running_vm(vm, pool, ttl, vsphere) + subject.check_running_vm(vm, pool, ttl, provider) end end describe '#_check_running_vm' do - let(:vsphere) { double('vsphere') } + let(:provider) { double('provider') } before do expect(subject).not_to be_nil @@ -462,9 +462,9 @@ EOT end it 'does nothing with a missing VM' do - allow(vsphere).to receive(:find_vm).and_return(nil) + allow(provider).to receive(:find_vm).and_return(nil) expect(redis.sismember("vmpooler__running__#{pool}", vm)).to be(true) - subject._check_running_vm(vm, pool, timeout, vsphere) + subject._check_running_vm(vm, pool, timeout, provider) expect(redis.sismember("vmpooler__running__#{pool}", vm)).to be(true) end @@ -474,36 +474,36 @@ EOT it 'should not move VM when not poweredOn' do # I'm not sure this test is useful. 
There is no codepath # in _check_running_vm that looks at Power State - allow(vsphere).to receive(:find_vm).and_return vm_host + allow(provider).to receive(:find_vm).and_return vm_host allow(vm_host).to receive(:runtime).and_return true allow(vm_host).to receive_message_chain(:runtime, :powerState).and_return 'poweredOff' expect(logger).not_to receive(:log).with('d', "[!] [#{pool}] '#{vm}' appears to be powered off or dead") expect(redis.sismember("vmpooler__running__#{pool}", vm)).to be(true) - subject._check_running_vm(vm, pool, timeout, vsphere) + subject._check_running_vm(vm, pool, timeout, provider) expect(redis.sismember("vmpooler__running__#{pool}", vm)).to be(true) end it 'should not move VM if it has no checkout time' do - allow(vsphere).to receive(:find_vm).and_return vm_host + allow(provider).to receive(:find_vm).and_return vm_host expect(redis.sismember("vmpooler__running__#{pool}", vm)).to be(true) - subject._check_running_vm(vm, pool, 0, vsphere) + subject._check_running_vm(vm, pool, 0, provider) expect(redis.sismember("vmpooler__running__#{pool}", vm)).to be(true) end it 'should not move VM if TTL is zero' do - allow(vsphere).to receive(:find_vm).and_return vm_host + allow(provider).to receive(:find_vm).and_return vm_host redis.hset("vmpooler__active__#{pool}", vm,(Time.now - timeout*60*60).to_s) expect(redis.sismember("vmpooler__running__#{pool}", vm)).to be(true) - subject._check_running_vm(vm, pool, 0, vsphere) + subject._check_running_vm(vm, pool, 0, provider) expect(redis.sismember("vmpooler__running__#{pool}", vm)).to be(true) end it 'should move VM when past TTL' do - allow(vsphere).to receive(:find_vm).and_return vm_host + allow(provider).to receive(:find_vm).and_return vm_host redis.hset("vmpooler__active__#{pool}", vm,(Time.now - timeout*60*60).to_s) expect(redis.sismember("vmpooler__running__#{pool}", vm)).to be(true) expect(redis.sismember("vmpooler__completed__#{pool}", vm)).to be(false) - subject._check_running_vm(vm, pool, timeout, vsphere) + subject._check_running_vm(vm, pool, timeout, provider) expect(redis.sismember("vmpooler__running__#{pool}", vm)).to be(false) expect(redis.sismember("vmpooler__completed__#{pool}", vm)).to be(true) end @@ -549,7 +549,7 @@ EOT end describe '#clone_vm' do - let(:vsphere) { double('vsphere') } + let(:provider) { double('provider') } let(:config) { YAML.load(<<-EOT @@ -571,17 +571,17 @@ EOT it 'calls _clone_vm' do expect(Thread).to receive(:new).and_yield - expect(subject).to receive(:_clone_vm).with(pool_object,vsphere) + expect(subject).to receive(:_clone_vm).with(pool_object,provider) - subject.clone_vm(pool_object,vsphere) + subject.clone_vm(pool_object,provider) end it 'logs a message if an error is raised' do expect(Thread).to receive(:new).and_yield expect(logger).to receive(:log) - expect(subject).to receive(:_clone_vm).with(pool_object,vsphere).and_raise('an_error') + expect(subject).to receive(:_clone_vm).with(pool_object,provider).and_raise('an_error') - expect{subject.clone_vm(pool_object,vsphere)}.to raise_error(/an_error/) + expect{subject.clone_vm(pool_object,provider)}.to raise_error(/an_error/) end end @@ -590,10 +590,6 @@ EOT expect(subject).not_to be_nil end - before(:each) do - #expect(Thread).to receive(:new).and_yield - end - let (:folder) { 'vmfolder' } let (:folder_object) { double('folder_object') } let (:template_name) { pool } @@ -618,7 +614,7 @@ EOT ) } - let (:vsphere) { double('vsphere') } + let (:provider) { double('provider') } let (:template_folder_object) { double('template_folder_object') } 
let (:template_vm_object) { double('template_vm_object') } let (:clone_task) { double('clone_task') } @@ -630,7 +626,7 @@ EOT end it 'should raise an error' do - expect{subject._clone_vm(pool_object,vsphere)}.to raise_error(/Please provide a full path to the template/) + expect{subject._clone_vm(pool_object,provider)}.to raise_error(/Please provide a full path to the template/) end end @@ -640,7 +636,7 @@ EOT end it 'should raise an error' do - expect{subject._clone_vm(pool_object,vsphere)}.to raise_error(/Please provide a full path to the template/) + expect{subject._clone_vm(pool_object,provider)}.to raise_error(/Please provide a full path to the template/) end end @@ -661,14 +657,14 @@ EOT context 'a valid template' do before(:each) do expect(template_folder_object).to receive(:find).with(template_name).and_return(template_vm_object) - expect(vsphere).to receive(:find_folder).with('template').and_return(template_folder_object) + expect(provider).to receive(:find_folder).with('template').and_return(template_folder_object) end context 'with no errors during cloning' do before(:each) do - expect(vsphere).to receive(:find_least_used_host).with(target).and_return('least_used_host') - expect(vsphere).to receive(:find_datastore).with(datastore).and_return('datastore') - expect(vsphere).to receive(:find_folder).with('vmfolder').and_return(folder_object) + expect(provider).to receive(:find_least_used_host).with(target).and_return('least_used_host') + expect(provider).to receive(:find_datastore).with(datastore).and_return('datastore') + expect(provider).to receive(:find_folder).with('vmfolder').and_return(folder_object) expect(template_vm_object).to receive(:CloneVM_Task).and_return(clone_task) expect(clone_task).to receive(:wait_for_completion) expect(metrics).to receive(:timing).with(/clone\./,/0/) @@ -678,7 +674,7 @@ EOT expect(logger).to receive(:log).at_least(:once) expect(redis.scard("vmpooler__pending__#{pool}")).to eq(0) - subject._clone_vm(pool_object,vsphere) + subject._clone_vm(pool_object,provider) expect(redis.scard("vmpooler__pending__#{template_name}")).to eq(1) # Get the new VM Name from the pending pool queue as it should be the only entry @@ -693,14 +689,14 @@ EOT expect(logger).to receive(:log).with('d',/\[ \] \[#{template_name}\] '(.+)' is being cloned from '#{template_name}'/) allow(logger).to receive(:log) - subject._clone_vm(pool_object,vsphere) + subject._clone_vm(pool_object,provider) end it 'should log a message that it completed being cloned' do expect(logger).to receive(:log).with('s',/\[\+\] \[#{template_name}\] '(.+)' cloned from '#{template_name}' in [0-9.]+ seconds/) allow(logger).to receive(:log) - subject._clone_vm(pool_object,vsphere) + subject._clone_vm(pool_object,provider) end end @@ -711,9 +707,9 @@ EOT # https://www.vmware.com/support/developer/converter-sdk/conv50_apireference/vim.VirtualMachine.html#clone context 'with an error during cloning' do before(:each) do - expect(vsphere).to receive(:find_least_used_host).with(target).and_return('least_used_host') - expect(vsphere).to receive(:find_datastore).with(datastore).and_return(nil) - expect(vsphere).to receive(:find_folder).with('vmfolder').and_return(folder_object) + expect(provider).to receive(:find_least_used_host).with(target).and_return('least_used_host') + expect(provider).to receive(:find_datastore).with(datastore).and_return(nil) + expect(provider).to receive(:find_folder).with('vmfolder').and_return(folder_object) expect(template_vm_object).to receive(:CloneVM_Task).and_return(clone_task) 
expect(clone_task).to receive(:wait_for_completion).and_raise(RuntimeError,'SomeError') expect(metrics).to receive(:timing).with(/clone\./,/0/).exactly(0).times @@ -722,7 +718,7 @@ EOT it 'should raise an error within the Thread' do expect(logger).to receive(:log).at_least(:once) - expect{subject._clone_vm(pool_object,vsphere)}.to raise_error(/SomeError/) + expect{subject._clone_vm(pool_object,provider)}.to raise_error(/SomeError/) end it 'should log a message that is being cloned from a template' do @@ -731,7 +727,7 @@ EOT # Swallow the error begin - subject._clone_vm(pool_object,vsphere) + subject._clone_vm(pool_object,provider) rescue end end @@ -742,7 +738,7 @@ EOT # Swallow the error begin - subject._clone_vm(pool_object,vsphere) + subject._clone_vm(pool_object,provider) rescue end end @@ -753,14 +749,14 @@ EOT context 'a valid template' do before(:each) do expect(template_folder_object).to receive(:find).with(template_name).and_return(template_vm_object) - expect(vsphere).to receive(:find_folder).with('template').and_return(template_folder_object) + expect(provider).to receive(:find_folder).with('template').and_return(template_folder_object) end context 'with no errors during cloning' do before(:each) do - expect(vsphere).to receive(:find_least_used_host).with(target).and_return('least_used_host') - expect(vsphere).to receive(:find_datastore).with(datastore).and_return('datastore') - expect(vsphere).to receive(:find_folder).with('vmfolder').and_return(folder_object) + expect(provider).to receive(:find_least_used_host).with(target).and_return('least_used_host') + expect(provider).to receive(:find_datastore).with(datastore).and_return('datastore') + expect(provider).to receive(:find_folder).with('vmfolder').and_return(folder_object) expect(template_vm_object).to receive(:CloneVM_Task).and_return(clone_task) expect(clone_task).to receive(:wait_for_completion) expect(metrics).to receive(:timing).with(/clone\./,/0/) @@ -770,7 +766,7 @@ EOT expect(logger).to receive(:log).at_least(:once) expect(redis.scard("vmpooler__pending__#{pool}")).to eq(0) - subject._clone_vm(pool_object,vsphere) + subject._clone_vm(pool_object,provider) expect(redis.scard("vmpooler__pending__#{pool}")).to eq(1) # Get the new VM Name from the pending pool queue as it should be the only entry @@ -785,7 +781,7 @@ EOT redis.incr('vmpooler__tasks__clone') redis.incr('vmpooler__tasks__clone') expect(redis.get('vmpooler__tasks__clone')).to eq('2') - subject._clone_vm(pool_object,vsphere) + subject._clone_vm(pool_object,provider) expect(redis.get('vmpooler__tasks__clone')).to eq('1') end @@ -793,14 +789,14 @@ EOT expect(logger).to receive(:log).with('d',/\[ \] \[#{pool}\] '(.+)' is being cloned from '#{template_name}'/) allow(logger).to receive(:log) - subject._clone_vm(pool_object,vsphere) + subject._clone_vm(pool_object,provider) end it 'should log a message that it completed being cloned' do expect(logger).to receive(:log).with('s',/\[\+\] \[#{pool}\] '(.+)' cloned from '#{template_name}' in [0-9.]+ seconds/) allow(logger).to receive(:log) - subject._clone_vm(pool_object,vsphere) + subject._clone_vm(pool_object,provider) end end @@ -811,9 +807,9 @@ EOT # https://www.vmware.com/support/developer/converter-sdk/conv50_apireference/vim.VirtualMachine.html#clone context 'with an error during cloning' do before(:each) do - expect(vsphere).to receive(:find_least_used_host).with(target).and_return('least_used_host') - expect(vsphere).to receive(:find_datastore).with(datastore).and_return(nil) - expect(vsphere).to 
receive(:find_folder).with('vmfolder').and_return(folder_object) + expect(provider).to receive(:find_least_used_host).with(target).and_return('least_used_host') + expect(provider).to receive(:find_datastore).with(datastore).and_return(nil) + expect(provider).to receive(:find_folder).with('vmfolder').and_return(folder_object) expect(template_vm_object).to receive(:CloneVM_Task).and_return(clone_task) expect(clone_task).to receive(:wait_for_completion).and_raise(RuntimeError,'SomeError') expect(metrics).to receive(:timing).with(/clone\./,/0/).exactly(0).times @@ -822,7 +818,7 @@ EOT it 'should raise an error within the Thread' do expect(logger).to receive(:log).at_least(:once) - expect{subject._clone_vm(pool_object,vsphere)}.to raise_error(/SomeError/) + expect{subject._clone_vm(pool_object,provider)}.to raise_error(/SomeError/) end it 'should log a message that is being cloned from a template' do @@ -831,7 +827,7 @@ EOT # Swallow the error begin - subject._clone_vm(pool_object,vsphere) + subject._clone_vm(pool_object,provider) rescue end end @@ -842,7 +838,7 @@ EOT # Swallow the error begin - subject._clone_vm(pool_object,vsphere) + subject._clone_vm(pool_object,provider) rescue end end @@ -851,7 +847,7 @@ EOT end describe "#destroy_vm" do - let (:vsphere) { double('vsphere') } + let (:provider) { double('provider') } let(:config) { YAML.load(<<-EOT @@ -883,12 +879,12 @@ EOT } before(:each) do - expect(vsphere).to receive(:find_vm).and_return(nil) + expect(provider).to receive(:find_vm).and_return(nil) end it 'should call redis expire with 0' do expect(redis.hget("vmpooler__vm__#{vm}", 'checkout')).to_not be_nil - subject.destroy_vm(vm,pool,vsphere) + subject.destroy_vm(vm,pool,provider) expect(redis.hget("vmpooler__vm__#{vm}", 'checkout')).to be_nil end end @@ -897,17 +893,17 @@ EOT let(:config) {} it 'should raise an error' do - expect{ subject.destroy_vm(vm,pool,vsphere) }.to raise_error(NoMethodError) + expect{ subject.destroy_vm(vm,pool,provider) }.to raise_error(NoMethodError) end end context 'when a VM does not exist' do before(:each) do - expect(vsphere).to receive(:find_vm).and_return(nil) + expect(provider).to receive(:find_vm).and_return(nil) end - it 'should not call any vsphere methods' do - subject.destroy_vm(vm,pool,vsphere) + it 'should not call any provider methods' do + subject.destroy_vm(vm,pool,provider) end end @@ -916,7 +912,7 @@ EOT let (:poweroff_task) { double('poweroff_task') } before(:each) do - expect(vsphere).to receive(:find_vm).and_return(host) + expect(provider).to receive(:find_vm).and_return(host) allow(host).to receive(:runtime).and_return(true) end @@ -929,7 +925,7 @@ EOT end it 'should raise an error in the thread' do - expect { subject.destroy_vm(vm,pool,vsphere) }.to raise_error(/DestroyFailure/) + expect { subject.destroy_vm(vm,pool,provider) }.to raise_error(/DestroyFailure/) end end @@ -943,7 +939,7 @@ EOT end it 'should raise an error in the thread' do - expect { subject.destroy_vm(vm,pool,vsphere) }.to raise_error(/PowerOffFailure/) + expect { subject.destroy_vm(vm,pool,provider) }.to raise_error(/PowerOffFailure/) end end @@ -957,7 +953,7 @@ EOT it 'should log a message the VM was destroyed' do expect(logger).to receive(:log).with('s', /\[-\] \[#{pool}\] '#{vm}' destroyed in [0-9.]+ seconds/) - subject.destroy_vm(vm,pool,vsphere) + subject.destroy_vm(vm,pool,provider) end end @@ -975,21 +971,21 @@ EOT expect(logger).to receive(:log).with('d', "[ ] [#{pool}] '#{vm}' is being shut down") allow(logger).to receive(:log) - 
subject.destroy_vm(vm,pool,vsphere) + subject.destroy_vm(vm,pool,provider) end it 'should log a message the VM was destroyed' do expect(logger).to receive(:log).with('s', /\[-\] \[#{pool}\] '#{vm}' destroyed in [0-9.]+ seconds/) allow(logger).to receive(:log) - subject.destroy_vm(vm,pool,vsphere) + subject.destroy_vm(vm,pool,provider) end end end end describe '#create_vm_disk' do - let(:vsphere) { double('vsphere') } + let(:provider) { double('provider') } let(:disk_size) { 15 } before do @@ -998,14 +994,14 @@ EOT it 'calls _create_vm_disk' do expect(Thread).to receive(:new).and_yield - expect(subject).to receive(:_create_vm_disk).with(vm, disk_size, vsphere) + expect(subject).to receive(:_create_vm_disk).with(vm, disk_size, provider) - subject.create_vm_disk(vm, disk_size, vsphere) + subject.create_vm_disk(vm, disk_size, provider) end end describe "#_create_vm_disk" do - let(:vsphere) { double('vsphere') } + let(:provider) { double('provider') } let(:disk_size) { '15' } let(:datastore) { 'datastore0'} let(:config) { @@ -1023,39 +1019,39 @@ EOT end before(:each) do - allow(vsphere).to receive(:find_vm).with(vm).and_return(host) + allow(provider).to receive(:find_vm).with(vm).and_return(host) create_running_vm(pool,vm,token) end it 'should not do anything if the VM does not exist' do - expect(vsphere).to receive(:find_vm).with(vm).and_return(nil) + expect(provider).to receive(:find_vm).with(vm).and_return(nil) expect(logger).to receive(:log).exactly(0).times - subject._create_vm_disk(vm, disk_size, vsphere) + subject._create_vm_disk(vm, disk_size, provider) end it 'should not do anything if the disk size is nil' do expect(logger).to receive(:log).exactly(0).times - subject._create_vm_disk(vm, nil, vsphere) + subject._create_vm_disk(vm, nil, provider) end it 'should not do anything if the disk size is empty string' do expect(logger).to receive(:log).exactly(0).times - subject._create_vm_disk(vm, '', vsphere) + subject._create_vm_disk(vm, '', provider) end it 'should not do anything if the disk size is less than 1' do expect(logger).to receive(:log).exactly(0).times - subject._create_vm_disk(vm, '0', vsphere) + subject._create_vm_disk(vm, '0', provider) end it 'should not do anything if the disk size cannot be converted to an integer' do expect(logger).to receive(:log).exactly(0).times - subject._create_vm_disk(vm, 'abc123', vsphere) + subject._create_vm_disk(vm, 'abc123', provider) end it 'should raise an error if the disk size is a Fixnum' do expect(logger).to receive(:log).exactly(0).times - expect{ subject._create_vm_disk(vm, 10, vsphere) }.to raise_error(NoMethodError,/empty?/) + expect{ subject._create_vm_disk(vm, 10, provider) }.to raise_error(NoMethodError,/empty?/) end it 'should not do anything if the datastore for pool is nil' do @@ -1063,7 +1059,7 @@ EOT expect(logger).to receive(:log).with('s', "[+] [disk_manager] '#{vm}' failed to attach disk") config[:pools][0]['datastore'] = nil - subject._create_vm_disk(vm, disk_size, vsphere) + subject._create_vm_disk(vm, disk_size, provider) end it 'should not do anything if the datastore for pool is empty' do @@ -1071,36 +1067,36 @@ EOT expect(logger).to receive(:log).with('s', "[+] [disk_manager] '#{vm}' failed to attach disk") config[:pools][0]['datastore'] = '' - subject._create_vm_disk(vm, disk_size, vsphere) + subject._create_vm_disk(vm, disk_size, provider) end it 'should attach the disk' do expect(logger).to receive(:log).with('s', "[ ] [disk_manager] '#{vm}' is attaching a #{disk_size}gb disk") expect(logger).to 
receive(:log).with('s', /\[\+\] \[disk_manager\] '#{vm}' attached #{disk_size}gb disk in 0.[\d]+ seconds/) - expect(vsphere).to receive(:add_disk).with(host,disk_size,datastore) + expect(provider).to receive(:add_disk).with(host,disk_size,datastore) - subject._create_vm_disk(vm, disk_size, vsphere) + subject._create_vm_disk(vm, disk_size, provider) end it 'should update redis information when attaching the first disk' do - expect(vsphere).to receive(:add_disk).with(host,disk_size,datastore) + expect(provider).to receive(:add_disk).with(host,disk_size,datastore) - subject._create_vm_disk(vm, disk_size, vsphere) + subject._create_vm_disk(vm, disk_size, provider) expect(redis.hget("vmpooler__vm__#{vm}", 'disk')).to eq("+#{disk_size}gb") end it 'should update redis information when attaching the additional disks' do - expect(vsphere).to receive(:add_disk).with(host,disk_size,datastore) + expect(provider).to receive(:add_disk).with(host,disk_size,datastore) initial_disks = '+10gb:+20gb' redis.hset("vmpooler__vm__#{vm}", 'disk', initial_disks) - subject._create_vm_disk(vm, disk_size, vsphere) + subject._create_vm_disk(vm, disk_size, provider) expect(redis.hget("vmpooler__vm__#{vm}", 'disk')).to eq("#{initial_disks}:+#{disk_size}gb") end end describe '#create_vm_snapshot' do - let(:vsphere) { double('vsphere') } + let(:provider) { double('provider') } let(:snapshot_name) { 'snapshot' } before do @@ -1109,14 +1105,14 @@ EOT it 'calls _create_vm_snapshot' do expect(Thread).to receive(:new).and_yield - expect(subject).to receive(:_create_vm_snapshot).with(vm, snapshot_name, vsphere) + expect(subject).to receive(:_create_vm_snapshot).with(vm, snapshot_name, provider) - subject.create_vm_snapshot(vm, snapshot_name, vsphere) + subject.create_vm_snapshot(vm, snapshot_name, provider) end end describe '#_create_vm_snapshot' do - let(:vsphere) { double('vsphere') } + let(:provider) { double('provider') } let(:snapshot_name) { 'snapshot1' } let(:snapshot_task) { double('snapshot_task') } @@ -1125,43 +1121,43 @@ EOT end before(:each) do - allow(vsphere).to receive(:find_vm).with(vm).and_return(host) + allow(provider).to receive(:find_vm).with(vm).and_return(host) allow(snapshot_task).to receive(:wait_for_completion).and_return(nil) allow(host).to receive(:CreateSnapshot_Task).with({:name=>snapshot_name, :description=>"vmpooler", :memory=>true, :quiesce=>true}).and_return(snapshot_task) create_running_vm(pool,vm,token) end it 'should not do anything if the VM does not exist' do - expect(vsphere).to receive(:find_vm).with(vm).and_return(nil) + expect(provider).to receive(:find_vm).with(vm).and_return(nil) expect(logger).to receive(:log).exactly(0).times - subject._create_vm_snapshot(vm, snapshot_name, vsphere) + subject._create_vm_snapshot(vm, snapshot_name, provider) end it 'should not do anything if the snapshot name is nil' do expect(logger).to receive(:log).exactly(0).times - subject._create_vm_snapshot(vm, nil, vsphere) + subject._create_vm_snapshot(vm, nil, provider) end it 'should not do anything if the snapshot name is empty string' do expect(logger).to receive(:log).exactly(0).times - subject._create_vm_snapshot(vm, '', vsphere) + subject._create_vm_snapshot(vm, '', provider) end - it 'should invoke vSphere to snapshot the VM' do + it 'should invoke provider to snapshot the VM' do expect(logger).to receive(:log).with('s', "[ ] [snapshot_manager] '#{vm}' is being snapshotted") expect(logger).to receive(:log).with('s', /\[\+\] \[snapshot_manager\] '#{vm}' snapshot created in 0.[\d]+ seconds/) - 
subject._create_vm_snapshot(vm, snapshot_name, vsphere) + subject._create_vm_snapshot(vm, snapshot_name, provider) end it 'should add snapshot redis information' do expect(redis.hget("vmpooler__vm__#{vm}", "snapshot:#{snapshot_name}")).to be_nil - subject._create_vm_snapshot(vm, snapshot_name, vsphere) + subject._create_vm_snapshot(vm, snapshot_name, provider) expect(redis.hget("vmpooler__vm__#{vm}", "snapshot:#{snapshot_name}")).to_not be_nil end end describe '#revert_vm_snapshot' do - let(:vsphere) { double('vsphere') } + let(:provider) { double('provider') } let(:snapshot_name) { 'snapshot' } before do @@ -1170,14 +1166,14 @@ EOT it 'calls _create_vm_snapshot' do expect(Thread).to receive(:new).and_yield - expect(subject).to receive(:_revert_vm_snapshot).with(vm, snapshot_name, vsphere) + expect(subject).to receive(:_revert_vm_snapshot).with(vm, snapshot_name, provider) - subject.revert_vm_snapshot(vm, snapshot_name, vsphere) + subject.revert_vm_snapshot(vm, snapshot_name, provider) end end describe '#_revert_vm_snapshot' do - let(:vsphere) { double('vsphere') } + let(:provider) { double('provider') } let(:snapshot_name) { 'snapshot1' } let(:snapshot_object) { double('snapshot_object') } @@ -1186,33 +1182,33 @@ EOT end before(:each) do - allow(vsphere).to receive(:find_vm).with(vm).and_return(host) + allow(provider).to receive(:find_vm).with(vm).and_return(host) allow(snapshot_object).to receive_message_chain(:RevertToSnapshot_Task, :wait_for_completion) - allow(vsphere).to receive(:find_snapshot).with(host,snapshot_name).and_return(snapshot_object) + allow(provider).to receive(:find_snapshot).with(host,snapshot_name).and_return(snapshot_object) end it 'should not do anything if the VM does not exist' do - expect(vsphere).to receive(:find_vm).with(vm).and_return(nil) + expect(provider).to receive(:find_vm).with(vm).and_return(nil) expect(logger).to receive(:log).exactly(0).times - subject._revert_vm_snapshot(vm, snapshot_name, vsphere) + subject._revert_vm_snapshot(vm, snapshot_name, provider) end it 'should not do anything if the snapshot name is nil' do expect(logger).to receive(:log).exactly(0).times - expect(vsphere).to receive(:find_snapshot).with(host,nil).and_return nil - subject._revert_vm_snapshot(vm, nil, vsphere) + expect(provider).to receive(:find_snapshot).with(host,nil).and_return nil + subject._revert_vm_snapshot(vm, nil, provider) end it 'should not do anything if the snapshot name is empty string' do expect(logger).to receive(:log).exactly(0).times - expect(vsphere).to receive(:find_snapshot).with(host,'').and_return nil - subject._revert_vm_snapshot(vm, '', vsphere) + expect(provider).to receive(:find_snapshot).with(host,'').and_return nil + subject._revert_vm_snapshot(vm, '', provider) end - it 'should invoke vSphere to revert the VM to the snapshot' do + it 'should invoke provider to revert the VM to the snapshot' do expect(logger).to receive(:log).with('s', "[ ] [snapshot_manager] '#{vm}' is being reverted to snapshot '#{snapshot_name}'") expect(logger).to receive(:log).with('s', /\[\<\] \[snapshot_manager\] '#{vm}' reverted to snapshot in 0\.[\d]+ seconds/) - subject._revert_vm_snapshot(vm, snapshot_name, vsphere) + subject._revert_vm_snapshot(vm, snapshot_name, provider) end end @@ -1283,7 +1279,7 @@ EOT end describe '#_check_disk_queue' do - let(:vsphere) { double('vsphere') } + let(:provider) { double('provider') } before do expect(subject).not_to be_nil @@ -1292,7 +1288,7 @@ EOT context 'when no VMs in the queue' do it 'should not call create_vm_disk' do 
expect(subject).to receive(:create_vm_disk).exactly(0).times - subject._check_disk_queue(vsphere) + subject._check_disk_queue(provider) end end @@ -1305,18 +1301,18 @@ EOT it 'should call create_vm_disk once' do expect(subject).to receive(:create_vm_disk).exactly(1).times - subject._check_disk_queue(vsphere) + subject._check_disk_queue(provider) end it 'should snapshot the first VM in the queue' do - expect(subject).to receive(:create_vm_disk).with('vm1','1',vsphere) - subject._check_disk_queue(vsphere) + expect(subject).to receive(:create_vm_disk).with('vm1','1',provider) + subject._check_disk_queue(provider) end it 'should log an error if one occurs' do expect(subject).to receive(:create_vm_disk).and_raise(RuntimeError,'MockError') expect(logger).to receive(:log).with('s', "[!] [disk_manager] disk creation appears to have failed") - subject._check_disk_queue(vsphere) + subject._check_disk_queue(provider) end end end @@ -1388,7 +1384,7 @@ EOT end describe '#_check_snapshot_queue' do - let(:vsphere) { double('vsphere') } + let(:provider) { double('provider') } before do expect(subject).not_to be_nil @@ -1398,7 +1394,7 @@ EOT context 'when no VMs in the queue' do it 'should not call create_vm_snapshot' do expect(subject).to receive(:create_vm_snapshot).exactly(0).times - subject._check_snapshot_queue(vsphere) + subject._check_snapshot_queue(provider) end end @@ -1411,18 +1407,18 @@ EOT it 'should call create_vm_snapshot once' do expect(subject).to receive(:create_vm_snapshot).exactly(1).times - subject._check_snapshot_queue(vsphere) + subject._check_snapshot_queue(provider) end it 'should snapshot the first VM in the queue' do - expect(subject).to receive(:create_vm_snapshot).with('vm1','snapshot1',vsphere) - subject._check_snapshot_queue(vsphere) + expect(subject).to receive(:create_vm_snapshot).with('vm1','snapshot1',provider) + subject._check_snapshot_queue(provider) end it 'should log an error if one occurs' do expect(subject).to receive(:create_vm_snapshot).and_raise(RuntimeError,'MockError') expect(logger).to receive(:log).with('s', "[!] [snapshot_manager] snapshot appears to have failed") - subject._check_snapshot_queue(vsphere) + subject._check_snapshot_queue(provider) end end end @@ -1431,7 +1427,7 @@ EOT context 'when no VMs in the queue' do it 'should not call revert_vm_snapshot' do expect(subject).to receive(:revert_vm_snapshot).exactly(0).times - subject._check_snapshot_queue(vsphere) + subject._check_snapshot_queue(provider) end end @@ -1444,18 +1440,18 @@ EOT it 'should call revert_vm_snapshot once' do expect(subject).to receive(:revert_vm_snapshot).exactly(1).times - subject._check_snapshot_queue(vsphere) + subject._check_snapshot_queue(provider) end it 'should revert snapshot the first VM in the queue' do - expect(subject).to receive(:revert_vm_snapshot).with('vm1','snapshot1',vsphere) - subject._check_snapshot_queue(vsphere) + expect(subject).to receive(:revert_vm_snapshot).with('vm1','snapshot1',provider) + subject._check_snapshot_queue(provider) end it 'should log an error if one occurs' do expect(subject).to receive(:revert_vm_snapshot).and_raise(RuntimeError,'MockError') expect(logger).to receive(:log).with('s', "[!] 
[snapshot_manager] snapshot revert appears to have failed") - subject._check_snapshot_queue(vsphere) + subject._check_snapshot_queue(provider) end end end @@ -1485,7 +1481,7 @@ EOT end describe '#migrate_vm' do - let(:vsphere) { double('vsphere') } + let(:provider) { double('provider') } before do expect(subject).not_to be_nil @@ -1493,14 +1489,14 @@ EOT it 'calls _migrate_vm' do expect(Thread).to receive(:new).and_yield - expect(subject).to receive(:_migrate_vm).with(vm, pool, vsphere) + expect(subject).to receive(:_migrate_vm).with(vm, pool, provider) - subject.migrate_vm(vm, pool, vsphere) + subject.migrate_vm(vm, pool, provider) end end describe "#_migrate_vm" do - let(:vsphere) { double('vsphere') } + let(:provider) { double('provider') } let(:vm_parent_hostname) { 'parent1' } let(:config) { YAML.load(<<-EOT @@ -1519,25 +1515,25 @@ EOT context 'when an error occurs' do it 'should log an error message and attempt to remove from vmpooler_migration queue' do - expect(vsphere).to receive(:find_vm).with(vm).and_raise(RuntimeError,'MockError') + expect(provider).to receive(:find_vm).with(vm).and_raise(RuntimeError,'MockError') expect(logger).to receive(:log).with('s', "[x] [#{pool}] '#{vm}' migration failed with an error: MockError") expect(subject).to receive(:remove_vmpooler_migration_vm) - subject._migrate_vm(vm, pool, vsphere) + subject._migrate_vm(vm, pool, provider) end end context 'when VM does not exist' do it 'should log an error message when VM does not exist' do - expect(vsphere).to receive(:find_vm).with(vm).and_return(nil) + expect(provider).to receive(:find_vm).with(vm).and_return(nil) # This test is quite fragile. Should refactor the code to make this scenario easier to detect expect(logger).to receive(:log).with('s', "[x] [#{pool}] '#{vm}' migration failed with an error: undefined method `summary' for nil:NilClass") - subject._migrate_vm(vm, pool, vsphere) + subject._migrate_vm(vm, pool, provider) end end context 'when VM exists but migration is disabled' do before(:each) do - expect(vsphere).to receive(:find_vm).with(vm).and_return(host) + expect(provider).to receive(:find_vm).with(vm).and_return(host) allow(subject).to receive(:get_vm_host_info).with(host).and_return([{'name' => vm_parent_hostname}, vm_parent_hostname]) create_migrating_vm(vm, pool) end @@ -1546,7 +1542,7 @@ EOT it "should not migrate a VM if the migration limit is #{testvalue}" do config[:config]['migration_limit'] = testvalue expect(logger).to receive(:log).with('s', "[ ] [#{pool}] '#{vm}' is running on #{vm_parent_hostname}") - subject._migrate_vm(vm, pool, vsphere) + subject._migrate_vm(vm, pool, provider) end it "should remove the VM from vmpooler__migrating queue in redis if the migration limit is #{testvalue}" do @@ -1554,7 +1550,7 @@ EOT config[:config]['migration_limit'] = testvalue expect(redis.sismember("vmpooler__migrating__#{pool}",vm)).to be_truthy - subject._migrate_vm(vm, pool, vsphere) + subject._migrate_vm(vm, pool, provider) expect(redis.sismember("vmpooler__migrating__#{pool}",vm)).to be_falsey end end @@ -1562,7 +1558,7 @@ EOT context 'when VM exists but migration limit is reached' do before(:each) do - expect(vsphere).to receive(:find_vm).with(vm).and_return(host) + expect(provider).to receive(:find_vm).with(vm).and_return(host) allow(subject).to receive(:get_vm_host_info).with(host).and_return([{'name' => vm_parent_hostname}, vm_parent_hostname]) create_migrating_vm(vm, pool) @@ -1575,19 +1571,19 @@ EOT it "should not migrate a VM if the migration limit is reached" do 
expect(logger).to receive(:log).with('s',"[ ] [#{pool}] '#{vm}' is running on #{vm_parent_hostname}. No migration will be evaluated since the migration_limit has been reached") - subject._migrate_vm(vm, pool, vsphere) + subject._migrate_vm(vm, pool, provider) end it "should remove the VM from vmpooler__migrating queue in redis if the migration limit is reached" do expect(redis.sismember("vmpooler__migrating__#{pool}",vm)).to be_truthy - subject._migrate_vm(vm, pool, vsphere) + subject._migrate_vm(vm, pool, provider) expect(redis.sismember("vmpooler__migrating__#{pool}",vm)).to be_falsey end end context 'when VM exists but migration limit is not yet reached' do before(:each) do - expect(vsphere).to receive(:find_vm).with(vm).and_return(host) + expect(provider).to receive(:find_vm).with(vm).and_return(host) allow(subject).to receive(:get_vm_host_info).with(host).and_return([{'name' => vm_parent_hostname}, vm_parent_hostname]) create_migrating_vm(vm, pool) @@ -1597,59 +1593,59 @@ EOT context 'and host to migrate to is the same as the current host' do before(:each) do - expect(vsphere).to receive(:find_least_used_compatible_host).with(host).and_return([{'name' => vm_parent_hostname}, vm_parent_hostname]) + expect(provider).to receive(:find_least_used_compatible_host).with(host).and_return([{'name' => vm_parent_hostname}, vm_parent_hostname]) end it "should not migrate the VM" do expect(logger).to receive(:log).with('s', "[ ] [#{pool}] No migration required for '#{vm}' running on #{vm_parent_hostname}") - subject._migrate_vm(vm, pool, vsphere) + subject._migrate_vm(vm, pool, provider) end it "should remove the VM from vmpooler__migrating queue in redis" do expect(redis.sismember("vmpooler__migrating__#{pool}",vm)).to be_truthy - subject._migrate_vm(vm, pool, vsphere) + subject._migrate_vm(vm, pool, provider) expect(redis.sismember("vmpooler__migrating__#{pool}",vm)).to be_falsey end it "should not change the vmpooler_migration queue count" do before_count = redis.scard('vmpooler__migration') - subject._migrate_vm(vm, pool, vsphere) + subject._migrate_vm(vm, pool, provider) expect(redis.scard('vmpooler__migration')).to eq(before_count) end it "should call remove_vmpooler_migration_vm" do expect(subject).to receive(:remove_vmpooler_migration_vm) - subject._migrate_vm(vm, pool, vsphere) + subject._migrate_vm(vm, pool, provider) end end context 'and host to migrate to different to the current host' do let(:vm_new_hostname) { 'new_hostname' } before(:each) do - expect(vsphere).to receive(:find_least_used_compatible_host).with(host).and_return([{'name' => vm_new_hostname}, vm_new_hostname]) - expect(subject).to receive(:migrate_vm_and_record_timing).with(host, vm, pool, Object, vm_parent_hostname, vm_new_hostname, vsphere).and_return('1.00') + expect(provider).to receive(:find_least_used_compatible_host).with(host).and_return([{'name' => vm_new_hostname}, vm_new_hostname]) + expect(subject).to receive(:migrate_vm_and_record_timing).with(host, vm, pool, Object, vm_parent_hostname, vm_new_hostname, provider).and_return('1.00') end it "should migrate the VM" do expect(logger).to receive(:log).with('s', "[>] [#{pool}] '#{vm}' migrated from #{vm_parent_hostname} to #{vm_new_hostname} in 1.00 seconds") - subject._migrate_vm(vm, pool, vsphere) + subject._migrate_vm(vm, pool, provider) end it "should remove the VM from vmpooler__migrating queue in redis" do expect(redis.sismember("vmpooler__migrating__#{pool}",vm)).to be_truthy - subject._migrate_vm(vm, pool, vsphere) + subject._migrate_vm(vm, pool, 
provider) expect(redis.sismember("vmpooler__migrating__#{pool}",vm)).to be_falsey end it "should not change the vmpooler_migration queue count" do before_count = redis.scard('vmpooler__migration') - subject._migrate_vm(vm, pool, vsphere) + subject._migrate_vm(vm, pool, provider) expect(redis.scard('vmpooler__migration')).to eq(before_count) end it "should call remove_vmpooler_migration_vm" do expect(subject).to receive(:remove_vmpooler_migration_vm) - subject._migrate_vm(vm, pool, vsphere) + subject._migrate_vm(vm, pool, provider) end end end @@ -1859,7 +1855,7 @@ EOT describe "#check_pool" do let(:threads) {{}} - let(:vsphere) {{}} + let(:provider) {{}} let(:config) { YAML.load(<<-EOT @@ -1888,17 +1884,17 @@ EOT after(:each) do # Reset the global variable - Note this is a code smell $threads = nil - $vsphere = nil + $providers = nil end it 'should log a message the worker thread is starting' do subject.check_pool(pool_object,1,0) end - it 'should populate the vsphere global variable' do + it 'should populate the providers global variable' do subject.check_pool(pool_object,1,0) - expect($vsphere[pool]).to_not be_nil + expect($providers[pool]).to_not be_nil end it 'should populate the threads global variable' do @@ -1923,7 +1919,7 @@ EOT after(:each) do # Reset the global variable - Note this is a code smell $threads = nil - $vsphere = nil + $provider = nil end it 'when a non-default loop delay is specified' do @@ -1942,13 +1938,13 @@ EOT before(:each) do allow(logger).to receive(:log) # Note the Vmpooler::VsphereHelper is not mocked - allow(subject).to receive(:_check_pool) + allow(subject).to receive(:_check_pool) end after(:each) do # Reset the global variable - Note this is a code smell $threads = nil - $vsphere = nil + $provider = nil end it 'should run startup tasks only once' do @@ -1985,7 +1981,7 @@ EOT end describe '#migrate_vm_and_record_timing' do - let(:vsphere) { double('vsphere') } + let(:provider) { double('provider') } let(:vm_object) { double('vm_object') } let(:source_host_name) { 'source_host' } let(:dest_host_name) { 'dest_host' } @@ -1996,34 +1992,34 @@ EOT before(:each) do create_vm(vm,token) - expect(vsphere).to receive(:migrate_vm_host).with(vm_object, host) + expect(provider).to receive(:migrate_vm_host).with(vm_object, host) end it 'should return the elapsed time for the migration' do - result = subject.migrate_vm_and_record_timing(vm_object, vm, pool, host, source_host_name, dest_host_name, vsphere) + result = subject.migrate_vm_and_record_timing(vm_object, vm, pool, host, source_host_name, dest_host_name, provider) expect(result).to match(/0\.[\d]+/) end it 'should add timing metric' do expect(metrics).to receive(:timing).with("migrate.#{pool}",String) - subject.migrate_vm_and_record_timing(vm_object, vm, pool, host, source_host_name, dest_host_name, vsphere) + subject.migrate_vm_and_record_timing(vm_object, vm, pool, host, source_host_name, dest_host_name, provider) end it 'should increment from_host and to_host metric' do expect(metrics).to receive(:increment).with("migrate_from.#{source_host_name}") expect(metrics).to receive(:increment).with("migrate_to.#{dest_host_name}") - subject.migrate_vm_and_record_timing(vm_object, vm, pool, host, source_host_name, dest_host_name, vsphere) + subject.migrate_vm_and_record_timing(vm_object, vm, pool, host, source_host_name, dest_host_name, provider) end it 'should set migration_time metric in redis' do expect(redis.hget("vmpooler__vm__#{vm}", 'migration_time')).to be_nil - subject.migrate_vm_and_record_timing(vm_object, 
vm, pool, host, source_host_name, dest_host_name, vsphere) + subject.migrate_vm_and_record_timing(vm_object, vm, pool, host, source_host_name, dest_host_name, provider) expect(redis.hget("vmpooler__vm__#{vm}", 'migration_time')).to match(/0\.[\d]+/) end it 'should set checkout_to_migration metric in redis' do expect(redis.hget("vmpooler__vm__#{vm}", 'checkout_to_migration')).to be_nil - subject.migrate_vm_and_record_timing(vm_object, vm, pool, host, source_host_name, dest_host_name, vsphere) + subject.migrate_vm_and_record_timing(vm_object, vm, pool, host, source_host_name, dest_host_name, provider) expect(redis.hget("vmpooler__vm__#{vm}", 'checkout_to_migration')).to match(/0\.[\d]+/) end end @@ -2046,7 +2042,7 @@ EOT ) } let(:pool_object) { config[:pools][0] } - let(:vsphere) { double('vsphere') } + let(:provider) { double('provider') } let(:new_vm) { 'newvm'} before do @@ -2066,27 +2062,27 @@ EOT end it 'should log an error if one occurs' do - expect(vsphere).to receive(:find_folder).and_raise(RuntimeError,'Mock Error') + expect(provider).to receive(:find_folder).and_raise(RuntimeError,'Mock Error') expect(logger).to receive(:log).with('s', "[!] [#{pool}] _check_pool failed with an error while inspecting inventory: Mock Error") - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'should log the discovery of VMs' do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) expect(logger).to receive(:log).with('s', "[?] [#{pool}] '#{new_vm}' added to 'discovered' queue") - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'should add undiscovered VMs to the completed queue' do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) allow(logger).to receive(:log).with('s', "[?] [#{pool}] '#{new_vm}' added to 'discovered' queue") expect(redis.sismember("vmpooler__discovered__#{pool}", new_vm)).to be(false) expect(redis.sismember("vmpooler__completed__#{pool}", new_vm)).to be(false) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) expect(redis.sismember("vmpooler__discovered__#{pool}", new_vm)).to be(false) expect(redis.sismember("vmpooler__completed__#{pool}", new_vm)).to be(true) @@ -2094,13 +2090,13 @@ EOT ['running','ready','pending','completed','discovered','migrating'].each do |queue_name| it "should not discover VMs in the #{queue_name} queue" do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) expect(logger).to receive(:log).with('s', "[?] [#{pool}] '#{new_vm}' added to 'discovered' queue").exactly(0).times expect(redis.sismember("vmpooler__discovered__#{pool}", new_vm)).to be(false) redis.sadd("vmpooler__#{queue_name}__#{pool}", new_vm) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) if queue_name == 'discovered' # Discovered VMs end up in the completed queue @@ -2115,7 +2111,7 @@ EOT # RUNNING context 'Running VM not in the inventory' do before(:each) do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) expect(logger).to receive(:log).with('s', "[?] 
[#{pool}] '#{new_vm}' added to 'discovered' queue") create_running_vm(pool,vm,token) end @@ -2123,13 +2119,13 @@ EOT it 'should not do anything' do expect(subject).to receive(:check_running_vm).exactly(0).times - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end end context 'Running VM in the inventory' do before(:each) do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([vm])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([vm])) allow(subject).to receive(:check_running_vm) create_running_vm(pool,vm,token) end @@ -2138,7 +2134,7 @@ EOT expect(subject).to receive(:check_running_vm).and_raise(RuntimeError,'MockError') expect(logger).to receive(:log).with('d', "[!] [#{pool}] _check_pool with an error while evaluating running VMs: MockError") - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'should use the VM lifetime in preference to defaults' do @@ -2146,29 +2142,29 @@ EOT redis.hset("vmpooler__vm__#{vm}", 'lifetime',big_lifetime) # The lifetime comes in as string - expect(subject).to receive(:check_running_vm).with(vm,pool,"#{big_lifetime}",vsphere) + expect(subject).to receive(:check_running_vm).with(vm,pool,"#{big_lifetime}",provider) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'should use the configuration default if the VM lifetime is not set' do config[:config]['vm_lifetime'] = 50 - expect(subject).to receive(:check_running_vm).with(vm,pool,50,vsphere) + expect(subject).to receive(:check_running_vm).with(vm,pool,50,provider) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'should use a lifetime of 12 if nothing is set' do - expect(subject).to receive(:check_running_vm).with(vm,pool,12,vsphere) + expect(subject).to receive(:check_running_vm).with(vm,pool,12,provider) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end end # READY context 'Ready VM not in the inventory' do before(:each) do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) expect(logger).to receive(:log).with('s', "[?] [#{pool}] '#{new_vm}' added to 'discovered' queue") create_ready_vm(pool,vm,token) end @@ -2176,13 +2172,13 @@ EOT it 'should not do anything' do expect(subject).to receive(:check_ready_vm).exactly(0).times - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end end context 'Ready VM in the inventory' do before(:each) do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([vm])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([vm])) allow(subject).to receive(:check_ready_vm) create_ready_vm(pool,vm,token) end @@ -2191,29 +2187,29 @@ EOT expect(subject).to receive(:check_ready_vm).and_raise(RuntimeError,'MockError') expect(logger).to receive(:log).with('d', "[!] 
[#{pool}] _check_pool failed with an error while evaluating ready VMs: MockError") - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'should use the pool TTL if set' do big_lifetime = 2000 config[:pools][0]['ready_ttl'] = big_lifetime - expect(subject).to receive(:check_ready_vm).with(vm,pool,big_lifetime,vsphere) + expect(subject).to receive(:check_ready_vm).with(vm,pool,big_lifetime,provider) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'should use a pool TTL of zero if none set' do - expect(subject).to receive(:check_ready_vm).with(vm,pool,0,vsphere) + expect(subject).to receive(:check_ready_vm).with(vm,pool,0,provider) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end end # PENDING context 'Pending VM not in the inventory' do before(:each) do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) expect(logger).to receive(:log).with('s', "[?] [#{pool}] '#{new_vm}' added to 'discovered' queue") create_pending_vm(pool,vm,token) end @@ -2222,13 +2218,13 @@ EOT expect(subject).to receive(:check_ready_vm).exactly(0).times expect(subject).to receive(:fail_pending_vm).with(vm,pool,Integer,false) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end end context 'Pending VM in the inventory' do before(:each) do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([vm])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([vm])) allow(subject).to receive(:check_pending_vm) create_pending_vm(pool,vm,token) end @@ -2237,51 +2233,51 @@ EOT expect(subject).to receive(:check_pending_vm).and_raise(RuntimeError,'MockError') expect(logger).to receive(:log).with('d', "[!] [#{pool}] _check_pool failed with an error while evaluating pending VMs: MockError") - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'should use the pool timeout if set' do big_lifetime = 2000 config[:pools][0]['timeout'] = big_lifetime - expect(subject).to receive(:check_pending_vm).with(vm,pool,big_lifetime,vsphere) + expect(subject).to receive(:check_pending_vm).with(vm,pool,big_lifetime,provider) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'should use the configuration setting if the pool timeout is not set' do big_lifetime = 2000 config[:config]['timeout'] = big_lifetime - expect(subject).to receive(:check_pending_vm).with(vm,pool,big_lifetime,vsphere) + expect(subject).to receive(:check_pending_vm).with(vm,pool,big_lifetime,provider) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'should use a pool timeout of 15 if nothing is set' do - expect(subject).to receive(:check_pending_vm).with(vm,pool,15,vsphere) + expect(subject).to receive(:check_pending_vm).with(vm,pool,15,provider) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end end # COMPLETED context 'Completed VM not in the inventory' do before(:each) do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) expect(logger).to receive(:log).with('s', "[?] [#{pool}] '#{new_vm}' added to 'discovered' queue") expect(logger).to receive(:log).with('s', "[!] 
[#{pool}] '#{vm}' not found in inventory, removed from 'completed' queue") create_completed_vm(vm,pool,true) end it 'should log a message' do - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'should not call destroy_vm' do expect(subject).to receive(:destroy_vm).exactly(0).times - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'should remove redis information' do @@ -2289,7 +2285,7 @@ EOT expect(redis.hget("vmpooler__vm__#{vm}", 'checkout')).to_not be(nil) expect(redis.hget("vmpooler__active__#{pool}",vm)).to_not be(nil) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) expect(redis.sismember("vmpooler__completed__#{pool}",vm)).to be(false) expect(redis.hget("vmpooler__vm__#{vm}", 'checkout')).to be(nil) @@ -2299,14 +2295,14 @@ EOT context 'Completed VM in the inventory' do before(:each) do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([vm])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([vm])) create_completed_vm(vm,pool,true) end it 'should call destroy_vm' do expect(subject).to receive(:destroy_vm) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end context 'with an error during destroy_vm' do @@ -2316,7 +2312,7 @@ EOT end it 'should log a message' do - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'should remove redis information' do @@ -2324,7 +2320,7 @@ EOT expect(redis.hget("vmpooler__vm__#{vm}", 'checkout')).to_not be(nil) expect(redis.hget("vmpooler__active__#{pool}",vm)).to_not be(nil) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) expect(redis.sismember("vmpooler__completed__#{pool}",vm)).to be(false) expect(redis.hget("vmpooler__vm__#{vm}", 'checkout')).to be(nil) @@ -2336,12 +2332,12 @@ EOT # DISCOVERED context 'Discovered VM' do before(:each) do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([vm])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([vm])) create_discovered_vm(vm,pool) end it 'should be moved to the completed queue' do - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) expect(redis.sismember("vmpooler__completed__#{pool}", vm)).to be(true) end @@ -2350,7 +2346,7 @@ EOT expect(redis).to receive(:smove).with("vmpooler__discovered__#{pool}", "vmpooler__completed__#{pool}", vm).and_raise(RuntimeError,'MockError') expect(logger).to receive(:log).with("d", "[!] [#{pool}] _check_pool failed with an error while evaluating discovered VMs: MockError") - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end ['pending','ready','running','completed'].each do |queue_name| @@ -2368,7 +2364,7 @@ EOT redis.sadd("vmpooler__#{queue_name}__#{pool}", vm) allow(logger).to receive(:log) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) expect(redis.sismember("vmpooler__#{queue_name}__#{pool}", vm)).to be(true) end @@ -2378,7 +2374,7 @@ EOT allow(logger).to receive(:log) expect(redis.sismember("vmpooler__discovered__#{pool}", vm)).to be(true) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) expect(redis.sismember("vmpooler__discovered__#{pool}", vm)).to be(false) end @@ -2386,7 +2382,7 @@ EOT redis.sadd("vmpooler__#{queue_name}__#{pool}", vm) expect(logger).to receive(:log).with('d', "[!] 
[#{pool}] '#{vm}' found in '#{queue_name}', removed from 'discovered' queue") - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end end end @@ -2395,7 +2391,7 @@ EOT # MIGRATIONS context 'Migrating VM not in the inventory' do before(:each) do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([new_vm])) expect(logger).to receive(:log).with('s', "[?] [#{pool}] '#{new_vm}' added to 'discovered' queue") create_migrating_vm(vm,pool) end @@ -2403,13 +2399,13 @@ EOT it 'should not do anything' do expect(subject).to receive(:migrate_vm).exactly(0).times - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end end context 'Migrating VM in the inventory' do before(:each) do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([vm])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([vm])) allow(subject).to receive(:check_ready_vm) allow(logger).to receive(:log).with("s", "[!] [#{pool}] is empty") create_migrating_vm(vm,pool) @@ -2419,88 +2415,88 @@ EOT expect(subject).to receive(:migrate_vm).and_raise(RuntimeError,'MockError') expect(logger).to receive(:log).with('s', "[x] [#{pool}] '#{vm}' failed to migrate: MockError") - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'should call migrate_vm' do - expect(subject).to receive(:migrate_vm).with(vm,pool,vsphere) + expect(subject).to receive(:migrate_vm).with(vm,pool,provider) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end end # REPOPULATE context 'Repopulate a pool' do it 'should not call clone_vm when number of VMs is equal to the pool size' do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([])) expect(subject).to receive(:clone_vm).exactly(0).times - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'should not call clone_vm when number of VMs is greater than the pool size' do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([vm])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([vm])) create_ready_vm(pool,vm,token) expect(subject).to receive(:clone_vm).exactly(0).times - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end ['ready','pending'].each do |queue_name| it "should use VMs in #{queue_name} queue to caculate pool size" do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([vm])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([vm])) expect(subject).to receive(:clone_vm).exactly(0).times # Modify the pool size to 1 and add a VM in the queue redis.sadd("vmpooler__#{queue_name}__#{pool}",vm) config[:pools][0]['size'] = 1 - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end end ['running','completed','discovered','migrating'].each do |queue_name| it "should not use VMs in #{queue_name} queue to caculate pool size" do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([vm])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([vm])) expect(subject).to receive(:clone_vm) # Modify the pool size to 1 and add a VM in the queue redis.sadd("vmpooler__#{queue_name}__#{pool}",vm) 
config[:pools][0]['size'] = 1 - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end end it 'should log a message the first time a pool is empty' do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([])) expect(logger).to receive(:log).with('s', "[!] [#{pool}] is empty") - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end context 'when pool is marked as empty' do before(:each) do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([])) redis.set("vmpooler__empty__#{pool}", 'true') end it 'should not log a message when the pool remains empty' do expect(logger).to receive(:log).with('s', "[!] [#{pool}] is empty").exactly(0).times - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'should remove the empty pool mark if it is no longer empty' do create_ready_vm(pool,vm,token) expect(redis.get("vmpooler__empty__#{pool}")).to be_truthy - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) expect(redis.get("vmpooler__empty__#{pool}")).to be_falsey end end context 'when number of VMs is less than the pool size' do before(:each) do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([])) end it 'should call clone_vm to populate the pool' do @@ -2509,7 +2505,7 @@ EOT expect(subject).to receive(:clone_vm).exactly(pool_size).times - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'should call clone_vm until task_limit is hit' do @@ -2520,7 +2516,7 @@ EOT expect(subject).to receive(:clone_vm).exactly(task_limit).times - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'log a message if a cloning error occurs' do @@ -2531,7 +2527,7 @@ EOT expect(logger).to receive(:log).with("s", "[!] [#{pool}] clone failed during check_pool with an error: MockError") expect(logger).to receive(:log).with('d', "[!] 
[#{pool}] _check_pool failed with an error: MockError") - expect{ subject._check_pool(pool_object,vsphere) }.to raise_error(RuntimeError,'MockError') + expect{ subject._check_pool(pool_object,provider) }.to raise_error(RuntimeError,'MockError') end end @@ -2540,33 +2536,33 @@ EOT create_ready_vm(pool,'vm1') create_ready_vm(pool,'vm2') create_ready_vm(pool,'vm3') - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new(['vm1','vm2','vm3'])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new(['vm1','vm2','vm3'])) expect(metrics).to receive(:gauge).with("ready.#{pool}", 3) allow(metrics).to receive(:gauge) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'increments metrics for running queue' do create_running_vm(pool,'vm1',token) create_running_vm(pool,'vm2',token) create_running_vm(pool,'vm3',token) - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new(['vm1','vm2','vm3'])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new(['vm1','vm2','vm3'])) expect(metrics).to receive(:gauge).with("running.#{pool}", 3) allow(metrics).to receive(:gauge) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end it 'increments metrics with 0 when pool empty' do - expect(vsphere).to receive(:find_folder).and_return(MockFindFolder.new([])) + expect(provider).to receive(:find_folder).and_return(MockFindFolder.new([])) expect(metrics).to receive(:gauge).with("ready.#{pool}", 0) expect(metrics).to receive(:gauge).with("running.#{pool}", 0) - subject._check_pool(pool_object,vsphere) + subject._check_pool(pool_object,provider) end end end @@ -2574,18 +2570,18 @@ EOT describe '#_check_snapshot_queue' do let(:pool_helper) { double('pool') } - let(:vsphere) { {pool => pool_helper} } + let(:provider) { {pool => pool_helper} } before do expect(subject).not_to be_nil - $vsphere = vsphere + $provider = provider end it 'checks appropriate redis queues' do expect(redis).to receive(:spop).with('vmpooler__tasks__snapshot') expect(redis).to receive(:spop).with('vmpooler__tasks__snapshot-revert') - subject._check_snapshot_queue(vsphere) + subject._check_snapshot_queue(provider) end end end From c724d90d908c08edea62f8c82bfb6fc4a5fd082d Mon Sep 17 00:00:00 2001 From: Glenn Sarti Date: Thu, 2 Mar 2017 15:56:34 -0800 Subject: [PATCH 3/3] (POOLER-70) Add initial VSphere VM Provider This commit adds a skeleton VM Provider for VSphere based VM operations. 
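As an illustrative sketch only (not something this commit adds), a later
commit could begin filling in this skeleton by overriding methods from the
Base provider, starting with get_vm. In the sketch below the vm_object_for
helper is a placeholder for vCenter lookup code that does not exist yet, and
the returned hash simply mirrors the fake_vm fixture used in the new spec:

    require 'vmpooler/providers/base'

    module Vmpooler
      class PoolManager
        class Provider
          class VSphere < Vmpooler::PoolManager::Provider::Base
            # Sketch only: translate a looked-up VM into the plain hashtable
            # that callers of get_vm expect.
            def get_vm(vm_name)
              vm_object = vm_object_for(vm_name)
              return nil if vm_object.nil?

              {
                'name'       => vm_name,
                'hostname'   => vm_object['hostname'],
                'template'   => vm_object['template'],
                'boottime'   => vm_object['boottime'],
                'powerstate' => vm_object['powerstate']
              }
            end

            private

            # Placeholder: a real implementation would query vCenter here.
            def vm_object_for(_vm_name)
              nil
            end
          end
        end
      end
    end

Until methods are overridden along these lines, every Base method the VSphere
class inherits raises a "does not implement" error, which is what the new
spec file asserts.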
--- lib/vmpooler/providers/vsphere.rb | 17 ++++++ spec/unit/providers/vsphere_spec.rb | 89 +++++++++++++++++++++++++++++ 2 files changed, 106 insertions(+) create mode 100644 lib/vmpooler/providers/vsphere.rb create mode 100644 spec/unit/providers/vsphere_spec.rb diff --git a/lib/vmpooler/providers/vsphere.rb b/lib/vmpooler/providers/vsphere.rb new file mode 100644 index 0000000..cdb7cf2 --- /dev/null +++ b/lib/vmpooler/providers/vsphere.rb @@ -0,0 +1,17 @@ +module Vmpooler + class PoolManager + class Provider + class VSphere < Vmpooler::PoolManager::Provider::Base + + def initialize(options) + super(options) + end + + def name + 'vsphere' + end + + end + end + end +end diff --git a/spec/unit/providers/vsphere_spec.rb b/spec/unit/providers/vsphere_spec.rb new file mode 100644 index 0000000..4724bda --- /dev/null +++ b/spec/unit/providers/vsphere_spec.rb @@ -0,0 +1,89 @@ +require 'spec_helper' + +describe 'Vmpooler::PoolManager::Provider::VSphere' do + let(:config) { {} } + let(:fake_vm) { + fake_vm = {} + fake_vm['name'] = 'vm1' + fake_vm['hostname'] = 'vm1' + fake_vm['template'] = 'pool1' + fake_vm['boottime'] = Time.now + fake_vm['powerstate'] = 'PoweredOn' + + fake_vm + } + + subject { Vmpooler::PoolManager::Provider::VSphere.new(config) } + + describe '#name' do + it 'should be vsphere' do + expect(subject.name).to eq('vsphere') + end + end + + describe '#vms_in_pool' do + it 'should raise error' do + expect{subject.vms_in_pool('pool')}.to raise_error(/does not implement vms_in_pool/) + end + end + + describe '#get_vm_host' do + it 'should raise error' do + expect{subject.get_vm_host('vm')}.to raise_error(/does not implement get_vm_host/) + end + end + + describe '#find_least_used_compatible_host' do + it 'should raise error' do + expect{subject.find_least_used_compatible_host('vm')}.to raise_error(/does not implement find_least_used_compatible_host/) + end + end + + describe '#migrate_vm_to_host' do + it 'should raise error' do + expect{subject.migrate_vm_to_host('vm','host')}.to raise_error(/does not implement migrate_vm_to_host/) + end + end + + describe '#get_vm' do + it 'should raise error' do + expect{subject.get_vm('vm')}.to raise_error(/does not implement get_vm/) + end + end + + describe '#create_vm' do + it 'should raise error' do + expect{subject.create_vm('pool','newname')}.to raise_error(/does not implement create_vm/) + end + end + + describe '#destroy_vm' do + it 'should raise error' do + expect{subject.destroy_vm('vm','pool')}.to raise_error(/does not implement destroy_vm/) + end + end + + describe '#is_vm_ready?' do + it 'should raise error' do + expect{subject.is_vm_ready?('vm','pool','timeout')}.to raise_error(/does not implement is_vm_ready?/) + end + end + + describe '#vm_exists?' do + it 'should raise error' do + expect{subject.vm_exists?('vm')}.to raise_error(/does not implement/) + end + + it 'should return true when get_vm returns an object' do + allow(subject).to receive(:get_vm).with('vm').and_return(fake_vm) + + expect(subject.vm_exists?('vm')).to eq(true) + end + + it 'should return false when get_vm returns nil' do + allow(subject).to receive(:get_vm).with('vm').and_return(nil) + + expect(subject.vm_exists?('vm')).to eq(false) + end + end +end