mirror of
https://github.com/puppetlabs/vmpooler.git
synced 2026-01-26 10:08:40 -05:00
Merge pull request #216 from glennsarti/ticket/master/POOLER-70-add-providers
(POOLER-72)(POOLER-70)(POOLER-52) Move Pool Manager to use the VM Provider
This commit is contained in:
commit
1fcb19bd7b
20 changed files with 2887 additions and 4082 deletions
|
|
@ -9,6 +9,8 @@ AllCops:
|
||||||
- 'scripts/**/*'
|
- 'scripts/**/*'
|
||||||
- 'spec/**/*'
|
- 'spec/**/*'
|
||||||
- 'vendor/**/*'
|
- 'vendor/**/*'
|
||||||
|
- Gemfile
|
||||||
|
- Rakefile
|
||||||
|
|
||||||
Style/Documentation:
|
Style/Documentation:
|
||||||
Enabled: false
|
Enabled: false
|
||||||
|
|
@ -58,3 +60,7 @@ Style/ConditionalAssignment:
|
||||||
Next:
|
Next:
|
||||||
Enabled: false
|
Enabled: false
|
||||||
|
|
||||||
|
# Enforce LF line endings, even when on Windows
|
||||||
|
Style/EndOfLine:
|
||||||
|
EnforcedStyle: lf
|
||||||
|
|
||||||
|
|
|
||||||
1
Gemfile
1
Gemfile
|
|
@ -16,6 +16,7 @@ gem 'redis', '>= 3.2'
|
||||||
gem 'sinatra', '>= 1.4'
|
gem 'sinatra', '>= 1.4'
|
||||||
gem 'net-ldap', '<= 0.12.1' # keep compatibility w/ jruby & mri-1.9.3
|
gem 'net-ldap', '<= 0.12.1' # keep compatibility w/ jruby & mri-1.9.3
|
||||||
gem 'statsd-ruby', '>= 1.3.0', :require => 'statsd'
|
gem 'statsd-ruby', '>= 1.3.0', :require => 'statsd'
|
||||||
|
gem 'connection_pool', '>= 2.2.1'
|
||||||
|
|
||||||
# Test deps
|
# Test deps
|
||||||
group :test do
|
group :test do
|
||||||
|
|
|
||||||
11
README.md
11
README.md
|
|
@ -29,10 +29,11 @@ The following YAML configuration sets up two pools, `debian-7-i386` and `debian-
|
||||||
|
|
||||||
```
|
```
|
||||||
---
|
---
|
||||||
:vsphere:
|
:providers:
|
||||||
server: 'vsphere.company.com'
|
:vsphere:
|
||||||
username: 'vmpooler'
|
server: 'vsphere.company.com'
|
||||||
password: 'swimsw1msw!m'
|
username: 'vmpooler'
|
||||||
|
password: 'swimsw1msw!m'
|
||||||
|
|
||||||
:redis:
|
:redis:
|
||||||
server: 'redis.company.com'
|
server: 'redis.company.com'
|
||||||
|
|
@ -47,12 +48,14 @@ The following YAML configuration sets up two pools, `debian-7-i386` and `debian-
|
||||||
pool: 'Pooled VMs/debian-7-i386'
|
pool: 'Pooled VMs/debian-7-i386'
|
||||||
datastore: 'vmstorage'
|
datastore: 'vmstorage'
|
||||||
size: 5
|
size: 5
|
||||||
|
provider: vsphere
|
||||||
- name: 'debian-7-x86_64'
|
- name: 'debian-7-x86_64'
|
||||||
template: 'Templates/debian-7-x86_64'
|
template: 'Templates/debian-7-x86_64'
|
||||||
folder: 'Pooled VMs/debian-7-x86_64'
|
folder: 'Pooled VMs/debian-7-x86_64'
|
||||||
pool: 'Pooled VMs/debian-7-x86_64'
|
pool: 'Pooled VMs/debian-7-x86_64'
|
||||||
datastore: 'vmstorage'
|
datastore: 'vmstorage'
|
||||||
size: 5
|
size: 5
|
||||||
|
provider: vsphere
|
||||||
```
|
```
|
||||||
|
|
||||||
See the provided YAML configuration example, [vmpooler.yaml.example](vmpooler.yaml.example), for additional configuration options and parameters.
|
See the provided YAML configuration example, [vmpooler.yaml.example](vmpooler.yaml.example), for additional configuration options and parameters.
|
||||||
|
|
|
||||||
|
|
@ -12,7 +12,7 @@ module Vmpooler
|
||||||
require 'yaml'
|
require 'yaml'
|
||||||
require 'set'
|
require 'set'
|
||||||
|
|
||||||
%w(api graphite logger pool_manager vsphere_helper statsd dummy_statsd providers).each do |lib|
|
%w[api graphite logger pool_manager statsd dummy_statsd generic_connection_pool providers].each do |lib|
|
||||||
begin
|
begin
|
||||||
require "vmpooler/#{lib}"
|
require "vmpooler/#{lib}"
|
||||||
rescue LoadError
|
rescue LoadError
|
||||||
|
|
|
||||||
|
|
@ -30,7 +30,7 @@ module Vmpooler
|
||||||
use Vmpooler::Dashboard
|
use Vmpooler::Dashboard
|
||||||
|
|
||||||
# Load API components
|
# Load API components
|
||||||
%w(helpers dashboard reroute v1).each do |lib|
|
%w[helpers dashboard reroute v1].each do |lib|
|
||||||
begin
|
begin
|
||||||
require "api/#{lib}"
|
require "api/#{lib}"
|
||||||
rescue LoadError
|
rescue LoadError
|
||||||
|
|
|
||||||
53
lib/vmpooler/generic_connection_pool.rb
Normal file
53
lib/vmpooler/generic_connection_pool.rb
Normal file
|
|
@ -0,0 +1,53 @@
|
||||||
|
require 'connection_pool'
|
||||||
|
|
||||||
|
module Vmpooler
|
||||||
|
class PoolManager
|
||||||
|
class GenericConnectionPool < ConnectionPool
|
||||||
|
# Extend the ConnectionPool class with instrumentation
|
||||||
|
# https://github.com/mperham/connection_pool/blob/master/lib/connection_pool.rb
|
||||||
|
|
||||||
|
def initialize(options = {}, &block)
|
||||||
|
super(options, &block)
|
||||||
|
@metrics = options[:metrics]
|
||||||
|
@metric_prefix = options[:metric_prefix]
|
||||||
|
@metric_prefix = 'connectionpool' if @metric_prefix.nil? || @metric_prefix == ''
|
||||||
|
end
|
||||||
|
|
||||||
|
if Thread.respond_to?(:handle_interrupt)
|
||||||
|
# MRI
|
||||||
|
def with_metrics(options = {})
|
||||||
|
Thread.handle_interrupt(Exception => :never) do
|
||||||
|
start = Time.now
|
||||||
|
conn = checkout(options)
|
||||||
|
timespan_ms = ((Time.now - start) * 1000).to_i
|
||||||
|
@metrics.gauge(@metric_prefix + '.available', @available.length) unless @metrics.nil?
|
||||||
|
@metrics.timing(@metric_prefix + '.waited', timespan_ms) unless @metrics.nil?
|
||||||
|
begin
|
||||||
|
Thread.handle_interrupt(Exception => :immediate) do
|
||||||
|
yield conn
|
||||||
|
end
|
||||||
|
ensure
|
||||||
|
checkin
|
||||||
|
@metrics.gauge(@metric_prefix + '.available', @available.length) unless @metrics.nil?
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
else
|
||||||
|
# jruby 1.7.x
|
||||||
|
def with_metrics(options = {})
|
||||||
|
start = Time.now
|
||||||
|
conn = checkout(options)
|
||||||
|
timespan_ms = ((Time.now - start) * 1000).to_i
|
||||||
|
@metrics.gauge(@metric_prefix + '.available', @available.length) unless @metrics.nil?
|
||||||
|
@metrics.timing(@metric_prefix + '.waited', timespan_ms) unless @metrics.nil?
|
||||||
|
begin
|
||||||
|
yield conn
|
||||||
|
ensure
|
||||||
|
checkin
|
||||||
|
@metrics.gauge(@metric_prefix + '.available', @available.length) unless @metrics.nil?
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
@ -19,37 +19,34 @@ module Vmpooler
|
||||||
$threads = {}
|
$threads = {}
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def config
|
||||||
|
$config
|
||||||
|
end
|
||||||
|
|
||||||
# Check the state of a VM
|
# Check the state of a VM
|
||||||
def check_pending_vm(vm, pool, timeout, provider)
|
def check_pending_vm(vm, pool, timeout, provider)
|
||||||
Thread.new do
|
Thread.new do
|
||||||
_check_pending_vm(vm, pool, timeout, provider)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
def open_socket(host, domain=nil, timeout=5, port=22, &block)
|
|
||||||
Timeout.timeout(timeout) do
|
|
||||||
target_host = host
|
|
||||||
target_host = "#{host}.#{domain}" if domain
|
|
||||||
sock = TCPSocket.new target_host, port
|
|
||||||
begin
|
begin
|
||||||
yield sock if block_given?
|
_check_pending_vm(vm, pool, timeout, provider)
|
||||||
ensure
|
rescue => err
|
||||||
sock.close
|
$logger.log('s', "[!] [#{pool}] '#{vm}' errored while checking a pending vm : #{err}")
|
||||||
|
fail_pending_vm(vm, pool, timeout)
|
||||||
|
raise
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def _check_pending_vm(vm, pool, timeout, provider)
|
def _check_pending_vm(vm, pool, timeout, provider)
|
||||||
host = provider.find_vm(vm)
|
host = provider.get_vm(pool, vm)
|
||||||
|
|
||||||
if ! host
|
if ! host
|
||||||
fail_pending_vm(vm, pool, timeout, false)
|
fail_pending_vm(vm, pool, timeout, false)
|
||||||
return
|
return
|
||||||
end
|
end
|
||||||
open_socket vm
|
if provider.vm_ready?(pool, vm)
|
||||||
move_pending_vm_to_ready(vm, pool, host)
|
move_pending_vm_to_ready(vm, pool, host)
|
||||||
rescue
|
else
|
||||||
fail_pending_vm(vm, pool, timeout)
|
fail_pending_vm(vm, pool, timeout)
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def remove_nonexistent_vm(vm, pool)
|
def remove_nonexistent_vm(vm, pool)
|
||||||
|
|
@ -57,9 +54,9 @@ module Vmpooler
|
||||||
$logger.log('d', "[!] [#{pool}] '#{vm}' no longer exists. Removing from pending.")
|
$logger.log('d', "[!] [#{pool}] '#{vm}' no longer exists. Removing from pending.")
|
||||||
end
|
end
|
||||||
|
|
||||||
def fail_pending_vm(vm, pool, timeout, exists=true)
|
def fail_pending_vm(vm, pool, timeout, exists = true)
|
||||||
clone_stamp = $redis.hget("vmpooler__vm__#{vm}", 'clone')
|
clone_stamp = $redis.hget("vmpooler__vm__#{vm}", 'clone')
|
||||||
return if ! clone_stamp
|
return true if !clone_stamp
|
||||||
|
|
||||||
time_since_clone = (Time.now - Time.parse(clone_stamp)) / 60
|
time_since_clone = (Time.now - Time.parse(clone_stamp)) / 60
|
||||||
if time_since_clone > timeout
|
if time_since_clone > timeout
|
||||||
|
|
@ -70,18 +67,16 @@ module Vmpooler
|
||||||
remove_nonexistent_vm(vm, pool)
|
remove_nonexistent_vm(vm, pool)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
true
|
||||||
rescue => err
|
rescue => err
|
||||||
$logger.log('d', "Fail pending VM failed with an error: #{err}")
|
$logger.log('d', "Fail pending VM failed with an error: #{err}")
|
||||||
|
false
|
||||||
end
|
end
|
||||||
|
|
||||||
def move_pending_vm_to_ready(vm, pool, host)
|
def move_pending_vm_to_ready(vm, pool, host)
|
||||||
if (host.summary) &&
|
if host['hostname'] == vm
|
||||||
(host.summary.guest) &&
|
|
||||||
(host.summary.guest.hostName) &&
|
|
||||||
(host.summary.guest.hostName == vm)
|
|
||||||
|
|
||||||
begin
|
begin
|
||||||
Socket.getaddrinfo(vm, nil) # WTF?
|
Socket.getaddrinfo(vm, nil) # WTF? I assume this is just priming the local DNS resolver cache?!?!
|
||||||
rescue
|
rescue
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|
@ -91,81 +86,84 @@ module Vmpooler
|
||||||
$redis.smove('vmpooler__pending__' + pool, 'vmpooler__ready__' + pool, vm)
|
$redis.smove('vmpooler__pending__' + pool, 'vmpooler__ready__' + pool, vm)
|
||||||
$redis.hset('vmpooler__boot__' + Date.today.to_s, pool + ':' + vm, finish)
|
$redis.hset('vmpooler__boot__' + Date.today.to_s, pool + ':' + vm, finish)
|
||||||
|
|
||||||
$logger.log('s', "[>] [#{pool}] '#{vm}' moved to 'ready' queue")
|
$logger.log('s', "[>] [#{pool}] '#{vm}' moved from 'pending' to 'ready' queue")
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def check_ready_vm(vm, pool, ttl, provider)
|
def check_ready_vm(vm, pool, ttl, provider)
|
||||||
Thread.new do
|
Thread.new do
|
||||||
if ttl > 0
|
begin
|
||||||
if (((Time.now - host.runtime.bootTime) / 60).to_s[/^\d+\.\d{1}/].to_f) > ttl
|
_check_ready_vm(vm, pool, ttl, provider)
|
||||||
$redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm)
|
rescue => err
|
||||||
|
$logger.log('s', "[!] [#{pool}] '#{vm}' failed while checking a ready vm : #{err}")
|
||||||
$logger.log('d', "[!] [#{pool}] '#{vm}' reached end of TTL after #{ttl} minutes, removed from 'ready' queue")
|
raise
|
||||||
return
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
check_stamp = $redis.hget('vmpooler__vm__' + vm, 'check')
|
def _check_ready_vm(vm, pool, ttl, provider)
|
||||||
|
# Periodically check that the VM is available
|
||||||
|
check_stamp = $redis.hget('vmpooler__vm__' + vm, 'check')
|
||||||
|
return if check_stamp && (((Time.now - Time.parse(check_stamp)) / 60) <= $config[:config]['vm_checktime'])
|
||||||
|
|
||||||
if
|
host = provider.get_vm(pool, vm)
|
||||||
(!check_stamp) ||
|
# Check if the host even exists
|
||||||
(((Time.now - Time.parse(check_stamp)) / 60) > $config[:config]['vm_checktime'])
|
if !host
|
||||||
|
$redis.srem('vmpooler__ready__' + pool, vm)
|
||||||
|
$logger.log('s', "[!] [#{pool}] '#{vm}' not found in inventory, removed from 'ready' queue")
|
||||||
|
return
|
||||||
|
end
|
||||||
|
|
||||||
$redis.hset('vmpooler__vm__' + vm, 'check', Time.now)
|
# Check if the hosts TTL has expired
|
||||||
|
if ttl > 0
|
||||||
|
if (((Time.now - host['boottime']) / 60).to_s[/^\d+\.\d{1}/].to_f) > ttl
|
||||||
|
$redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm)
|
||||||
|
|
||||||
host = provider.find_vm(vm)
|
$logger.log('d', "[!] [#{pool}] '#{vm}' reached end of TTL after #{ttl} minutes, removed from 'ready' queue")
|
||||||
|
return
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
if host
|
$redis.hset('vmpooler__vm__' + vm, 'check', Time.now)
|
||||||
if
|
# Check if the VM is not powered on
|
||||||
(host.runtime) &&
|
unless (host['powerstate'].casecmp('poweredon') == 0)
|
||||||
(host.runtime.powerState) &&
|
$redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm)
|
||||||
(host.runtime.powerState != 'poweredOn')
|
$logger.log('d', "[!] [#{pool}] '#{vm}' appears to be powered off, removed from 'ready' queue")
|
||||||
|
return
|
||||||
|
end
|
||||||
|
|
||||||
$redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm)
|
# Check if the hostname has magically changed from underneath Pooler
|
||||||
|
if (host['hostname'] != vm)
|
||||||
|
$redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm)
|
||||||
|
$logger.log('d', "[!] [#{pool}] '#{vm}' has mismatched hostname, removed from 'ready' queue")
|
||||||
|
return
|
||||||
|
end
|
||||||
|
|
||||||
$logger.log('d', "[!] [#{pool}] '#{vm}' appears to be powered off, removed from 'ready' queue")
|
# Check if the VM is still ready/available
|
||||||
return
|
begin
|
||||||
end
|
fail "VM #{vm} is not ready" unless provider.vm_ready?(pool, vm)
|
||||||
|
rescue
|
||||||
if
|
if $redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm)
|
||||||
(host.summary.guest) &&
|
$logger.log('d', "[!] [#{pool}] '#{vm}' is unreachable, removed from 'ready' queue")
|
||||||
(host.summary.guest.hostName) &&
|
else
|
||||||
(host.summary.guest.hostName != vm)
|
$logger.log('d', "[!] [#{pool}] '#{vm}' is unreachable, and failed to remove from 'ready' queue")
|
||||||
|
|
||||||
$redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm)
|
|
||||||
|
|
||||||
$logger.log('d', "[!] [#{pool}] '#{vm}' has mismatched hostname, removed from 'ready' queue")
|
|
||||||
return
|
|
||||||
end
|
|
||||||
else
|
|
||||||
$redis.srem('vmpooler__ready__' + pool, vm)
|
|
||||||
|
|
||||||
$logger.log('s', "[!] [#{pool}] '#{vm}' not found in vCenter inventory, removed from 'ready' queue")
|
|
||||||
end
|
|
||||||
|
|
||||||
begin
|
|
||||||
open_socket vm
|
|
||||||
rescue
|
|
||||||
if $redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm)
|
|
||||||
$logger.log('d', "[!] [#{pool}] '#{vm}' is unreachable, removed from 'ready' queue")
|
|
||||||
else
|
|
||||||
$logger.log('d', "[!] [#{pool}] '#{vm}' is unreachable, and failed to remove from 'ready' queue")
|
|
||||||
end
|
|
||||||
return
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def check_running_vm(vm, pool, ttl, provider)
|
def check_running_vm(vm, pool, ttl, provider)
|
||||||
Thread.new do
|
Thread.new do
|
||||||
_check_running_vm(vm, pool, ttl, provider)
|
begin
|
||||||
|
_check_running_vm(vm, pool, ttl, provider)
|
||||||
|
rescue => err
|
||||||
|
$logger.log('s', "[!] [#{pool}] '#{vm}' failed while checking VM with an error: #{err}")
|
||||||
|
raise
|
||||||
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def _check_running_vm(vm, pool, ttl, provider)
|
def _check_running_vm(vm, pool, ttl, provider)
|
||||||
host = provider.find_vm(vm)
|
host = provider.get_vm(pool, vm)
|
||||||
|
|
||||||
if host
|
if host
|
||||||
queue_from, queue_to = 'running', 'completed'
|
queue_from, queue_to = 'running', 'completed'
|
||||||
|
|
@ -201,237 +199,182 @@ module Vmpooler
|
||||||
end
|
end
|
||||||
|
|
||||||
def _clone_vm(pool, provider)
|
def _clone_vm(pool, provider)
|
||||||
template = pool['template']
|
pool_name = pool['name']
|
||||||
folder = pool['folder']
|
|
||||||
datastore = pool['datastore']
|
|
||||||
target = pool['clone_target']
|
|
||||||
vm = {}
|
|
||||||
|
|
||||||
if template =~ /\//
|
|
||||||
templatefolders = template.split('/')
|
|
||||||
vm['template'] = templatefolders.pop
|
|
||||||
end
|
|
||||||
|
|
||||||
if templatefolders
|
|
||||||
vm[vm['template']] = provider.find_folder(templatefolders.join('/')).find(vm['template'])
|
|
||||||
else
|
|
||||||
fail 'Please provide a full path to the template'
|
|
||||||
end
|
|
||||||
|
|
||||||
if vm['template'].length == 0
|
|
||||||
fail "Unable to find template '#{vm['template']}'!"
|
|
||||||
end
|
|
||||||
|
|
||||||
# Generate a randomized hostname
|
# Generate a randomized hostname
|
||||||
o = [('a'..'z'), ('0'..'9')].map(&:to_a).flatten
|
o = [('a'..'z'), ('0'..'9')].map(&:to_a).flatten
|
||||||
vm['hostname'] = $config[:config]['prefix'] + o[rand(25)] + (0...14).map { o[rand(o.length)] }.join
|
new_vmname = $config[:config]['prefix'] + o[rand(25)] + (0...14).map { o[rand(o.length)] }.join
|
||||||
|
|
||||||
# Add VM to Redis inventory ('pending' pool)
|
# Add VM to Redis inventory ('pending' pool)
|
||||||
$redis.sadd('vmpooler__pending__' + vm['template'], vm['hostname'])
|
$redis.sadd('vmpooler__pending__' + pool_name, new_vmname)
|
||||||
$redis.hset('vmpooler__vm__' + vm['hostname'], 'clone', Time.now)
|
$redis.hset('vmpooler__vm__' + new_vmname, 'clone', Time.now)
|
||||||
$redis.hset('vmpooler__vm__' + vm['hostname'], 'template', vm['template'])
|
$redis.hset('vmpooler__vm__' + new_vmname, 'template', pool_name)
|
||||||
|
|
||||||
# Annotate with creation time, origin template, etc.
|
|
||||||
# Add extraconfig options that can be queried by vmtools
|
|
||||||
configSpec = RbVmomi::VIM.VirtualMachineConfigSpec(
|
|
||||||
annotation: JSON.pretty_generate(
|
|
||||||
name: vm['hostname'],
|
|
||||||
created_by: $config[:vsphere]['username'],
|
|
||||||
base_template: vm['template'],
|
|
||||||
creation_timestamp: Time.now.utc
|
|
||||||
),
|
|
||||||
extraConfig: [
|
|
||||||
{ key: 'guestinfo.hostname',
|
|
||||||
value: vm['hostname']
|
|
||||||
}
|
|
||||||
]
|
|
||||||
)
|
|
||||||
|
|
||||||
# Choose a clone target
|
|
||||||
if target
|
|
||||||
$clone_target = provider.find_least_used_host(target)
|
|
||||||
elsif $config[:config]['clone_target']
|
|
||||||
$clone_target = provider.find_least_used_host($config[:config]['clone_target'])
|
|
||||||
end
|
|
||||||
|
|
||||||
# Put the VM in the specified folder and resource pool
|
|
||||||
relocateSpec = RbVmomi::VIM.VirtualMachineRelocateSpec(
|
|
||||||
datastore: provider.find_datastore(datastore),
|
|
||||||
host: $clone_target,
|
|
||||||
diskMoveType: :moveChildMostDiskBacking
|
|
||||||
)
|
|
||||||
|
|
||||||
# Create a clone spec
|
|
||||||
spec = RbVmomi::VIM.VirtualMachineCloneSpec(
|
|
||||||
location: relocateSpec,
|
|
||||||
config: configSpec,
|
|
||||||
powerOn: true,
|
|
||||||
template: false
|
|
||||||
)
|
|
||||||
|
|
||||||
# Clone the VM
|
|
||||||
$logger.log('d', "[ ] [#{vm['template']}] '#{vm['hostname']}' is being cloned from '#{vm['template']}'")
|
|
||||||
|
|
||||||
begin
|
begin
|
||||||
|
$logger.log('d', "[ ] [#{pool_name}] Starting to clone '#{new_vmname}'")
|
||||||
start = Time.now
|
start = Time.now
|
||||||
vm[vm['template']].CloneVM_Task(
|
provider.create_vm(pool_name, new_vmname)
|
||||||
folder: provider.find_folder(folder),
|
|
||||||
name: vm['hostname'],
|
|
||||||
spec: spec
|
|
||||||
).wait_for_completion
|
|
||||||
finish = '%.2f' % (Time.now - start)
|
finish = '%.2f' % (Time.now - start)
|
||||||
|
|
||||||
$redis.hset('vmpooler__clone__' + Date.today.to_s, vm['template'] + ':' + vm['hostname'], finish)
|
$redis.hset('vmpooler__clone__' + Date.today.to_s, pool_name + ':' + new_vmname, finish)
|
||||||
$redis.hset('vmpooler__vm__' + vm['hostname'], 'clone_time', finish)
|
$redis.hset('vmpooler__vm__' + new_vmname, 'clone_time', finish)
|
||||||
|
$logger.log('s', "[+] [#{pool_name}] '#{new_vmname}' cloned in #{finish} seconds")
|
||||||
|
|
||||||
$logger.log('s', "[+] [#{vm['template']}] '#{vm['hostname']}' cloned from '#{vm['template']}' in #{finish} seconds")
|
$metrics.timing("clone.#{pool_name}", finish)
|
||||||
rescue => err
|
rescue => err
|
||||||
$logger.log('s', "[!] [#{vm['template']}] '#{vm['hostname']}' clone failed with an error: #{err}")
|
$logger.log('s', "[!] [#{pool_name}] '#{new_vmname}' clone failed with an error: #{err}")
|
||||||
$redis.srem('vmpooler__pending__' + vm['template'], vm['hostname'])
|
$redis.srem('vmpooler__pending__' + pool_name, new_vmname)
|
||||||
raise
|
raise
|
||||||
|
ensure
|
||||||
|
$redis.decr('vmpooler__tasks__clone')
|
||||||
end
|
end
|
||||||
|
|
||||||
$redis.decr('vmpooler__tasks__clone')
|
|
||||||
|
|
||||||
$metrics.timing("clone.#{vm['template']}", finish)
|
|
||||||
end
|
end
|
||||||
|
|
||||||
# Destroy a VM
|
# Destroy a VM
|
||||||
def destroy_vm(vm, pool, provider)
|
def destroy_vm(vm, pool, provider)
|
||||||
Thread.new do
|
Thread.new do
|
||||||
$redis.srem('vmpooler__completed__' + pool, vm)
|
begin
|
||||||
$redis.hdel('vmpooler__active__' + pool, vm)
|
_destroy_vm(vm, pool, provider)
|
||||||
$redis.hset('vmpooler__vm__' + vm, 'destroy', Time.now)
|
rescue => err
|
||||||
|
$logger.log('d', "[!] [#{pool}] '#{vm}' failed while destroying the VM with an error: #{err}")
|
||||||
# Auto-expire metadata key
|
raise
|
||||||
$redis.expire('vmpooler__vm__' + vm, ($config[:redis]['data_ttl'].to_i * 60 * 60))
|
|
||||||
|
|
||||||
host = provider.find_vm(vm)
|
|
||||||
|
|
||||||
if host
|
|
||||||
start = Time.now
|
|
||||||
|
|
||||||
if
|
|
||||||
(host.runtime) &&
|
|
||||||
(host.runtime.powerState) &&
|
|
||||||
(host.runtime.powerState == 'poweredOn')
|
|
||||||
|
|
||||||
$logger.log('d', "[ ] [#{pool}] '#{vm}' is being shut down")
|
|
||||||
host.PowerOffVM_Task.wait_for_completion
|
|
||||||
end
|
|
||||||
|
|
||||||
host.Destroy_Task.wait_for_completion
|
|
||||||
finish = '%.2f' % (Time.now - start)
|
|
||||||
|
|
||||||
$logger.log('s', "[-] [#{pool}] '#{vm}' destroyed in #{finish} seconds")
|
|
||||||
$metrics.timing("destroy.#{pool}", finish)
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def create_vm_disk(vm, disk_size, provider)
|
def _destroy_vm(vm, pool, provider)
|
||||||
|
$redis.srem('vmpooler__completed__' + pool, vm)
|
||||||
|
$redis.hdel('vmpooler__active__' + pool, vm)
|
||||||
|
$redis.hset('vmpooler__vm__' + vm, 'destroy', Time.now)
|
||||||
|
|
||||||
|
# Auto-expire metadata key
|
||||||
|
$redis.expire('vmpooler__vm__' + vm, ($config[:redis]['data_ttl'].to_i * 60 * 60))
|
||||||
|
|
||||||
|
start = Time.now
|
||||||
|
|
||||||
|
provider.destroy_vm(pool, vm)
|
||||||
|
|
||||||
|
finish = '%.2f' % (Time.now - start)
|
||||||
|
$logger.log('s', "[-] [#{pool}] '#{vm}' destroyed in #{finish} seconds")
|
||||||
|
$metrics.timing("destroy.#{pool}", finish)
|
||||||
|
end
|
||||||
|
|
||||||
|
def create_vm_disk(pool_name, vm, disk_size, provider)
|
||||||
Thread.new do
|
Thread.new do
|
||||||
_create_vm_disk(vm, disk_size, provider)
|
begin
|
||||||
end
|
_create_vm_disk(pool_name, vm, disk_size, provider)
|
||||||
end
|
rescue => err
|
||||||
|
$logger.log('d', "[!] [#{pool_name}] '#{vm}' failed while creating disk: #{err}")
|
||||||
def _create_vm_disk(vm, disk_size, provider)
|
raise
|
||||||
host = provider.find_vm(vm)
|
|
||||||
|
|
||||||
if (host) && ((! disk_size.nil?) && (! disk_size.empty?) && (disk_size.to_i > 0))
|
|
||||||
$logger.log('s', "[ ] [disk_manager] '#{vm}' is attaching a #{disk_size}gb disk")
|
|
||||||
|
|
||||||
start = Time.now
|
|
||||||
|
|
||||||
template = $redis.hget('vmpooler__vm__' + vm, 'template')
|
|
||||||
datastore = nil
|
|
||||||
|
|
||||||
$config[:pools].each do |pool|
|
|
||||||
if pool['name'] == template
|
|
||||||
datastore = pool['datastore']
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
if ((! datastore.nil?) && (! datastore.empty?))
|
|
||||||
provider.add_disk(host, disk_size, datastore)
|
|
||||||
|
|
||||||
rdisks = $redis.hget('vmpooler__vm__' + vm, 'disk')
|
|
||||||
disks = rdisks ? rdisks.split(':') : []
|
|
||||||
disks.push("+#{disk_size}gb")
|
|
||||||
$redis.hset('vmpooler__vm__' + vm, 'disk', disks.join(':'))
|
|
||||||
|
|
||||||
finish = '%.2f' % (Time.now - start)
|
|
||||||
|
|
||||||
$logger.log('s', "[+] [disk_manager] '#{vm}' attached #{disk_size}gb disk in #{finish} seconds")
|
|
||||||
else
|
|
||||||
$logger.log('s', "[+] [disk_manager] '#{vm}' failed to attach disk")
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def create_vm_snapshot(vm, snapshot_name, provider)
|
def _create_vm_disk(pool_name, vm_name, disk_size, provider)
|
||||||
|
raise("Invalid disk size of '#{disk_size}' passed") if (disk_size.nil?) || (disk_size.empty?) || (disk_size.to_i <= 0)
|
||||||
|
|
||||||
|
$logger.log('s', "[ ] [disk_manager] '#{vm_name}' is attaching a #{disk_size}gb disk")
|
||||||
|
|
||||||
|
start = Time.now
|
||||||
|
|
||||||
|
result = provider.create_disk(pool_name, vm_name, disk_size.to_i)
|
||||||
|
|
||||||
|
finish = '%.2f' % (Time.now - start)
|
||||||
|
|
||||||
|
if result
|
||||||
|
rdisks = $redis.hget('vmpooler__vm__' + vm_name, 'disk')
|
||||||
|
disks = rdisks ? rdisks.split(':') : []
|
||||||
|
disks.push("+#{disk_size}gb")
|
||||||
|
$redis.hset('vmpooler__vm__' + vm_name, 'disk', disks.join(':'))
|
||||||
|
|
||||||
|
$logger.log('s', "[+] [disk_manager] '#{vm_name}' attached #{disk_size}gb disk in #{finish} seconds")
|
||||||
|
else
|
||||||
|
$logger.log('s', "[+] [disk_manager] '#{vm_name}' failed to attach disk")
|
||||||
|
end
|
||||||
|
|
||||||
|
result
|
||||||
|
end
|
||||||
|
|
||||||
|
def create_vm_snapshot(pool_name, vm, snapshot_name, provider)
|
||||||
Thread.new do
|
Thread.new do
|
||||||
_create_vm_snapshot(vm, snapshot_name, provider)
|
begin
|
||||||
end
|
_create_vm_snapshot(pool_name, vm, snapshot_name, provider)
|
||||||
end
|
rescue => err
|
||||||
|
$logger.log('d', "[!] [#{pool_name}] '#{vm}' failed while creating snapshot: #{err}")
|
||||||
def _create_vm_snapshot(vm, snapshot_name, provider)
|
raise
|
||||||
host = provider.find_vm(vm)
|
|
||||||
|
|
||||||
if (host) && ((! snapshot_name.nil?) && (! snapshot_name.empty?))
|
|
||||||
$logger.log('s', "[ ] [snapshot_manager] '#{vm}' is being snapshotted")
|
|
||||||
|
|
||||||
start = Time.now
|
|
||||||
|
|
||||||
host.CreateSnapshot_Task(
|
|
||||||
name: snapshot_name,
|
|
||||||
description: 'vmpooler',
|
|
||||||
memory: true,
|
|
||||||
quiesce: true
|
|
||||||
).wait_for_completion
|
|
||||||
|
|
||||||
finish = '%.2f' % (Time.now - start)
|
|
||||||
|
|
||||||
$redis.hset('vmpooler__vm__' + vm, 'snapshot:' + snapshot_name, Time.now.to_s)
|
|
||||||
|
|
||||||
$logger.log('s', "[+] [snapshot_manager] '#{vm}' snapshot created in #{finish} seconds")
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
def revert_vm_snapshot(vm, snapshot_name, provider)
|
|
||||||
Thread.new do
|
|
||||||
_revert_vm_snapshot(vm, snapshot_name, provider)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
def _revert_vm_snapshot(vm, snapshot_name, provider)
|
|
||||||
host = provider.find_vm(vm)
|
|
||||||
|
|
||||||
if host
|
|
||||||
snapshot = provider.find_snapshot(host, snapshot_name)
|
|
||||||
|
|
||||||
if snapshot
|
|
||||||
$logger.log('s', "[ ] [snapshot_manager] '#{vm}' is being reverted to snapshot '#{snapshot_name}'")
|
|
||||||
|
|
||||||
start = Time.now
|
|
||||||
|
|
||||||
snapshot.RevertToSnapshot_Task.wait_for_completion
|
|
||||||
|
|
||||||
finish = '%.2f' % (Time.now - start)
|
|
||||||
|
|
||||||
$logger.log('s', "[<] [snapshot_manager] '#{vm}' reverted to snapshot in #{finish} seconds")
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def _create_vm_snapshot(pool_name, vm_name, snapshot_name, provider)
|
||||||
|
$logger.log('s', "[ ] [snapshot_manager] 'Attempting to snapshot #{vm_name} in pool #{pool_name}")
|
||||||
|
start = Time.now
|
||||||
|
|
||||||
|
result = provider.create_snapshot(pool_name, vm_name, snapshot_name)
|
||||||
|
|
||||||
|
finish = '%.2f' % (Time.now - start)
|
||||||
|
|
||||||
|
if result
|
||||||
|
$redis.hset('vmpooler__vm__' + vm_name, 'snapshot:' + snapshot_name, Time.now.to_s)
|
||||||
|
$logger.log('s', "[+] [snapshot_manager] '#{vm_name}' snapshot created in #{finish} seconds")
|
||||||
|
else
|
||||||
|
$logger.log('s', "[+] [snapshot_manager] Failed to snapshot '#{vm_name}'")
|
||||||
|
end
|
||||||
|
|
||||||
|
result
|
||||||
|
end
|
||||||
|
|
||||||
|
def revert_vm_snapshot(pool_name, vm, snapshot_name, provider)
|
||||||
|
Thread.new do
|
||||||
|
begin
|
||||||
|
_revert_vm_snapshot(pool_name, vm, snapshot_name, provider)
|
||||||
|
rescue => err
|
||||||
|
$logger.log('d', "[!] [#{pool_name}] '#{vm}' failed while reverting snapshot: #{err}")
|
||||||
|
raise
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def _revert_vm_snapshot(pool_name, vm_name, snapshot_name, provider)
|
||||||
|
$logger.log('s', "[ ] [snapshot_manager] 'Attempting to revert #{vm_name}' in pool #{pool_name} to snapshot '#{snapshot_name}'")
|
||||||
|
start = Time.now
|
||||||
|
|
||||||
|
result = provider.revert_snapshot(pool_name, vm_name, snapshot_name)
|
||||||
|
|
||||||
|
finish = '%.2f' % (Time.now - start)
|
||||||
|
|
||||||
|
if result
|
||||||
|
$logger.log('s', "[+] [snapshot_manager] '#{vm_name}' reverted to snapshot '#{snapshot_name}' in #{finish} seconds")
|
||||||
|
else
|
||||||
|
$logger.log('s', "[+] [snapshot_manager] Failed to revert #{vm_name}' in pool #{pool_name} to snapshot '#{snapshot_name}'")
|
||||||
|
end
|
||||||
|
|
||||||
|
result
|
||||||
|
end
|
||||||
|
|
||||||
|
def get_pool_name_for_vm(vm_name)
|
||||||
|
# the 'template' is a bad name. Should really be 'poolname'
|
||||||
|
$redis.hget('vmpooler__vm__' + vm_name, 'template')
|
||||||
|
end
|
||||||
|
|
||||||
|
def get_provider_for_pool(pool_name)
|
||||||
|
provider_name = nil
|
||||||
|
$config[:pools].each do |pool|
|
||||||
|
next unless pool['name'] == pool_name
|
||||||
|
provider_name = pool['provider']
|
||||||
|
end
|
||||||
|
return nil if provider_name.nil?
|
||||||
|
|
||||||
|
$providers[provider_name]
|
||||||
|
end
|
||||||
|
|
||||||
def check_disk_queue(maxloop = 0, loop_delay = 5)
|
def check_disk_queue(maxloop = 0, loop_delay = 5)
|
||||||
$logger.log('d', "[*] [disk_manager] starting worker thread")
|
$logger.log('d', "[*] [disk_manager] starting worker thread")
|
||||||
|
|
||||||
$providers['disk_manager'] ||= Vmpooler::VsphereHelper.new $config, $metrics
|
|
||||||
$threads['disk_manager'] = Thread.new do
|
$threads['disk_manager'] = Thread.new do
|
||||||
loop_count = 1
|
loop_count = 1
|
||||||
loop do
|
loop do
|
||||||
_check_disk_queue $providers['disk_manager']
|
_check_disk_queue
|
||||||
sleep(loop_delay)
|
sleep(loop_delay)
|
||||||
|
|
||||||
unless maxloop.zero?
|
unless maxloop.zero?
|
||||||
|
|
@ -442,15 +385,20 @@ module Vmpooler
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def _check_disk_queue(provider)
|
def _check_disk_queue
|
||||||
vm = $redis.spop('vmpooler__tasks__disk')
|
task_detail = $redis.spop('vmpooler__tasks__disk')
|
||||||
|
unless task_detail.nil?
|
||||||
unless vm.nil?
|
|
||||||
begin
|
begin
|
||||||
vm_name, disk_size = vm.split(':')
|
vm_name, disk_size = task_detail.split(':')
|
||||||
create_vm_disk(vm_name, disk_size, provider)
|
pool_name = get_pool_name_for_vm(vm_name)
|
||||||
rescue
|
raise("Unable to determine which pool #{vm_name} is a member of") if pool_name.nil?
|
||||||
$logger.log('s', "[!] [disk_manager] disk creation appears to have failed")
|
|
||||||
|
provider = get_provider_for_pool(pool_name)
|
||||||
|
raise("Missing Provider for vm #{vm_name} in pool #{pool_name}") if provider.nil?
|
||||||
|
|
||||||
|
create_vm_disk(pool_name, vm_name, disk_size, provider)
|
||||||
|
rescue => err
|
||||||
|
$logger.log('s', "[!] [disk_manager] disk creation appears to have failed: #{err}")
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
@ -458,12 +406,10 @@ module Vmpooler
|
||||||
def check_snapshot_queue(maxloop = 0, loop_delay = 5)
|
def check_snapshot_queue(maxloop = 0, loop_delay = 5)
|
||||||
$logger.log('d', "[*] [snapshot_manager] starting worker thread")
|
$logger.log('d', "[*] [snapshot_manager] starting worker thread")
|
||||||
|
|
||||||
$providers['snapshot_manager'] ||= Vmpooler::VsphereHelper.new $config, $metrics
|
|
||||||
|
|
||||||
$threads['snapshot_manager'] = Thread.new do
|
$threads['snapshot_manager'] = Thread.new do
|
||||||
loop_count = 1
|
loop_count = 1
|
||||||
loop do
|
loop do
|
||||||
_check_snapshot_queue $providers['snapshot_manager']
|
_check_snapshot_queue
|
||||||
sleep(loop_delay)
|
sleep(loop_delay)
|
||||||
|
|
||||||
unless maxloop.zero?
|
unless maxloop.zero?
|
||||||
|
|
@ -474,26 +420,38 @@ module Vmpooler
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def _check_snapshot_queue(provider)
|
def _check_snapshot_queue
|
||||||
vm = $redis.spop('vmpooler__tasks__snapshot')
|
task_detail = $redis.spop('vmpooler__tasks__snapshot')
|
||||||
|
|
||||||
unless vm.nil?
|
unless task_detail.nil?
|
||||||
begin
|
begin
|
||||||
vm_name, snapshot_name = vm.split(':')
|
vm_name, snapshot_name = task_detail.split(':')
|
||||||
create_vm_snapshot(vm_name, snapshot_name, provider)
|
pool_name = get_pool_name_for_vm(vm_name)
|
||||||
rescue
|
raise("Unable to determine which pool #{vm_name} is a member of") if pool_name.nil?
|
||||||
$logger.log('s', "[!] [snapshot_manager] snapshot appears to have failed")
|
|
||||||
|
provider = get_provider_for_pool(pool_name)
|
||||||
|
raise("Missing Provider for vm #{vm_name} in pool #{pool_name}") if provider.nil?
|
||||||
|
|
||||||
|
create_vm_snapshot(pool_name, vm_name, snapshot_name, provider)
|
||||||
|
rescue => err
|
||||||
|
$logger.log('s', "[!] [snapshot_manager] snapshot create appears to have failed: #{err}")
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
vm = $redis.spop('vmpooler__tasks__snapshot-revert')
|
task_detail = $redis.spop('vmpooler__tasks__snapshot-revert')
|
||||||
|
|
||||||
unless vm.nil?
|
unless task_detail.nil?
|
||||||
begin
|
begin
|
||||||
vm_name, snapshot_name = vm.split(':')
|
vm_name, snapshot_name = task_detail.split(':')
|
||||||
revert_vm_snapshot(vm_name, snapshot_name, provider)
|
pool_name = get_pool_name_for_vm(vm_name)
|
||||||
rescue
|
raise("Unable to determine which pool #{vm_name} is a member of") if pool_name.nil?
|
||||||
$logger.log('s', "[!] [snapshot_manager] snapshot revert appears to have failed")
|
|
||||||
|
provider = get_provider_for_pool(pool_name)
|
||||||
|
raise("Missing Provider for vm #{vm_name} in pool #{pool_name}") if provider.nil?
|
||||||
|
|
||||||
|
revert_vm_snapshot(pool_name, vm_name, snapshot_name, provider)
|
||||||
|
rescue => err
|
||||||
|
$logger.log('s', "[!] [snapshot_manager] snapshot revert appears to have failed: #{err}")
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
@ -504,50 +462,46 @@ module Vmpooler
|
||||||
migration_limit if migration_limit >= 1
|
migration_limit if migration_limit >= 1
|
||||||
end
|
end
|
||||||
|
|
||||||
def migrate_vm(vm, pool, provider)
|
def migrate_vm(vm_name, pool_name, provider)
|
||||||
Thread.new do
|
Thread.new do
|
||||||
_migrate_vm(vm, pool, provider)
|
begin
|
||||||
|
_migrate_vm(vm_name, pool_name, provider)
|
||||||
|
rescue => err
|
||||||
|
$logger.log('s', "[x] [#{pool_name}] '#{vm_name}' migration failed with an error: #{err}")
|
||||||
|
remove_vmpooler_migration_vm(pool_name, vm_name)
|
||||||
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def _migrate_vm(vm, pool, provider)
|
def _migrate_vm(vm_name, pool_name, provider)
|
||||||
begin
|
$redis.srem('vmpooler__migrating__' + pool_name, vm_name)
|
||||||
$redis.srem('vmpooler__migrating__' + pool, vm)
|
|
||||||
vm_object = provider.find_vm(vm)
|
|
||||||
parent_host, parent_host_name = get_vm_host_info(vm_object)
|
|
||||||
migration_limit = migration_limit $config[:config]['migration_limit']
|
|
||||||
migration_count = $redis.scard('vmpooler__migration')
|
|
||||||
|
|
||||||
if ! migration_limit
|
parent_host_name = provider.get_vm_host(pool_name, vm_name)
|
||||||
$logger.log('s', "[ ] [#{pool}] '#{vm}' is running on #{parent_host_name}")
|
raise('Unable to determine which host the VM is running on') if parent_host_name.nil?
|
||||||
|
migration_limit = migration_limit $config[:config]['migration_limit']
|
||||||
|
migration_count = $redis.scard('vmpooler__migration')
|
||||||
|
|
||||||
|
if ! migration_limit
|
||||||
|
$logger.log('s', "[ ] [#{pool_name}] '#{vm_name}' is running on #{parent_host_name}")
|
||||||
|
return
|
||||||
|
else
|
||||||
|
if migration_count >= migration_limit
|
||||||
|
$logger.log('s', "[ ] [#{pool_name}] '#{vm_name}' is running on #{parent_host_name}. No migration will be evaluated since the migration_limit has been reached")
|
||||||
return
|
return
|
||||||
else
|
else
|
||||||
if migration_count >= migration_limit
|
$redis.sadd('vmpooler__migration', vm_name)
|
||||||
$logger.log('s', "[ ] [#{pool}] '#{vm}' is running on #{parent_host_name}. No migration will be evaluated since the migration_limit has been reached")
|
host_name = provider.find_least_used_compatible_host(vm_name)
|
||||||
return
|
if host_name == parent_host_name
|
||||||
|
$logger.log('s', "[ ] [#{pool_name}] No migration required for '#{vm_name}' running on #{parent_host_name}")
|
||||||
else
|
else
|
||||||
$redis.sadd('vmpooler__migration', vm)
|
finish = migrate_vm_and_record_timing(vm_name, pool_name, parent_host_name, host_name, provider)
|
||||||
host, host_name = provider.find_least_used_compatible_host(vm_object)
|
$logger.log('s', "[>] [#{pool_name}] '#{vm_name}' migrated from #{parent_host_name} to #{host_name} in #{finish} seconds")
|
||||||
if host == parent_host
|
|
||||||
$logger.log('s', "[ ] [#{pool}] No migration required for '#{vm}' running on #{parent_host_name}")
|
|
||||||
else
|
|
||||||
finish = migrate_vm_and_record_timing(vm_object, vm, pool, host, parent_host_name, host_name, provider)
|
|
||||||
$logger.log('s', "[>] [#{pool}] '#{vm}' migrated from #{parent_host_name} to #{host_name} in #{finish} seconds")
|
|
||||||
end
|
|
||||||
remove_vmpooler_migration_vm(pool, vm)
|
|
||||||
end
|
end
|
||||||
|
remove_vmpooler_migration_vm(pool_name, vm_name)
|
||||||
end
|
end
|
||||||
rescue => err
|
|
||||||
$logger.log('s', "[x] [#{pool}] '#{vm}' migration failed with an error: #{err}")
|
|
||||||
remove_vmpooler_migration_vm(pool, vm)
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def get_vm_host_info(vm_object)
|
|
||||||
parent_host = vm_object.summary.runtime.host
|
|
||||||
[parent_host, parent_host.name]
|
|
||||||
end
|
|
||||||
|
|
||||||
def remove_vmpooler_migration_vm(pool, vm)
|
def remove_vmpooler_migration_vm(pool, vm)
|
||||||
begin
|
begin
|
||||||
$redis.srem('vmpooler__migration', vm)
|
$redis.srem('vmpooler__migration', vm)
|
||||||
|
|
@ -556,11 +510,11 @@ module Vmpooler
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def migrate_vm_and_record_timing(vm_object, vm_name, pool, host, source_host_name, dest_host_name, provider)
|
def migrate_vm_and_record_timing(vm_name, pool_name, source_host_name, dest_host_name, provider)
|
||||||
start = Time.now
|
start = Time.now
|
||||||
provider.migrate_vm_host(vm_object, host)
|
provider.migrate_vm_to_host(pool_name, vm_name, dest_host_name)
|
||||||
finish = '%.2f' % (Time.now - start)
|
finish = '%.2f' % (Time.now - start)
|
||||||
$metrics.timing("migrate.#{pool}", finish)
|
$metrics.timing("migrate.#{pool_name}", finish)
|
||||||
$metrics.increment("migrate_from.#{source_host_name}")
|
$metrics.increment("migrate_from.#{source_host_name}")
|
||||||
$metrics.increment("migrate_to.#{dest_host_name}")
|
$metrics.increment("migrate_to.#{dest_host_name}")
|
||||||
checkout_to_migration = '%.2f' % (Time.now - Time.parse($redis.hget("vmpooler__vm__#{vm_name}", 'checkout')))
|
checkout_to_migration = '%.2f' % (Time.now - Time.parse($redis.hget("vmpooler__vm__#{vm_name}", 'checkout')))
|
||||||
|
|
@ -572,18 +526,24 @@ module Vmpooler
|
||||||
def check_pool(pool, maxloop = 0, loop_delay = 5)
|
def check_pool(pool, maxloop = 0, loop_delay = 5)
|
||||||
$logger.log('d', "[*] [#{pool['name']}] starting worker thread")
|
$logger.log('d', "[*] [#{pool['name']}] starting worker thread")
|
||||||
|
|
||||||
$providers[pool['name']] ||= Vmpooler::VsphereHelper.new $config, $metrics
|
|
||||||
|
|
||||||
$threads[pool['name']] = Thread.new do
|
$threads[pool['name']] = Thread.new do
|
||||||
loop_count = 1
|
begin
|
||||||
loop do
|
loop_count = 1
|
||||||
_check_pool(pool, $providers[pool['name']])
|
provider = get_provider_for_pool(pool['name'])
|
||||||
sleep(loop_delay)
|
raise("Could not find provider '#{pool['provider']}") if provider.nil?
|
||||||
|
loop do
|
||||||
|
_check_pool(pool, provider)
|
||||||
|
|
||||||
unless maxloop.zero?
|
sleep(loop_delay)
|
||||||
break if loop_count >= maxloop
|
|
||||||
loop_count += 1
|
unless maxloop.zero?
|
||||||
|
break if loop_count >= maxloop
|
||||||
|
loop_count += 1
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
rescue => err
|
||||||
|
$logger.log('s', "[!] [#{pool['name']}] Error while checking the pool: #{err}")
|
||||||
|
raise
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
@ -592,9 +552,7 @@ module Vmpooler
|
||||||
# INVENTORY
|
# INVENTORY
|
||||||
inventory = {}
|
inventory = {}
|
||||||
begin
|
begin
|
||||||
base = provider.find_folder(pool['folder'])
|
provider.vms_in_pool(pool['name']).each do |vm|
|
||||||
|
|
||||||
base.childEntity.each do |vm|
|
|
||||||
if
|
if
|
||||||
(! $redis.sismember('vmpooler__running__' + pool['name'], vm['name'])) &&
|
(! $redis.sismember('vmpooler__running__' + pool['name'], vm['name'])) &&
|
||||||
(! $redis.sismember('vmpooler__ready__' + pool['name'], vm['name'])) &&
|
(! $redis.sismember('vmpooler__ready__' + pool['name'], vm['name'])) &&
|
||||||
|
|
@ -673,7 +631,7 @@ module Vmpooler
|
||||||
# DISCOVERED
|
# DISCOVERED
|
||||||
begin
|
begin
|
||||||
$redis.smembers("vmpooler__discovered__#{pool['name']}").each do |vm|
|
$redis.smembers("vmpooler__discovered__#{pool['name']}").each do |vm|
|
||||||
%w(pending ready running completed).each do |queue|
|
%w[pending ready running completed].each do |queue|
|
||||||
if $redis.sismember("vmpooler__#{queue}__#{pool['name']}", vm)
|
if $redis.sismember("vmpooler__#{queue}__#{pool['name']}", vm)
|
||||||
$logger.log('d', "[!] [#{pool['name']}] '#{vm}' found in '#{queue}', removed from 'discovered' queue")
|
$logger.log('d', "[!] [#{pool['name']}] '#{vm}' found in '#{queue}', removed from 'discovered' queue")
|
||||||
$redis.srem("vmpooler__discovered__#{pool['name']}", vm)
|
$redis.srem("vmpooler__discovered__#{pool['name']}", vm)
|
||||||
|
|
@ -736,6 +694,17 @@ module Vmpooler
|
||||||
raise
|
raise
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def create_provider_object(config, logger, metrics, provider_name, options)
|
||||||
|
case provider_name
|
||||||
|
when 'vsphere'
|
||||||
|
Vmpooler::PoolManager::Provider::VSphere.new(config, logger, metrics, provider_name, options)
|
||||||
|
when 'dummy'
|
||||||
|
Vmpooler::PoolManager::Provider::Dummy.new(config, logger, metrics, provider_name, options)
|
||||||
|
else
|
||||||
|
raise("Provider '#{provider_name}' is unknown")
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
def execute!(maxloop = 0, loop_delay = 1)
|
def execute!(maxloop = 0, loop_delay = 1)
|
||||||
$logger.log('d', 'starting vmpooler')
|
$logger.log('d', 'starting vmpooler')
|
||||||
|
|
||||||
|
|
@ -744,6 +713,32 @@ module Vmpooler
|
||||||
# Clear out vmpooler__migrations since stale entries may be left after a restart
|
# Clear out vmpooler__migrations since stale entries may be left after a restart
|
||||||
$redis.del('vmpooler__migration')
|
$redis.del('vmpooler__migration')
|
||||||
|
|
||||||
|
# Copy vSphere settings to correct location. This happens with older configuration files
|
||||||
|
if !$config[:vsphere].nil? && ($config[:providers].nil? || $config[:providers][:vsphere].nil?)
|
||||||
|
$logger.log('d', "[!] Detected an older configuration file. Copying the settings from ':vsphere:' to ':providers:/:vsphere:'")
|
||||||
|
$config[:providers] = {} if $config[:providers].nil?
|
||||||
|
$config[:providers][:vsphere] = $config[:vsphere]
|
||||||
|
end
|
||||||
|
|
||||||
|
# Set default provider for all pools that do not have one defined
|
||||||
|
$config[:pools].each do |pool|
|
||||||
|
if pool['provider'].nil?
|
||||||
|
$logger.log('d', "[!] Setting provider for pool '#{pool['name']}' to 'vsphere' as default")
|
||||||
|
pool['provider'] = 'vsphere'
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Create the providers
|
||||||
|
$config[:pools].each do |pool|
|
||||||
|
provider_name = pool['provider']
|
||||||
|
begin
|
||||||
|
$providers[provider_name] = create_provider_object($config, $logger, $metrics, provider_name, {}) if $providers[provider_name].nil?
|
||||||
|
rescue => err
|
||||||
|
$logger.log('s', "Error while creating provider for pool #{pool['name']}: #{err}")
|
||||||
|
raise
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
loop_count = 1
|
loop_count = 1
|
||||||
loop do
|
loop do
|
||||||
if ! $threads['disk_manager']
|
if ! $threads['disk_manager']
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
%w(base vsphere).each do |lib|
|
%w[base dummy vsphere].each do |lib|
|
||||||
begin
|
begin
|
||||||
require "vmpooler/providers/#{lib}"
|
require "vmpooler/providers/#{lib}"
|
||||||
rescue LoadError
|
rescue LoadError
|
||||||
|
|
|
||||||
|
|
@ -18,7 +18,15 @@ module Vmpooler
|
||||||
@metrics = metrics
|
@metrics = metrics
|
||||||
@provider_name = name
|
@provider_name = name
|
||||||
|
|
||||||
|
# Ensure that there is not a nil provider configuration
|
||||||
|
@config[:providers] = {} if @config[:providers].nil?
|
||||||
|
@config[:providers][@provider_name] = {} if provider_config.nil?
|
||||||
|
|
||||||
|
# Ensure that there is not a nil pool configuration
|
||||||
|
@config[:pools] = {} if @config[:pools].nil?
|
||||||
|
|
||||||
@provider_options = options
|
@provider_options = options
|
||||||
|
logger.log('s', "[!] Creating provider '#{name}'")
|
||||||
end
|
end
|
||||||
|
|
||||||
# Helper Methods
|
# Helper Methods
|
||||||
|
|
@ -41,7 +49,7 @@ module Vmpooler
|
||||||
def provider_config
|
def provider_config
|
||||||
@config[:providers].each do |provider|
|
@config[:providers].each do |provider|
|
||||||
# Convert the symbol from the config into a string for comparison
|
# Convert the symbol from the config into a string for comparison
|
||||||
return provider[1] if provider[0].to_s == @provider_name
|
return (provider[1].nil? ? {} : provider[1]) if provider[0].to_s == @provider_name
|
||||||
end
|
end
|
||||||
|
|
||||||
nil
|
nil
|
||||||
|
|
@ -60,6 +68,16 @@ module Vmpooler
|
||||||
@provider_name
|
@provider_name
|
||||||
end
|
end
|
||||||
|
|
||||||
|
# returns
|
||||||
|
# Array[String] : Array of pool names this provider services
|
||||||
|
def provided_pools
|
||||||
|
list = []
|
||||||
|
@config[:pools].each do |pool|
|
||||||
|
list << pool['name'] if pool['provider'] == name
|
||||||
|
end
|
||||||
|
list
|
||||||
|
end
|
||||||
|
|
||||||
# Pool Manager Methods
|
# Pool Manager Methods
|
||||||
|
|
||||||
# inputs
|
# inputs
|
||||||
|
|
@ -146,8 +164,8 @@ module Vmpooler
|
||||||
# [String] new_snapshot_name : Name of the new snapshot to create
|
# [String] new_snapshot_name : Name of the new snapshot to create
|
||||||
# returns
|
# returns
|
||||||
# [Boolean] : true if success, false if snapshot could not be created
|
# [Boolean] : true if success, false if snapshot could not be created
|
||||||
|
# Raises RuntimeError if the Pool does not exist
|
||||||
# Raises RuntimeError if the VM does not exist
|
# Raises RuntimeError if the VM does not exist
|
||||||
# Raises RuntimeError if the snapshot already exists
|
|
||||||
def create_snapshot(_pool_name, _vm_name, _new_snapshot_name)
|
def create_snapshot(_pool_name, _vm_name, _new_snapshot_name)
|
||||||
raise("#{self.class.name} does not implement create_snapshot")
|
raise("#{self.class.name} does not implement create_snapshot")
|
||||||
end
|
end
|
||||||
|
|
@ -158,8 +176,9 @@ module Vmpooler
|
||||||
# [String] snapshot_name : Name of the snapshot to restore to
|
# [String] snapshot_name : Name of the snapshot to restore to
|
||||||
# returns
|
# returns
|
||||||
# [Boolean] : true if success, false if snapshot could not be revertted
|
# [Boolean] : true if success, false if snapshot could not be revertted
|
||||||
|
# Raises RuntimeError if the Pool does not exist
|
||||||
# Raises RuntimeError if the VM does not exist
|
# Raises RuntimeError if the VM does not exist
|
||||||
# Raises RuntimeError if the snapshot already exists
|
# Raises RuntimeError if the snapshot does not exist
|
||||||
def revert_snapshot(_pool_name, _vm_name, _snapshot_name)
|
def revert_snapshot(_pool_name, _vm_name, _snapshot_name)
|
||||||
raise("#{self.class.name} does not implement revert_snapshot")
|
raise("#{self.class.name} does not implement revert_snapshot")
|
||||||
end
|
end
|
||||||
|
|
|
||||||
402
lib/vmpooler/providers/dummy.rb
Normal file
402
lib/vmpooler/providers/dummy.rb
Normal file
|
|
@ -0,0 +1,402 @@
|
||||||
|
require 'yaml'
|
||||||
|
|
||||||
|
module Vmpooler
|
||||||
|
class PoolManager
|
||||||
|
class Provider
|
||||||
|
class Dummy < Vmpooler::PoolManager::Provider::Base
|
||||||
|
# Fake VM Provider for testing
|
||||||
|
|
||||||
|
def initialize(config, logger, metrics, name, options)
|
||||||
|
super(config, logger, metrics, name, options)
|
||||||
|
dummyfilename = provider_config['filename']
|
||||||
|
|
||||||
|
# This initial_state option is only intended to be used by spec tests
|
||||||
|
@dummylist = provider_options['initial_state'].nil? ? {} : provider_options['initial_state']
|
||||||
|
|
||||||
|
@dummylist = YAML.load_file(dummyfilename) if !dummyfilename.nil? && File.exist?(dummyfilename)
|
||||||
|
|
||||||
|
# Even though this code is using Mutexes, it's still no 100% atomic i.e. it's still possible for
|
||||||
|
# duplicate actions to put the @dummylist hashtable into a bad state, for example;
|
||||||
|
# Deleting a VM while it's in the middle of adding a disk.
|
||||||
|
@write_lock = Mutex.new
|
||||||
|
|
||||||
|
# Create a dummy connection pool
|
||||||
|
connpool_size = provider_config['connection_pool_size'].nil? ? 1 : provider_config['connection_pool_size'].to_i
|
||||||
|
connpool_timeout = provider_config['connection_pool_timeout'].nil? ? 10 : provider_config['connection_pool_timeout'].to_i
|
||||||
|
logger.log('d', "[#{name}] ConnPool - Creating a connection pool of size #{connpool_size} with timeout #{connpool_timeout}")
|
||||||
|
@connection_pool = Vmpooler::PoolManager::GenericConnectionPool.new(
|
||||||
|
metrics: metrics,
|
||||||
|
metric_prefix: "#{name}_provider_connection_pool",
|
||||||
|
size: connpool_size,
|
||||||
|
timeout: connpool_timeout
|
||||||
|
) do
|
||||||
|
# Create a mock connection object
|
||||||
|
new_conn = { create_timestamp: Time.now, conn_id: rand(2048).to_s }
|
||||||
|
logger.log('d', "[#{name}] ConnPool - Creating a connection object ID #{new_conn[:conn_id]}")
|
||||||
|
new_conn
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def name
|
||||||
|
'dummy'
|
||||||
|
end
|
||||||
|
|
||||||
|
def vms_in_pool(pool_name)
|
||||||
|
vmlist = []
|
||||||
|
|
||||||
|
@connection_pool.with_metrics do |_conn|
|
||||||
|
get_dummy_pool_object(pool_name).each do |vm|
|
||||||
|
vmlist << { 'name' => vm['name'] }
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
vmlist
|
||||||
|
end
|
||||||
|
|
||||||
|
def get_vm_host(pool_name, vm_name)
|
||||||
|
current_vm = nil
|
||||||
|
@connection_pool.with_metrics do |_conn|
|
||||||
|
current_vm = get_dummy_vm(pool_name, vm_name)
|
||||||
|
end
|
||||||
|
|
||||||
|
current_vm.nil? ? raise("VM #{vm_name} does not exist") : current_vm['vm_host']
|
||||||
|
end
|
||||||
|
|
||||||
|
def find_least_used_compatible_host(pool_name, vm_name)
|
||||||
|
current_vm = nil
|
||||||
|
@connection_pool.with_metrics do |_conn|
|
||||||
|
current_vm = get_dummy_vm(pool_name, vm_name)
|
||||||
|
end
|
||||||
|
|
||||||
|
# Unless migratevm_couldmove_percent is specified, don't migrate
|
||||||
|
return current_vm['vm_host'] if provider_config['migratevm_couldmove_percent'].nil?
|
||||||
|
|
||||||
|
# Only migrate if migratevm_couldmove_percent is met
|
||||||
|
return current_vm['vm_host'] if 1 + rand(100) > provider_config['migratevm_couldmove_percent']
|
||||||
|
|
||||||
|
# Simulate a 10 node cluster and randomly pick a different one
|
||||||
|
new_host = 'HOST' + (1 + rand(10)).to_s while new_host == current_vm['vm_host']
|
||||||
|
|
||||||
|
new_host
|
||||||
|
end
|
||||||
|
|
||||||
|
def migrate_vm_to_host(pool_name, vm_name, dest_host_name)
|
||||||
|
@connection_pool.with_metrics do |_conn|
|
||||||
|
current_vm = get_dummy_vm(pool_name, vm_name)
|
||||||
|
|
||||||
|
# Inject migration delay
|
||||||
|
unless provider_config['migratevm_max_time'].nil?
|
||||||
|
migrate_time = 1 + rand(provider_config['migratevm_max_time'])
|
||||||
|
sleep(migrate_time)
|
||||||
|
end
|
||||||
|
|
||||||
|
# Inject clone failure
|
||||||
|
unless provider_config['migratevm_fail_percent'].nil?
|
||||||
|
raise('Dummy Failure for migratevm_fail_percent') if 1 + rand(100) <= provider_config['migratevm_fail_percent']
|
||||||
|
end
|
||||||
|
|
||||||
|
@write_lock.synchronize do
|
||||||
|
current_vm = get_dummy_vm(pool_name, vm_name)
|
||||||
|
current_vm['vm_host'] = dest_host_name
|
||||||
|
write_backing_file
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
true
|
||||||
|
end
|
||||||
|
|
||||||
|
def get_vm(pool_name, vm_name)
|
||||||
|
obj = {}
|
||||||
|
@connection_pool.with_metrics do |_conn|
|
||||||
|
dummy = get_dummy_vm(pool_name, vm_name)
|
||||||
|
return nil if dummy.nil?
|
||||||
|
|
||||||
|
# Randomly power off the VM
|
||||||
|
unless dummy['powerstate'] != 'PoweredOn' || provider_config['getvm_poweroff_percent'].nil?
|
||||||
|
if 1 + rand(100) <= provider_config['getvm_poweroff_percent']
|
||||||
|
@write_lock.synchronize do
|
||||||
|
dummy = get_dummy_vm(pool_name, vm_name)
|
||||||
|
dummy['powerstate'] = 'PoweredOff'
|
||||||
|
write_backing_file
|
||||||
|
end
|
||||||
|
logger.log('d', "[ ] [#{dummy['poolname']}] '#{dummy['name']}' is being Dummy Powered Off")
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Randomly rename the host
|
||||||
|
unless dummy['hostname'] != dummy['name'] || provider_config['getvm_rename_percent'].nil?
|
||||||
|
if 1 + rand(100) <= provider_config['getvm_rename_percent']
|
||||||
|
@write_lock.synchronize do
|
||||||
|
dummy = get_dummy_vm(pool_name, vm_name)
|
||||||
|
dummy['hostname'] = 'DUMMY' + dummy['name']
|
||||||
|
write_backing_file
|
||||||
|
end
|
||||||
|
logger.log('d', "[ ] [#{dummy['poolname']}] '#{dummy['name']}' is being Dummy renamed")
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
obj['name'] = dummy['name']
|
||||||
|
obj['hostname'] = dummy['hostname']
|
||||||
|
obj['boottime'] = dummy['boottime']
|
||||||
|
obj['template'] = dummy['template']
|
||||||
|
obj['poolname'] = dummy['poolname']
|
||||||
|
obj['powerstate'] = dummy['powerstate']
|
||||||
|
obj['snapshots'] = dummy['snapshots']
|
||||||
|
end
|
||||||
|
|
||||||
|
obj
|
||||||
|
end
|
||||||
|
|
||||||
|
def create_vm(pool_name, dummy_hostname)
|
||||||
|
pool = pool_config(pool_name)
|
||||||
|
raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?
|
||||||
|
|
||||||
|
template_name = pool['template']
|
||||||
|
|
||||||
|
vm = {}
|
||||||
|
vm['name'] = dummy_hostname
|
||||||
|
vm['hostname'] = dummy_hostname
|
||||||
|
vm['domain'] = 'dummy.local'
|
||||||
|
# 'vm_template' is the name of the template to use to clone the VM from <----- Do we need this?!?!?
|
||||||
|
vm['vm_template'] = template_name
|
||||||
|
# 'template' is the Template name in VM Pooler API, in our case that's the poolname.
|
||||||
|
vm['template'] = pool_name
|
||||||
|
vm['poolname'] = pool_name
|
||||||
|
vm['ready'] = false
|
||||||
|
vm['boottime'] = Time.now
|
||||||
|
vm['powerstate'] = 'PoweredOn'
|
||||||
|
vm['vm_host'] = 'HOST1'
|
||||||
|
vm['dummy_state'] = 'UNKNOWN'
|
||||||
|
vm['snapshots'] = []
|
||||||
|
vm['disks'] = []
|
||||||
|
|
||||||
|
# Make sure the pool exists in the dummy list
|
||||||
|
@write_lock.synchronize do
|
||||||
|
get_dummy_pool_object(pool_name)
|
||||||
|
@dummylist['pool'][pool_name] << vm
|
||||||
|
write_backing_file
|
||||||
|
end
|
||||||
|
|
||||||
|
logger.log('d', "[ ] [#{pool_name}] '#{dummy_hostname}' is being cloned from '#{template_name}'")
|
||||||
|
|
||||||
|
@connection_pool.with_metrics do |_conn|
|
||||||
|
# Inject clone time delay
|
||||||
|
unless provider_config['createvm_max_time'].nil?
|
||||||
|
@write_lock.synchronize do
|
||||||
|
vm['dummy_state'] = 'CLONING'
|
||||||
|
write_backing_file
|
||||||
|
end
|
||||||
|
clone_time = 1 + rand(provider_config['createvm_max_time'])
|
||||||
|
sleep(clone_time)
|
||||||
|
end
|
||||||
|
|
||||||
|
begin
|
||||||
|
# Inject clone failure
|
||||||
|
unless provider_config['createvm_fail_percent'].nil?
|
||||||
|
raise('Dummy Failure for createvm_fail_percent') if 1 + rand(100) <= provider_config['createvm_fail_percent']
|
||||||
|
end
|
||||||
|
|
||||||
|
# Assert the VM is ready for use
|
||||||
|
@write_lock.synchronize do
|
||||||
|
vm['dummy_state'] = 'RUNNING'
|
||||||
|
write_backing_file
|
||||||
|
end
|
||||||
|
rescue => _err
|
||||||
|
@write_lock.synchronize do
|
||||||
|
remove_dummy_vm(pool_name, dummy_hostname)
|
||||||
|
write_backing_file
|
||||||
|
end
|
||||||
|
raise
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
get_vm(pool_name, dummy_hostname)
|
||||||
|
end
|
||||||
|
|
||||||
|
def create_disk(pool_name, vm_name, disk_size)
|
||||||
|
@connection_pool.with_metrics do |_conn|
|
||||||
|
vm_object = get_dummy_vm(pool_name, vm_name)
|
||||||
|
raise("VM #{vm_name} does not exist in Pool #{pool_name} for the provider #{name}") if vm_object.nil?
|
||||||
|
|
||||||
|
# Inject create time delay
|
||||||
|
unless provider_config['createdisk_max_time'].nil?
|
||||||
|
delay = 1 + rand(provider_config['createdisk_max_time'])
|
||||||
|
sleep(delay)
|
||||||
|
end
|
||||||
|
|
||||||
|
# Inject create failure
|
||||||
|
unless provider_config['createdisk_fail_percent'].nil?
|
||||||
|
raise('Dummy Failure for createdisk_fail_percent') if 1 + rand(100) <= provider_config['createdisk_fail_percent']
|
||||||
|
end
|
||||||
|
|
||||||
|
@write_lock.synchronize do
|
||||||
|
vm_object = get_dummy_vm(pool_name, vm_name)
|
||||||
|
vm_object['disks'] << disk_size
|
||||||
|
write_backing_file
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
true
|
||||||
|
end
|
||||||
|
|
||||||
|
def create_snapshot(pool_name, vm_name, snapshot_name)
|
||||||
|
@connection_pool.with_metrics do |_conn|
|
||||||
|
vm_object = get_dummy_vm(pool_name, vm_name)
|
||||||
|
raise("VM #{vm_name} does not exist in Pool #{pool_name} for the provider #{name}") if vm_object.nil?
|
||||||
|
|
||||||
|
# Inject create time delay
|
||||||
|
unless provider_config['createsnapshot_max_time'].nil?
|
||||||
|
delay = 1 + rand(provider_config['createsnapshot_max_time'])
|
||||||
|
sleep(delay)
|
||||||
|
end
|
||||||
|
|
||||||
|
# Inject create failure
|
||||||
|
unless provider_config['createsnapshot_fail_percent'].nil?
|
||||||
|
raise('Dummy Failure for createsnapshot_fail_percent') if 1 + rand(100) <= provider_config['createsnapshot_fail_percent']
|
||||||
|
end
|
||||||
|
|
||||||
|
@write_lock.synchronize do
|
||||||
|
vm_object = get_dummy_vm(pool_name, vm_name)
|
||||||
|
vm_object['snapshots'] << snapshot_name
|
||||||
|
write_backing_file
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
true
|
||||||
|
end
|
||||||
|
|
||||||
|
def revert_snapshot(pool_name, vm_name, snapshot_name)
|
||||||
|
vm_object = nil
|
||||||
|
@connection_pool.with_metrics do |_conn|
|
||||||
|
vm_object = get_dummy_vm(pool_name, vm_name)
|
||||||
|
raise("VM #{vm_name} does not exist in Pool #{pool_name} for the provider #{name}") if vm_object.nil?
|
||||||
|
|
||||||
|
# Inject create time delay
|
||||||
|
unless provider_config['revertsnapshot_max_time'].nil?
|
||||||
|
delay = 1 + rand(provider_config['revertsnapshot_max_time'])
|
||||||
|
sleep(delay)
|
||||||
|
end
|
||||||
|
|
||||||
|
# Inject create failure
|
||||||
|
unless provider_config['revertsnapshot_fail_percent'].nil?
|
||||||
|
raise('Dummy Failure for revertsnapshot_fail_percent') if 1 + rand(100) <= provider_config['revertsnapshot_fail_percent']
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
vm_object['snapshots'].include?(snapshot_name)
|
||||||
|
end
|
||||||
|
|
||||||
|
def destroy_vm(pool_name, vm_name)
|
||||||
|
@connection_pool.with_metrics do |_conn|
|
||||||
|
vm = get_dummy_vm(pool_name, vm_name)
|
||||||
|
return false if vm.nil?
|
||||||
|
return false if vm['poolname'] != pool_name
|
||||||
|
|
||||||
|
# Shutdown down the VM if it's poweredOn
|
||||||
|
if vm['powerstate'] == 'PoweredOn'
|
||||||
|
logger.log('d', "[ ] [#{pool_name}] '#{vm_name}' is being shut down")
|
||||||
|
|
||||||
|
# Inject shutdown delay time
|
||||||
|
unless provider_config['destroyvm_max_shutdown_time'].nil?
|
||||||
|
shutdown_time = 1 + rand(provider_config['destroyvm_max_shutdown_time'])
|
||||||
|
sleep(shutdown_time)
|
||||||
|
end
|
||||||
|
|
||||||
|
@write_lock.synchronize do
|
||||||
|
vm = get_dummy_vm(pool_name, vm_name)
|
||||||
|
vm['powerstate'] = 'PoweredOff'
|
||||||
|
write_backing_file
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Inject destroy VM delay
|
||||||
|
unless provider_config['destroyvm_max_time'].nil?
|
||||||
|
destroy_time = 1 + rand(provider_config['destroyvm_max_time'])
|
||||||
|
sleep(destroy_time)
|
||||||
|
end
|
||||||
|
|
||||||
|
# Inject destroy VM failure
|
||||||
|
unless provider_config['destroyvm_fail_percent'].nil?
|
||||||
|
raise('Dummy Failure for migratevm_fail_percent') if 1 + rand(100) <= provider_config['destroyvm_fail_percent']
|
||||||
|
end
|
||||||
|
|
||||||
|
# 'Destroy' the VM
|
||||||
|
@write_lock.synchronize do
|
||||||
|
remove_dummy_vm(pool_name, vm_name)
|
||||||
|
write_backing_file
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
true
|
||||||
|
end
|
||||||
|
|
||||||
|
def vm_ready?(pool_name, vm_name)
|
||||||
|
@connection_pool.with_metrics do |_conn|
|
||||||
|
vm_object = get_dummy_vm(pool_name, vm_name)
|
||||||
|
return false if vm_object.nil?
|
||||||
|
return false if vm_object['poolname'] != pool_name
|
||||||
|
return true if vm_object['ready']
|
||||||
|
|
||||||
|
timeout = provider_config['is_ready_timeout'] || 5
|
||||||
|
|
||||||
|
Timeout.timeout(timeout) do
|
||||||
|
while vm_object['dummy_state'] != 'RUNNING'
|
||||||
|
sleep(2)
|
||||||
|
vm_object = get_dummy_vm(pool_name, vm_name)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Simulate how long it takes from a VM being powered on until
|
||||||
|
# it's ready to receive a connection
|
||||||
|
sleep(2)
|
||||||
|
|
||||||
|
unless provider_config['vmready_fail_percent'].nil?
|
||||||
|
raise('Dummy Failure for vmready_fail_percent') if 1 + rand(100) <= provider_config['vmready_fail_percent']
|
||||||
|
end
|
||||||
|
|
||||||
|
@write_lock.synchronize do
|
||||||
|
vm_object['ready'] = true
|
||||||
|
write_backing_file
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
true
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
# Note - NEVER EVER use the @write_lock object in the private methods!!!! Deadlocks will ensue
|
||||||
|
|
||||||
|
def remove_dummy_vm(pool_name, vm_name)
|
||||||
|
return if @dummylist['pool'][pool_name].nil?
|
||||||
|
new_poollist = @dummylist['pool'][pool_name].delete_if { |vm| vm['name'] == vm_name }
|
||||||
|
@dummylist['pool'][pool_name] = new_poollist
|
||||||
|
end
|
||||||
|
|
||||||
|
# Get's the pool config safely from the in-memory hashtable
|
||||||
|
def get_dummy_pool_object(pool_name)
|
||||||
|
@dummylist['pool'] = {} if @dummylist['pool'].nil?
|
||||||
|
@dummylist['pool'][pool_name] = [] if @dummylist['pool'][pool_name].nil?
|
||||||
|
|
||||||
|
@dummylist['pool'][pool_name]
|
||||||
|
end
|
||||||
|
|
||||||
|
def get_dummy_vm(pool_name, vm_name)
|
||||||
|
return nil if @dummylist['pool'][pool_name].nil?
|
||||||
|
|
||||||
|
@dummylist['pool'][pool_name].each do |poolvm|
|
||||||
|
return poolvm if poolvm['name'] == vm_name
|
||||||
|
end
|
||||||
|
|
||||||
|
nil
|
||||||
|
end
|
||||||
|
|
||||||
|
def write_backing_file
|
||||||
|
dummyfilename = provider_config['filename']
|
||||||
|
return if dummyfilename.nil?
|
||||||
|
File.open(dummyfilename, 'w') { |file| file.write(YAML.dump(@dummylist)) }
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
@ -2,10 +2,37 @@ module Vmpooler
|
||||||
class PoolManager
|
class PoolManager
|
||||||
class Provider
|
class Provider
|
||||||
class VSphere < Vmpooler::PoolManager::Provider::Base
|
class VSphere < Vmpooler::PoolManager::Provider::Base
|
||||||
|
# The connection_pool method is normally used only for testing
|
||||||
|
attr_reader :connection_pool
|
||||||
|
|
||||||
def initialize(config, logger, metrics, name, options)
|
def initialize(config, logger, metrics, name, options)
|
||||||
super(config, logger, metrics, name, options)
|
super(config, logger, metrics, name, options)
|
||||||
@credentials = provider_config
|
|
||||||
@conf = global_config[:config]
|
task_limit = global_config[:config].nil? || global_config[:config]['task_limit'].nil? ? 10 : global_config[:config]['task_limit'].to_i
|
||||||
|
# The default connection pool size is:
|
||||||
|
# Whatever is biggest from:
|
||||||
|
# - How many pools this provider services
|
||||||
|
# - Maximum number of cloning tasks allowed
|
||||||
|
# - Need at least 2 connections so that a pool can have inventory functions performed while cloning etc.
|
||||||
|
default_connpool_size = [provided_pools.count, task_limit, 2].max
|
||||||
|
connpool_size = provider_config['connection_pool_size'].nil? ? default_connpool_size : provider_config['connection_pool_size'].to_i
|
||||||
|
# The default connection pool timeout should be quite large - 60 seconds
|
||||||
|
connpool_timeout = provider_config['connection_pool_timeout'].nil? ? 60 : provider_config['connection_pool_timeout'].to_i
|
||||||
|
logger.log('d', "[#{name}] ConnPool - Creating a connection pool of size #{connpool_size} with timeout #{connpool_timeout}")
|
||||||
|
@connection_pool = Vmpooler::PoolManager::GenericConnectionPool.new(
|
||||||
|
metrics: metrics,
|
||||||
|
metric_prefix: "#{name}_provider_connection_pool",
|
||||||
|
size: connpool_size,
|
||||||
|
timeout: connpool_timeout
|
||||||
|
) do
|
||||||
|
logger.log('d', "[#{name}] Connection Pool - Creating a connection object")
|
||||||
|
# Need to wrap the vSphere connection object in another object. The generic connection pooler will preserve
|
||||||
|
# the object reference for the connection, which means it cannot "reconnect" by creating an entirely new connection
|
||||||
|
# object. Instead by wrapping it in a Hash, the Hash object reference itself never changes but the content of the
|
||||||
|
# Hash can change, and is preserved across invocations.
|
||||||
|
new_conn = connect_to_vsphere
|
||||||
|
{ connection: new_conn }
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def name
|
def name
|
||||||
|
|
@ -13,154 +40,162 @@ module Vmpooler
|
||||||
end
|
end
|
||||||
|
|
||||||
def vms_in_pool(pool_name)
|
def vms_in_pool(pool_name)
|
||||||
connection = get_connection
|
|
||||||
|
|
||||||
foldername = pool_config(pool_name)['folder']
|
|
||||||
folder_object = find_folder(foldername, connection)
|
|
||||||
|
|
||||||
vms = []
|
vms = []
|
||||||
|
@connection_pool.with_metrics do |pool_object|
|
||||||
|
connection = ensured_vsphere_connection(pool_object)
|
||||||
|
foldername = pool_config(pool_name)['folder']
|
||||||
|
folder_object = find_folder(foldername, connection)
|
||||||
|
|
||||||
return vms if folder_object.nil?
|
return vms if folder_object.nil?
|
||||||
|
|
||||||
folder_object.childEntity.each do |vm|
|
folder_object.childEntity.each do |vm|
|
||||||
vms << { 'name' => vm.name }
|
vms << { 'name' => vm.name }
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
vms
|
vms
|
||||||
end
|
end
|
||||||
|
|
||||||
def get_vm_host(_pool_name, vm_name)
|
def get_vm_host(_pool_name, vm_name)
|
||||||
connection = get_connection
|
|
||||||
|
|
||||||
vm_object = find_vm(vm_name, connection)
|
|
||||||
return nil if vm_object.nil?
|
|
||||||
|
|
||||||
host_name = nil
|
host_name = nil
|
||||||
host_name = vm_object.summary.runtime.host.name if vm_object.summary && vm_object.summary.runtime && vm_object.summary.runtime.host
|
|
||||||
|
|
||||||
|
@connection_pool.with_metrics do |pool_object|
|
||||||
|
connection = ensured_vsphere_connection(pool_object)
|
||||||
|
vm_object = find_vm(vm_name, connection)
|
||||||
|
return host_name if vm_object.nil?
|
||||||
|
|
||||||
|
host_name = vm_object.summary.runtime.host.name if vm_object.summary && vm_object.summary.runtime && vm_object.summary.runtime.host
|
||||||
|
end
|
||||||
host_name
|
host_name
|
||||||
end
|
end
|
||||||
|
|
||||||
def find_least_used_compatible_host(_pool_name, vm_name)
|
def find_least_used_compatible_host(_pool_name, vm_name)
|
||||||
connection = get_connection
|
hostname = nil
|
||||||
|
@connection_pool.with_metrics do |pool_object|
|
||||||
|
connection = ensured_vsphere_connection(pool_object)
|
||||||
|
vm_object = find_vm(vm_name, connection)
|
||||||
|
|
||||||
vm_object = find_vm(vm_name, connection)
|
return hostname if vm_object.nil?
|
||||||
|
host_object = find_least_used_vpshere_compatible_host(vm_object)
|
||||||
|
|
||||||
return nil if vm_object.nil?
|
return hostname if host_object.nil?
|
||||||
host_object = find_least_used_vpshere_compatible_host(vm_object)
|
hostname = host_object[0].name
|
||||||
|
end
|
||||||
return nil if host_object.nil?
|
hostname
|
||||||
host_object[0].name
|
|
||||||
end
|
end
|
||||||
|
|
||||||
def migrate_vm_to_host(pool_name, vm_name, dest_host_name)
|
def migrate_vm_to_host(pool_name, vm_name, dest_host_name)
|
||||||
pool = pool_config(pool_name)
|
pool = pool_config(pool_name)
|
||||||
raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?
|
raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?
|
||||||
|
|
||||||
connection = get_connection
|
@connection_pool.with_metrics do |pool_object|
|
||||||
|
connection = ensured_vsphere_connection(pool_object)
|
||||||
|
vm_object = find_vm(vm_name, connection)
|
||||||
|
raise("VM #{vm_name} does not exist in Pool #{pool_name} for the provider #{name}") if vm_object.nil?
|
||||||
|
|
||||||
vm_object = find_vm(vm_name, connection)
|
target_cluster_name = get_target_cluster_from_config(pool_name)
|
||||||
raise("VM #{vm_name} does not exist in Pool #{pool_name} for the provider #{name}") if vm_object.nil?
|
cluster = find_cluster(target_cluster_name, connection)
|
||||||
|
raise("Pool #{pool_name} specifies cluster #{target_cluster_name} which does not exist for the provider #{name}") if cluster.nil?
|
||||||
|
|
||||||
target_cluster_name = get_target_cluster_from_config(pool_name)
|
# Go through each host and initiate a migration when the correct host name is found
|
||||||
cluster = find_cluster(target_cluster_name, connection)
|
cluster.host.each do |host|
|
||||||
raise("Pool #{pool_name} specifies cluster #{target_cluster_name} which does not exist for the provider #{name}") if cluster.nil?
|
if host.name == dest_host_name
|
||||||
|
migrate_vm_host(vm_object, host)
|
||||||
# Go through each host and initiate a migration when the correct host name is found
|
return true
|
||||||
cluster.host.each do |host|
|
end
|
||||||
if host.name == dest_host_name
|
|
||||||
migrate_vm_host(vm_object, host)
|
|
||||||
return true
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
false
|
false
|
||||||
end
|
end
|
||||||
|
|
||||||
def get_vm(_pool_name, vm_name)
|
def get_vm(_pool_name, vm_name)
|
||||||
connection = get_connection
|
vm_hash = nil
|
||||||
|
@connection_pool.with_metrics do |pool_object|
|
||||||
|
connection = ensured_vsphere_connection(pool_object)
|
||||||
|
vm_object = find_vm(vm_name, connection)
|
||||||
|
return vm_hash if vm_object.nil?
|
||||||
|
|
||||||
vm_object = find_vm(vm_name, connection)
|
vm_folder_path = get_vm_folder_path(vm_object)
|
||||||
return nil if vm_object.nil?
|
# Find the pool name based on the folder path
|
||||||
|
pool_name = nil
|
||||||
vm_folder_path = get_vm_folder_path(vm_object)
|
template_name = nil
|
||||||
# Find the pool name based on the folder path
|
global_config[:pools].each do |pool|
|
||||||
pool_name = nil
|
if pool['folder'] == vm_folder_path
|
||||||
template_name = nil
|
pool_name = pool['name']
|
||||||
global_config[:pools].each do |pool|
|
template_name = pool['template']
|
||||||
if pool['folder'] == vm_folder_path
|
end
|
||||||
pool_name = pool['name']
|
|
||||||
template_name = pool['template']
|
|
||||||
end
|
end
|
||||||
end
|
|
||||||
|
|
||||||
generate_vm_hash(vm_object, template_name, pool_name)
|
vm_hash = generate_vm_hash(vm_object, template_name, pool_name)
|
||||||
|
end
|
||||||
|
vm_hash
|
||||||
end
|
end
|
||||||
|
|
||||||
def create_vm(pool_name, new_vmname)
|
def create_vm(pool_name, new_vmname)
|
||||||
pool = pool_config(pool_name)
|
pool = pool_config(pool_name)
|
||||||
raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?
|
raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?
|
||||||
|
vm_hash = nil
|
||||||
|
@connection_pool.with_metrics do |pool_object|
|
||||||
|
connection = ensured_vsphere_connection(pool_object)
|
||||||
|
# Assume all pool config is valid i.e. not missing
|
||||||
|
template_path = pool['template']
|
||||||
|
target_folder_path = pool['folder']
|
||||||
|
target_datastore = pool['datastore']
|
||||||
|
target_cluster_name = get_target_cluster_from_config(pool_name)
|
||||||
|
|
||||||
connection = get_connection
|
# Extract the template VM name from the full path
|
||||||
|
raise("Pool #{pool_name} did specify a full path for the template for the provider #{name}") unless template_path =~ /\//
|
||||||
|
templatefolders = template_path.split('/')
|
||||||
|
template_name = templatefolders.pop
|
||||||
|
|
||||||
# Assume all pool config is valid i.e. not missing
|
# Get the actual objects from vSphere
|
||||||
template_path = pool['template']
|
template_folder_object = find_folder(templatefolders.join('/'), connection)
|
||||||
target_folder_path = pool['folder']
|
raise("Pool #{pool_name} specifies a template folder of #{templatefolders.join('/')} which does not exist for the provider #{name}") if template_folder_object.nil?
|
||||||
target_datastore = pool['datastore']
|
|
||||||
target_cluster_name = get_target_cluster_from_config(pool_name)
|
|
||||||
|
|
||||||
# Extract the template VM name from the full path
|
template_vm_object = template_folder_object.find(template_name)
|
||||||
raise("Pool #{pool_name} did specify a full path for the template for the provider #{name}") unless template_path =~ /\//
|
raise("Pool #{pool_name} specifies a template VM of #{template_name} which does not exist for the provider #{name}") if template_vm_object.nil?
|
||||||
templatefolders = template_path.split('/')
|
|
||||||
template_name = templatefolders.pop
|
|
||||||
|
|
||||||
# Get the actual objects from vSphere
|
# Annotate with creation time, origin template, etc.
|
||||||
template_folder_object = find_folder(templatefolders.join('/'), connection)
|
# Add extraconfig options that can be queried by vmtools
|
||||||
raise("Pool #{pool_name} specifies a template folder of #{templatefolders.join('/')} which does not exist for the provider #{name}") if template_folder_object.nil?
|
config_spec = RbVmomi::VIM.VirtualMachineConfigSpec(
|
||||||
|
annotation: JSON.pretty_generate(
|
||||||
|
name: new_vmname,
|
||||||
|
created_by: provider_config['username'],
|
||||||
|
base_template: template_path,
|
||||||
|
creation_timestamp: Time.now.utc
|
||||||
|
),
|
||||||
|
extraConfig: [
|
||||||
|
{ key: 'guestinfo.hostname', value: new_vmname }
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
template_vm_object = template_folder_object.find(template_name)
|
# Choose a cluster/host to place the new VM on
|
||||||
raise("Pool #{pool_name} specifies a template VM of #{template_name} which does not exist for the provider #{name}") if template_vm_object.nil?
|
target_host_object = find_least_used_host(target_cluster_name, connection)
|
||||||
|
|
||||||
# Annotate with creation time, origin template, etc.
|
# Put the VM in the specified folder and resource pool
|
||||||
# Add extraconfig options that can be queried by vmtools
|
relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(
|
||||||
config_spec = RbVmomi::VIM.VirtualMachineConfigSpec(
|
datastore: find_datastore(target_datastore, connection),
|
||||||
annotation: JSON.pretty_generate(
|
host: target_host_object,
|
||||||
|
diskMoveType: :moveChildMostDiskBacking
|
||||||
|
)
|
||||||
|
|
||||||
|
# Create a clone spec
|
||||||
|
clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(
|
||||||
|
location: relocate_spec,
|
||||||
|
config: config_spec,
|
||||||
|
powerOn: true,
|
||||||
|
template: false
|
||||||
|
)
|
||||||
|
|
||||||
|
# Create the new VM
|
||||||
|
new_vm_object = template_vm_object.CloneVM_Task(
|
||||||
|
folder: find_folder(target_folder_path, connection),
|
||||||
name: new_vmname,
|
name: new_vmname,
|
||||||
created_by: provider_config['username'],
|
spec: clone_spec
|
||||||
base_template: template_path,
|
).wait_for_completion
|
||||||
creation_timestamp: Time.now.utc
|
|
||||||
),
|
|
||||||
extraConfig: [
|
|
||||||
{ key: 'guestinfo.hostname', value: new_vmname }
|
|
||||||
]
|
|
||||||
)
|
|
||||||
|
|
||||||
# Choose a cluster/host to place the new VM on
|
vm_hash = generate_vm_hash(new_vm_object, template_path, pool_name)
|
||||||
target_host_object = find_least_used_host(target_cluster_name, connection)
|
end
|
||||||
|
vm_hash
|
||||||
# Put the VM in the specified folder and resource pool
|
|
||||||
relocate_spec = RbVmomi::VIM.VirtualMachineRelocateSpec(
|
|
||||||
datastore: find_datastore(target_datastore, connection),
|
|
||||||
host: target_host_object,
|
|
||||||
diskMoveType: :moveChildMostDiskBacking
|
|
||||||
)
|
|
||||||
|
|
||||||
# Create a clone spec
|
|
||||||
clone_spec = RbVmomi::VIM.VirtualMachineCloneSpec(
|
|
||||||
location: relocate_spec,
|
|
||||||
config: config_spec,
|
|
||||||
powerOn: true,
|
|
||||||
template: false
|
|
||||||
)
|
|
||||||
|
|
||||||
# Create the new VM
|
|
||||||
new_vm_object = template_vm_object.CloneVM_Task(
|
|
||||||
folder: find_folder(target_folder_path, connection),
|
|
||||||
name: new_vmname,
|
|
||||||
spec: clone_spec
|
|
||||||
).wait_for_completion
|
|
||||||
|
|
||||||
generate_vm_hash(new_vm_object, template_path, pool_name)
|
|
||||||
end
|
end
|
||||||
|
|
||||||
def create_disk(pool_name, vm_name, disk_size)
|
def create_disk(pool_name, vm_name, disk_size)
|
||||||
|
|
@ -170,62 +205,62 @@ module Vmpooler
|
||||||
datastore_name = pool['datastore']
|
datastore_name = pool['datastore']
|
||||||
raise("Pool #{pool_name} does not have a datastore defined for the provider #{name}") if datastore_name.nil?
|
raise("Pool #{pool_name} does not have a datastore defined for the provider #{name}") if datastore_name.nil?
|
||||||
|
|
||||||
connection = get_connection
|
@connection_pool.with_metrics do |pool_object|
|
||||||
|
connection = ensured_vsphere_connection(pool_object)
|
||||||
vm_object = find_vm(vm_name, connection)
|
vm_object = find_vm(vm_name, connection)
|
||||||
raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil?
|
raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil?
|
||||||
|
|
||||||
add_disk(vm_object, disk_size, datastore_name, connection)
|
|
||||||
|
|
||||||
|
add_disk(vm_object, disk_size, datastore_name, connection)
|
||||||
|
end
|
||||||
true
|
true
|
||||||
end
|
end
|
||||||
|
|
||||||
def create_snapshot(pool_name, vm_name, new_snapshot_name)
|
def create_snapshot(pool_name, vm_name, new_snapshot_name)
|
||||||
connection = get_connection
|
@connection_pool.with_metrics do |pool_object|
|
||||||
|
connection = ensured_vsphere_connection(pool_object)
|
||||||
|
vm_object = find_vm(vm_name, connection)
|
||||||
|
raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil?
|
||||||
|
|
||||||
vm_object = find_vm(vm_name, connection)
|
old_snap = find_snapshot(vm_object, new_snapshot_name)
|
||||||
raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil?
|
raise("Snapshot #{new_snapshot_name} for VM #{vm_name} in pool #{pool_name} already exists for the provider #{name}") unless old_snap.nil?
|
||||||
|
|
||||||
old_snap = find_snapshot(vm_object, new_snapshot_name)
|
|
||||||
raise("Snapshot #{new_snapshot_name} for VM #{vm_name} in pool #{pool_name} already exists for the provider #{name}") unless old_snap.nil?
|
|
||||||
|
|
||||||
vm_object.CreateSnapshot_Task(
|
|
||||||
name: new_snapshot_name,
|
|
||||||
description: 'vmpooler',
|
|
||||||
memory: true,
|
|
||||||
quiesce: true
|
|
||||||
).wait_for_completion
|
|
||||||
|
|
||||||
|
vm_object.CreateSnapshot_Task(
|
||||||
|
name: new_snapshot_name,
|
||||||
|
description: 'vmpooler',
|
||||||
|
memory: true,
|
||||||
|
quiesce: true
|
||||||
|
).wait_for_completion
|
||||||
|
end
|
||||||
true
|
true
|
||||||
end
|
end
|
||||||
|
|
||||||
def revert_snapshot(pool_name, vm_name, snapshot_name)
|
def revert_snapshot(pool_name, vm_name, snapshot_name)
|
||||||
connection = get_connection
|
@connection_pool.with_metrics do |pool_object|
|
||||||
|
connection = ensured_vsphere_connection(pool_object)
|
||||||
|
vm_object = find_vm(vm_name, connection)
|
||||||
|
raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil?
|
||||||
|
|
||||||
vm_object = find_vm(vm_name, connection)
|
snapshot_object = find_snapshot(vm_object, snapshot_name)
|
||||||
raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if vm_object.nil?
|
raise("Snapshot #{snapshot_name} for VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if snapshot_object.nil?
|
||||||
|
|
||||||
snapshot_object = find_snapshot(vm_object, snapshot_name)
|
|
||||||
raise("Snapshot #{snapshot_name} for VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if snapshot_object.nil?
|
|
||||||
|
|
||||||
snapshot_object.RevertToSnapshot_Task.wait_for_completion
|
|
||||||
|
|
||||||
|
snapshot_object.RevertToSnapshot_Task.wait_for_completion
|
||||||
|
end
|
||||||
true
|
true
|
||||||
end
|
end
|
||||||
|
|
||||||
def destroy_vm(_pool_name, vm_name)
|
def destroy_vm(_pool_name, vm_name)
|
||||||
connection = get_connection
|
@connection_pool.with_metrics do |pool_object|
|
||||||
|
connection = ensured_vsphere_connection(pool_object)
|
||||||
|
vm_object = find_vm(vm_name, connection)
|
||||||
|
# If a VM doesn't exist then it is effectively deleted
|
||||||
|
return true if vm_object.nil?
|
||||||
|
|
||||||
vm_object = find_vm(vm_name, connection)
|
# Poweroff the VM if it's running
|
||||||
# If a VM doesn't exist then it is effectively deleted
|
vm_object.PowerOffVM_Task.wait_for_completion if vm_object.runtime && vm_object.runtime.powerState && vm_object.runtime.powerState == 'poweredOn'
|
||||||
return true if vm_object.nil?
|
|
||||||
|
|
||||||
# Poweroff the VM if it's running
|
|
||||||
vm_object.PowerOffVM_Task.wait_for_completion if vm_object.runtime && vm_object.runtime.powerState && vm_object.runtime.powerState == 'poweredOn'
|
|
||||||
|
|
||||||
# Kill it with fire
|
|
||||||
vm_object.Destroy_Task.wait_for_completion
|
|
||||||
|
|
||||||
|
# Kill it with fire
|
||||||
|
vm_object.Destroy_Task.wait_for_completion
|
||||||
|
end
|
||||||
true
|
true
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|
@ -239,12 +274,6 @@ module Vmpooler
|
||||||
true
|
true
|
||||||
end
|
end
|
||||||
|
|
||||||
def provider_config
|
|
||||||
# The vSphere configuration is currently in it's own root. This will
|
|
||||||
# eventually shift into the same location base expects it
|
|
||||||
global_config[:vsphere]
|
|
||||||
end
|
|
||||||
|
|
||||||
# VSphere Helper methods
|
# VSphere Helper methods
|
||||||
|
|
||||||
def get_target_cluster_from_config(pool_name)
|
def get_target_cluster_from_config(pool_name)
|
||||||
|
|
@ -275,25 +304,27 @@ module Vmpooler
|
||||||
DISK_TYPE = 'thin'.freeze
|
DISK_TYPE = 'thin'.freeze
|
||||||
DISK_MODE = 'persistent'.freeze
|
DISK_MODE = 'persistent'.freeze
|
||||||
|
|
||||||
def get_connection
|
def ensured_vsphere_connection(connection_pool_object)
|
||||||
begin
|
connection_pool_object[:connection] = connect_to_vsphere unless vsphere_connection_ok?(connection_pool_object[:connection])
|
||||||
@connection.serviceInstance.CurrentTime
|
connection_pool_object[:connection]
|
||||||
rescue
|
|
||||||
@connection = connect_to_vsphere @credentials
|
|
||||||
end
|
|
||||||
|
|
||||||
@connection
|
|
||||||
end
|
end
|
||||||
|
|
||||||
def connect_to_vsphere(credentials)
|
def vsphere_connection_ok?(connection)
|
||||||
max_tries = @conf['max_tries'] || 3
|
_result = connection.serviceInstance.CurrentTime
|
||||||
retry_factor = @conf['retry_factor'] || 10
|
return true
|
||||||
|
rescue
|
||||||
|
return false
|
||||||
|
end
|
||||||
|
|
||||||
|
def connect_to_vsphere
|
||||||
|
max_tries = global_config[:config]['max_tries'] || 3
|
||||||
|
retry_factor = global_config[:config]['retry_factor'] || 10
|
||||||
try = 1
|
try = 1
|
||||||
begin
|
begin
|
||||||
connection = RbVmomi::VIM.connect host: credentials['server'],
|
connection = RbVmomi::VIM.connect host: provider_config['server'],
|
||||||
user: credentials['username'],
|
user: provider_config['username'],
|
||||||
password: credentials['password'],
|
password: provider_config['password'],
|
||||||
insecure: credentials['insecure'] || true
|
insecure: provider_config['insecure'] || true
|
||||||
metrics.increment('connect.open')
|
metrics.increment('connect.open')
|
||||||
return connection
|
return connection
|
||||||
rescue => err
|
rescue => err
|
||||||
|
|
@ -679,10 +710,6 @@ module Vmpooler
|
||||||
relospec = RbVmomi::VIM.VirtualMachineRelocateSpec(host: host)
|
relospec = RbVmomi::VIM.VirtualMachineRelocateSpec(host: host)
|
||||||
vm.RelocateVM_Task(spec: relospec).wait_for_completion
|
vm.RelocateVM_Task(spec: relospec).wait_for_completion
|
||||||
end
|
end
|
||||||
|
|
||||||
def close
|
|
||||||
@connection.close
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
|
||||||
|
|
@ -1,415 +0,0 @@
|
||||||
require 'rubygems' unless defined?(Gem)
|
|
||||||
|
|
||||||
module Vmpooler
|
|
||||||
class VsphereHelper
|
|
||||||
ADAPTER_TYPE = 'lsiLogic'
|
|
||||||
DISK_TYPE = 'thin'
|
|
||||||
DISK_MODE = 'persistent'
|
|
||||||
|
|
||||||
def initialize(config, metrics)
|
|
||||||
@credentials = config[:vsphere]
|
|
||||||
@conf = config[:config]
|
|
||||||
@metrics = metrics
|
|
||||||
end
|
|
||||||
|
|
||||||
def ensure_connected(connection, credentials)
|
|
||||||
connection.serviceInstance.CurrentTime
|
|
||||||
rescue
|
|
||||||
connect_to_vsphere @credentials
|
|
||||||
end
|
|
||||||
|
|
||||||
def connect_to_vsphere(credentials)
|
|
||||||
max_tries = @conf['max_tries'] || 3
|
|
||||||
retry_factor = @conf['retry_factor'] || 10
|
|
||||||
try = 1
|
|
||||||
begin
|
|
||||||
@connection = RbVmomi::VIM.connect host: credentials['server'],
|
|
||||||
user: credentials['username'],
|
|
||||||
password: credentials['password'],
|
|
||||||
insecure: credentials['insecure'] || true
|
|
||||||
@metrics.increment("connect.open")
|
|
||||||
rescue => err
|
|
||||||
try += 1
|
|
||||||
@metrics.increment("connect.fail")
|
|
||||||
raise err if try == max_tries
|
|
||||||
sleep(try * retry_factor)
|
|
||||||
retry
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
def add_disk(vm, size, datastore)
|
|
||||||
ensure_connected @connection, @credentials
|
|
||||||
|
|
||||||
return false unless size.to_i > 0
|
|
||||||
|
|
||||||
vmdk_datastore = find_datastore(datastore)
|
|
||||||
vmdk_file_name = "#{vm['name']}/#{vm['name']}_#{find_vmdks(vm['name'], datastore).length + 1}.vmdk"
|
|
||||||
|
|
||||||
controller = find_disk_controller(vm)
|
|
||||||
|
|
||||||
vmdk_spec = RbVmomi::VIM::FileBackedVirtualDiskSpec(
|
|
||||||
capacityKb: size.to_i * 1024 * 1024,
|
|
||||||
adapterType: ADAPTER_TYPE,
|
|
||||||
diskType: DISK_TYPE
|
|
||||||
)
|
|
||||||
|
|
||||||
vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
|
|
||||||
datastore: vmdk_datastore,
|
|
||||||
diskMode: DISK_MODE,
|
|
||||||
fileName: "[#{vmdk_datastore.name}] #{vmdk_file_name}"
|
|
||||||
)
|
|
||||||
|
|
||||||
device = RbVmomi::VIM::VirtualDisk(
|
|
||||||
backing: vmdk_backing,
|
|
||||||
capacityInKB: size.to_i * 1024 * 1024,
|
|
||||||
controllerKey: controller.key,
|
|
||||||
key: -1,
|
|
||||||
unitNumber: find_disk_unit_number(vm, controller)
|
|
||||||
)
|
|
||||||
|
|
||||||
device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
|
|
||||||
device: device,
|
|
||||||
operation: RbVmomi::VIM::VirtualDeviceConfigSpecOperation('add')
|
|
||||||
)
|
|
||||||
|
|
||||||
vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
|
|
||||||
deviceChange: [device_config_spec]
|
|
||||||
)
|
|
||||||
|
|
||||||
@connection.serviceContent.virtualDiskManager.CreateVirtualDisk_Task(
|
|
||||||
datacenter: @connection.serviceInstance.find_datacenter,
|
|
||||||
name: "[#{vmdk_datastore.name}] #{vmdk_file_name}",
|
|
||||||
spec: vmdk_spec
|
|
||||||
).wait_for_completion
|
|
||||||
|
|
||||||
vm.ReconfigVM_Task(spec: vm_config_spec).wait_for_completion
|
|
||||||
|
|
||||||
true
|
|
||||||
end
|
|
||||||
|
|
||||||
def find_datastore(datastorename)
|
|
||||||
ensure_connected @connection, @credentials
|
|
||||||
|
|
||||||
datacenter = @connection.serviceInstance.find_datacenter
|
|
||||||
datacenter.find_datastore(datastorename)
|
|
||||||
end
|
|
||||||
|
|
||||||
def find_device(vm, deviceName)
|
|
||||||
ensure_connected @connection, @credentials
|
|
||||||
|
|
||||||
vm.config.hardware.device.each do |device|
|
|
||||||
return device if device.deviceInfo.label == deviceName
|
|
||||||
end
|
|
||||||
|
|
||||||
nil
|
|
||||||
end
|
|
||||||
|
|
||||||
def find_disk_controller(vm)
|
|
||||||
ensure_connected @connection, @credentials
|
|
||||||
|
|
||||||
devices = find_disk_devices(vm)
|
|
||||||
|
|
||||||
devices.keys.sort.each do |device|
|
|
||||||
if devices[device]['children'].length < 15
|
|
||||||
return find_device(vm, devices[device]['device'].deviceInfo.label)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
nil
|
|
||||||
end
|
|
||||||
|
|
||||||
def find_disk_devices(vm)
|
|
||||||
ensure_connected @connection, @credentials
|
|
||||||
|
|
||||||
devices = {}
|
|
||||||
|
|
||||||
vm.config.hardware.device.each do |device|
|
|
||||||
if device.is_a? RbVmomi::VIM::VirtualSCSIController
|
|
||||||
if devices[device.controllerKey].nil?
|
|
||||||
devices[device.key] = {}
|
|
||||||
devices[device.key]['children'] = []
|
|
||||||
end
|
|
||||||
|
|
||||||
devices[device.key]['device'] = device
|
|
||||||
end
|
|
||||||
|
|
||||||
if device.is_a? RbVmomi::VIM::VirtualDisk
|
|
||||||
if devices[device.controllerKey].nil?
|
|
||||||
devices[device.controllerKey] = {}
|
|
||||||
devices[device.controllerKey]['children'] = []
|
|
||||||
end
|
|
||||||
|
|
||||||
devices[device.controllerKey]['children'].push(device)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
devices
|
|
||||||
end
|
|
||||||
|
|
||||||
def find_disk_unit_number(vm, controller)
|
|
||||||
ensure_connected @connection, @credentials
|
|
||||||
|
|
||||||
used_unit_numbers = []
|
|
||||||
available_unit_numbers = []
|
|
||||||
|
|
||||||
devices = find_disk_devices(vm)
|
|
||||||
|
|
||||||
devices.keys.sort.each do |c|
|
|
||||||
next unless controller.key == devices[c]['device'].key
|
|
||||||
used_unit_numbers.push(devices[c]['device'].scsiCtlrUnitNumber)
|
|
||||||
devices[c]['children'].each do |disk|
|
|
||||||
used_unit_numbers.push(disk.unitNumber)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
(0..15).each do |scsi_id|
|
|
||||||
if used_unit_numbers.grep(scsi_id).length <= 0
|
|
||||||
available_unit_numbers.push(scsi_id)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
available_unit_numbers.sort[0]
|
|
||||||
end
|
|
||||||
|
|
||||||
def find_folder(foldername)
|
|
||||||
ensure_connected @connection, @credentials
|
|
||||||
|
|
||||||
datacenter = @connection.serviceInstance.find_datacenter
|
|
||||||
base = datacenter.vmFolder
|
|
||||||
folders = foldername.split('/')
|
|
||||||
folders.each do |folder|
|
|
||||||
if base.is_a? RbVmomi::VIM::Folder
|
|
||||||
base = base.childEntity.find { |f| f.name == folder }
|
|
||||||
else
|
|
||||||
raise(RuntimeError, "Unexpected object type encountered (#{base.class}) while finding folder")
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
base
|
|
||||||
end
|
|
||||||
|
|
||||||
# Returns an array containing cumulative CPU and memory utilization of a host, and its object reference
|
|
||||||
# Params:
|
|
||||||
# +model+:: CPU arch version to match on
|
|
||||||
# +limit+:: Hard limit for CPU or memory utilization beyond which a host is excluded for deployments
|
|
||||||
def get_host_utilization(host, model=nil, limit=90)
|
|
||||||
if model
|
|
||||||
return nil unless host_has_cpu_model? host, model
|
|
||||||
end
|
|
||||||
return nil if host.runtime.inMaintenanceMode
|
|
||||||
return nil unless host.overallStatus == 'green'
|
|
||||||
|
|
||||||
cpu_utilization = cpu_utilization_for host
|
|
||||||
memory_utilization = memory_utilization_for host
|
|
||||||
|
|
||||||
return nil if cpu_utilization > limit
|
|
||||||
return nil if memory_utilization > limit
|
|
||||||
|
|
||||||
[ cpu_utilization + memory_utilization, host ]
|
|
||||||
end
|
|
||||||
|
|
||||||
def host_has_cpu_model?(host, model)
|
|
||||||
get_host_cpu_arch_version(host) == model
|
|
||||||
end
|
|
||||||
|
|
||||||
def get_host_cpu_arch_version(host)
|
|
||||||
cpu_model = host.hardware.cpuPkg[0].description
|
|
||||||
cpu_model_parts = cpu_model.split()
|
|
||||||
arch_version = cpu_model_parts[4]
|
|
||||||
arch_version
|
|
||||||
end
|
|
||||||
|
|
||||||
def cpu_utilization_for(host)
|
|
||||||
cpu_usage = host.summary.quickStats.overallCpuUsage
|
|
||||||
cpu_size = host.summary.hardware.cpuMhz * host.summary.hardware.numCpuCores
|
|
||||||
(cpu_usage.to_f / cpu_size.to_f) * 100
|
|
||||||
end
|
|
||||||
|
|
||||||
def memory_utilization_for(host)
|
|
||||||
memory_usage = host.summary.quickStats.overallMemoryUsage
|
|
||||||
memory_size = host.summary.hardware.memorySize / 1024 / 1024
|
|
||||||
(memory_usage.to_f / memory_size.to_f) * 100
|
|
||||||
end
|
|
||||||
|
|
||||||
def find_least_used_host(cluster)
|
|
||||||
ensure_connected @connection, @credentials
|
|
||||||
|
|
||||||
cluster_object = find_cluster(cluster)
|
|
||||||
target_hosts = get_cluster_host_utilization(cluster_object)
|
|
||||||
least_used_host = target_hosts.sort[0][1]
|
|
||||||
least_used_host
|
|
||||||
end
|
|
||||||
|
|
||||||
def find_cluster(cluster)
|
|
||||||
datacenter = @connection.serviceInstance.find_datacenter
|
|
||||||
datacenter.hostFolder.children.find { |cluster_object| cluster_object.name == cluster }
|
|
||||||
end
|
|
||||||
|
|
||||||
def get_cluster_host_utilization(cluster)
|
|
||||||
cluster_hosts = []
|
|
||||||
cluster.host.each do |host|
|
|
||||||
host_usage = get_host_utilization(host)
|
|
||||||
cluster_hosts << host_usage if host_usage
|
|
||||||
end
|
|
||||||
cluster_hosts
|
|
||||||
end
|
|
||||||
|
|
||||||
def find_least_used_compatible_host(vm)
|
|
||||||
ensure_connected @connection, @credentials
|
|
||||||
|
|
||||||
source_host = vm.summary.runtime.host
|
|
||||||
model = get_host_cpu_arch_version(source_host)
|
|
||||||
cluster = source_host.parent
|
|
||||||
target_hosts = []
|
|
||||||
cluster.host.each do |host|
|
|
||||||
host_usage = get_host_utilization(host, model)
|
|
||||||
target_hosts << host_usage if host_usage
|
|
||||||
end
|
|
||||||
target_host = target_hosts.sort[0][1]
|
|
||||||
[target_host, target_host.name]
|
|
||||||
end
|
|
||||||
|
|
||||||
def find_pool(poolname)
|
|
||||||
ensure_connected @connection, @credentials
|
|
||||||
|
|
||||||
datacenter = @connection.serviceInstance.find_datacenter
|
|
||||||
base = datacenter.hostFolder
|
|
||||||
pools = poolname.split('/')
|
|
||||||
pools.each do |pool|
|
|
||||||
case
|
|
||||||
when base.is_a?(RbVmomi::VIM::Folder)
|
|
||||||
base = base.childEntity.find { |f| f.name == pool }
|
|
||||||
when base.is_a?(RbVmomi::VIM::ClusterComputeResource)
|
|
||||||
base = base.resourcePool.resourcePool.find { |f| f.name == pool }
|
|
||||||
when base.is_a?(RbVmomi::VIM::ResourcePool)
|
|
||||||
base = base.resourcePool.find { |f| f.name == pool }
|
|
||||||
else
|
|
||||||
raise(RuntimeError, "Unexpected object type encountered (#{base.class}) while finding resource pool")
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
base = base.resourcePool unless base.is_a?(RbVmomi::VIM::ResourcePool) && base.respond_to?(:resourcePool)
|
|
||||||
base
|
|
||||||
end
|
|
||||||
|
|
||||||
def find_snapshot(vm, snapshotname)
|
|
||||||
if vm.snapshot
|
|
||||||
get_snapshot_list(vm.snapshot.rootSnapshotList, snapshotname)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
def find_vm(vmname)
|
|
||||||
ensure_connected @connection, @credentials
|
|
||||||
find_vm_light(vmname) || find_vm_heavy(vmname)[vmname]
|
|
||||||
end
|
|
||||||
|
|
||||||
def find_vm_light(vmname)
|
|
||||||
ensure_connected @connection, @credentials
|
|
||||||
|
|
||||||
@connection.searchIndex.FindByDnsName(vmSearch: true, dnsName: vmname)
|
|
||||||
end
|
|
||||||
|
|
||||||
def find_vm_heavy(vmname)
|
|
||||||
ensure_connected @connection, @credentials
|
|
||||||
|
|
||||||
vmname = vmname.is_a?(Array) ? vmname : [vmname]
|
|
||||||
containerView = get_base_vm_container_from @connection
|
|
||||||
propertyCollector = @connection.propertyCollector
|
|
||||||
|
|
||||||
objectSet = [{
|
|
||||||
obj: containerView,
|
|
||||||
skip: true,
|
|
||||||
selectSet: [RbVmomi::VIM::TraversalSpec.new(
|
|
||||||
name: 'gettingTheVMs',
|
|
||||||
path: 'view',
|
|
||||||
skip: false,
|
|
||||||
type: 'ContainerView'
|
|
||||||
)]
|
|
||||||
}]
|
|
||||||
|
|
||||||
propSet = [{
|
|
||||||
pathSet: ['name'],
|
|
||||||
type: 'VirtualMachine'
|
|
||||||
}]
|
|
||||||
|
|
||||||
results = propertyCollector.RetrievePropertiesEx(
|
|
||||||
specSet: [{
|
|
||||||
objectSet: objectSet,
|
|
||||||
propSet: propSet
|
|
||||||
}],
|
|
||||||
options: { maxObjects: nil }
|
|
||||||
)
|
|
||||||
|
|
||||||
vms = {}
|
|
||||||
results.objects.each do |result|
|
|
||||||
name = result.propSet.first.val
|
|
||||||
next unless vmname.include? name
|
|
||||||
vms[name] = result.obj
|
|
||||||
end
|
|
||||||
|
|
||||||
while results.token
|
|
||||||
results = propertyCollector.ContinueRetrievePropertiesEx(token: results.token)
|
|
||||||
results.objects.each do |result|
|
|
||||||
name = result.propSet.first.val
|
|
||||||
next unless vmname.include? name
|
|
||||||
vms[name] = result.obj
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
vms
|
|
||||||
end
|
|
||||||
|
|
||||||
def find_vmdks(vmname, datastore)
|
|
||||||
ensure_connected @connection, @credentials
|
|
||||||
|
|
||||||
disks = []
|
|
||||||
|
|
||||||
vmdk_datastore = find_datastore(datastore)
|
|
||||||
|
|
||||||
vm_files = vmdk_datastore._connection.serviceContent.propertyCollector.collectMultiple vmdk_datastore.vm, 'layoutEx.file'
|
|
||||||
vm_files.keys.each do |f|
|
|
||||||
vm_files[f]['layoutEx.file'].each do |l|
|
|
||||||
if l.name.match(/^\[#{vmdk_datastore.name}\] #{vmname}\/#{vmname}_([0-9]+).vmdk/)
|
|
||||||
disks.push(l)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
disks
|
|
||||||
end
|
|
||||||
|
|
||||||
def get_base_vm_container_from(connection)
|
|
||||||
ensure_connected @connection, @credentials
|
|
||||||
|
|
||||||
viewManager = connection.serviceContent.viewManager
|
|
||||||
viewManager.CreateContainerView(
|
|
||||||
container: connection.serviceContent.rootFolder,
|
|
||||||
recursive: true,
|
|
||||||
type: ['VirtualMachine']
|
|
||||||
)
|
|
||||||
end
|
|
||||||
|
|
||||||
def get_snapshot_list(tree, snapshotname)
|
|
||||||
snapshot = nil
|
|
||||||
|
|
||||||
tree.each do |child|
|
|
||||||
if child.name == snapshotname
|
|
||||||
snapshot ||= child.snapshot
|
|
||||||
else
|
|
||||||
snapshot ||= get_snapshot_list(child.childSnapshotList, snapshotname)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
snapshot
|
|
||||||
end
|
|
||||||
|
|
||||||
def migrate_vm_host(vm, host)
|
|
||||||
relospec = RbVmomi::VIM.VirtualMachineRelocateSpec(host: host)
|
|
||||||
vm.RelocateVM_Task(spec: relospec).wait_for_completion
|
|
||||||
end
|
|
||||||
|
|
||||||
def close
|
|
||||||
@connection.close
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
@ -6,21 +6,6 @@ def redis
|
||||||
@redis
|
@redis
|
||||||
end
|
end
|
||||||
|
|
||||||
# Mock an object which is result from Vmpooler::VsphereHelper.find_folder(foldername)
|
|
||||||
class MockFindFolder
|
|
||||||
attr_reader :childEntity
|
|
||||||
|
|
||||||
def initialize(vmlist = [])
|
|
||||||
# Generate an array of hashes
|
|
||||||
@childEntity = vmlist.map do |vm|
|
|
||||||
vm_object = {}
|
|
||||||
vm_object['name'] = vm
|
|
||||||
|
|
||||||
vm_object
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
# Mock an object which represents a Logger. This stops the proliferation
|
# Mock an object which represents a Logger. This stops the proliferation
|
||||||
# of allow(logger).to .... expectations in tests.
|
# of allow(logger).to .... expectations in tests.
|
||||||
class MockLogger
|
class MockLogger
|
||||||
|
|
|
||||||
132
spec/unit/generic_connection_pool_spec.rb
Normal file
132
spec/unit/generic_connection_pool_spec.rb
Normal file
|
|
@ -0,0 +1,132 @@
|
||||||
|
require 'spec_helper'
|
||||||
|
|
||||||
|
describe 'GenericConnectionPool' do
|
||||||
|
let(:metrics) { Vmpooler::DummyStatsd.new }
|
||||||
|
let(:metric_prefix) { 'prefix' }
|
||||||
|
let(:default_metric_prefix) { 'connectionpool' }
|
||||||
|
let(:connection_object) { double('connection') }
|
||||||
|
let(:pool_size) { 1 }
|
||||||
|
let(:pool_timeout) { 1 }
|
||||||
|
|
||||||
|
subject { Vmpooler::PoolManager::GenericConnectionPool.new(
|
||||||
|
metrics: metrics,
|
||||||
|
metric_prefix: metric_prefix,
|
||||||
|
size: pool_size,
|
||||||
|
timeout: pool_timeout
|
||||||
|
) { connection_object }
|
||||||
|
}
|
||||||
|
|
||||||
|
describe "When consuming a pool object" do
|
||||||
|
let(:pool_size) { 1 }
|
||||||
|
let(:pool_timeout) { 1 }
|
||||||
|
let(:connection_object) {{
|
||||||
|
connection: 'connection'
|
||||||
|
}}
|
||||||
|
|
||||||
|
it 'should return a connection object when grabbing one from the pool' do
|
||||||
|
subject.with_metrics do |conn_pool_object|
|
||||||
|
expect(conn_pool_object).to be(connection_object)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return the same connection object when calling the pool multiple times' do
|
||||||
|
subject.with_metrics do |conn_pool_object|
|
||||||
|
expect(conn_pool_object).to be(connection_object)
|
||||||
|
end
|
||||||
|
subject.with_metrics do |conn_pool_object|
|
||||||
|
expect(conn_pool_object).to be(connection_object)
|
||||||
|
end
|
||||||
|
subject.with_metrics do |conn_pool_object|
|
||||||
|
expect(conn_pool_object).to be(connection_object)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should preserve connection state across mulitple pool calls' do
|
||||||
|
new_connection = 'new_connection'
|
||||||
|
# Ensure the connection is not modified
|
||||||
|
subject.with_metrics do |conn_pool_object|
|
||||||
|
expect(conn_pool_object).to be(connection_object)
|
||||||
|
expect(conn_pool_object[:connection]).to_not eq(new_connection)
|
||||||
|
# Change the connection
|
||||||
|
conn_pool_object[:connection] = new_connection
|
||||||
|
end
|
||||||
|
# Ensure the connection is modified
|
||||||
|
subject.with_metrics do |conn_pool_object|
|
||||||
|
expect(conn_pool_object).to be(connection_object)
|
||||||
|
expect(conn_pool_object[:connection]).to eq(new_connection)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe "#with_metrics" do
|
||||||
|
before(:each) do
|
||||||
|
expect(subject).not_to be_nil
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'When metrics are configured' do
|
||||||
|
it 'should emit a gauge metric when the connection is grabbed and released' do
|
||||||
|
expect(metrics).to receive(:gauge).with(/\.available/,Integer).exactly(2).times
|
||||||
|
|
||||||
|
subject.with_metrics do |conn1|
|
||||||
|
# do nothing
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should emit a timing metric when the connection is grabbed' do
|
||||||
|
expect(metrics).to receive(:timing).with(/\.waited/,Integer).exactly(1).times
|
||||||
|
|
||||||
|
subject.with_metrics do |conn1|
|
||||||
|
# do nothing
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should emit metrics with the specified prefix' do
|
||||||
|
expect(metrics).to receive(:gauge).with(/#{metric_prefix}\./,Integer).at_least(1).times
|
||||||
|
expect(metrics).to receive(:timing).with(/#{metric_prefix}\./,Integer).at_least(1).times
|
||||||
|
|
||||||
|
subject.with_metrics do |conn1|
|
||||||
|
# do nothing
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'Metrix prefix is missing' do
|
||||||
|
let(:metric_prefix) { nil }
|
||||||
|
|
||||||
|
it 'should emit metrics with default prefix' do
|
||||||
|
expect(metrics).to receive(:gauge).with(/#{default_metric_prefix}\./,Integer).at_least(1).times
|
||||||
|
expect(metrics).to receive(:timing).with(/#{default_metric_prefix}\./,Integer).at_least(1).times
|
||||||
|
|
||||||
|
subject.with_metrics do |conn1|
|
||||||
|
# do nothing
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'Metrix prefix is empty' do
|
||||||
|
let(:metric_prefix) { '' }
|
||||||
|
|
||||||
|
it 'should emit metrics with default prefix' do
|
||||||
|
expect(metrics).to receive(:gauge).with(/#{default_metric_prefix}\./,Integer).at_least(1).times
|
||||||
|
expect(metrics).to receive(:timing).with(/#{default_metric_prefix}\./,Integer).at_least(1).times
|
||||||
|
|
||||||
|
subject.with_metrics do |conn1|
|
||||||
|
# do nothing
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'When metrics are not configured' do
|
||||||
|
let(:metrics) { nil }
|
||||||
|
|
||||||
|
it 'should not emit any metrics' do
|
||||||
|
# if any metrics are called it would result in a method error on Nil.
|
||||||
|
|
||||||
|
subject.with_metrics do |conn1|
|
||||||
|
# do nothing
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
end
|
||||||
|
end
|
||||||
File diff suppressed because it is too large
Load diff
|
|
@ -84,7 +84,7 @@ EOT
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
context 'Given a misconfigured provider name' do
|
context 'Given a provider with no configuration' do
|
||||||
let(:config) { YAML.load(<<-EOT
|
let(:config) { YAML.load(<<-EOT
|
||||||
---
|
---
|
||||||
:providers:
|
:providers:
|
||||||
|
|
@ -94,8 +94,8 @@ EOT
|
||||||
EOT
|
EOT
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
it 'should return nil' do
|
it 'should return empty hash' do
|
||||||
expect(subject.provider_config).to be_nil
|
expect(subject.provider_config).to eq({})
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|
@ -120,6 +120,26 @@ EOT
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
describe '#provided_pools' do
|
||||||
|
let(:config) { YAML.load(<<-EOT
|
||||||
|
---
|
||||||
|
:pools:
|
||||||
|
- name: 'pool1'
|
||||||
|
provider: 'base'
|
||||||
|
- name: 'pool2'
|
||||||
|
provider: 'base'
|
||||||
|
- name: 'otherpool'
|
||||||
|
provider: 'other provider'
|
||||||
|
- name: 'no name'
|
||||||
|
EOT
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
it "should return pools serviced by this provider" do
|
||||||
|
expect(subject.provided_pools).to eq(['pool1','pool2'])
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
describe '#vms_in_pool' do
|
describe '#vms_in_pool' do
|
||||||
it 'should raise error' do
|
it 'should raise error' do
|
||||||
expect{subject.vms_in_pool('pool')}.to raise_error(/does not implement vms_in_pool/)
|
expect{subject.vms_in_pool('pool')}.to raise_error(/does not implement vms_in_pool/)
|
||||||
|
|
|
||||||
509
spec/unit/providers/dummy_spec.rb
Normal file
509
spec/unit/providers/dummy_spec.rb
Normal file
|
|
@ -0,0 +1,509 @@
|
||||||
|
require 'spec_helper'
|
||||||
|
|
||||||
|
describe 'Vmpooler::PoolManager::Provider::Dummy' do
|
||||||
|
let(:logger) { MockLogger.new }
|
||||||
|
let(:metrics) { Vmpooler::DummyStatsd.new }
|
||||||
|
let(:pool_name) { 'pool1' }
|
||||||
|
let(:other_pool_name) { 'pool2' }
|
||||||
|
let(:vm_name) { 'vm1' }
|
||||||
|
|
||||||
|
let(:running_vm_name) { 'vm2' }
|
||||||
|
let(:notready_vm_name) { 'vm3' }
|
||||||
|
|
||||||
|
let (:provider_options) {
|
||||||
|
# Construct an initial state for testing
|
||||||
|
dummylist = {}
|
||||||
|
dummylist['pool'] = {}
|
||||||
|
# pool1 is a pool of "normal" VMs
|
||||||
|
dummylist['pool'][pool_name] = []
|
||||||
|
# A normal running VM
|
||||||
|
vm = {}
|
||||||
|
vm['name'] = vm_name
|
||||||
|
vm['hostname'] = vm_name
|
||||||
|
vm['domain'] = 'dummy.local'
|
||||||
|
vm['vm_template'] = 'template1'
|
||||||
|
vm['template'] = pool_name
|
||||||
|
vm['poolname'] = pool_name
|
||||||
|
vm['ready'] = true
|
||||||
|
vm['boottime'] = Time.now
|
||||||
|
vm['powerstate'] = 'PoweredOn'
|
||||||
|
vm['vm_host'] = 'HOST1'
|
||||||
|
vm['snapshots'] = []
|
||||||
|
vm['disks'] = []
|
||||||
|
vm['dummy_state'] = 'RUNNING'
|
||||||
|
dummylist['pool'][pool_name] << vm
|
||||||
|
|
||||||
|
# pool2 is a pool of "abnormal" VMs e.g. PoweredOff etc.
|
||||||
|
dummylist['pool'][other_pool_name] = []
|
||||||
|
# A freshly provisioned VM that is not ready
|
||||||
|
vm = {}
|
||||||
|
vm['name'] = running_vm_name
|
||||||
|
vm['hostname'] = running_vm_name
|
||||||
|
vm['domain'] = 'dummy.local'
|
||||||
|
vm['vm_template'] = 'template1'
|
||||||
|
vm['template'] = other_pool_name
|
||||||
|
vm['poolname'] = other_pool_name
|
||||||
|
vm['ready'] = false
|
||||||
|
vm['boottime'] = Time.now
|
||||||
|
vm['powerstate'] = 'PoweredOn'
|
||||||
|
vm['vm_host'] = 'HOST1'
|
||||||
|
vm['snapshots'] = []
|
||||||
|
vm['disks'] = []
|
||||||
|
vm['dummy_state'] = 'UNKNOWN'
|
||||||
|
dummylist['pool'][other_pool_name] << vm
|
||||||
|
# A freshly provisioned VM that is running but not ready
|
||||||
|
vm = {}
|
||||||
|
vm['name'] = notready_vm_name
|
||||||
|
vm['hostname'] = notready_vm_name
|
||||||
|
vm['domain'] = 'dummy.local'
|
||||||
|
vm['vm_template'] = 'template1'
|
||||||
|
vm['template'] = other_pool_name
|
||||||
|
vm['poolname'] = other_pool_name
|
||||||
|
vm['ready'] = false
|
||||||
|
vm['boottime'] = Time.now
|
||||||
|
vm['powerstate'] = 'PoweredOn'
|
||||||
|
vm['vm_host'] = 'HOST1'
|
||||||
|
vm['snapshots'] = []
|
||||||
|
vm['disks'] = []
|
||||||
|
vm['dummy_state'] = 'RUNNING'
|
||||||
|
dummylist['pool'][other_pool_name] << vm
|
||||||
|
|
||||||
|
{
|
||||||
|
'initial_state' => dummylist
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let(:config) { YAML.load(<<-EOT
|
||||||
|
---
|
||||||
|
:config:
|
||||||
|
:providers:
|
||||||
|
:dummy:
|
||||||
|
key1: 'value1'
|
||||||
|
# Drop the connection pool timeout way down for spec tests so they fail fast
|
||||||
|
connection_pool_timeout: 1
|
||||||
|
:pools:
|
||||||
|
- name: '#{pool_name}'
|
||||||
|
size: 5
|
||||||
|
- name: 'pool2'
|
||||||
|
size: 5
|
||||||
|
EOT
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
subject { Vmpooler::PoolManager::Provider::Dummy.new(config, logger, metrics, 'dummy', provider_options) }
|
||||||
|
|
||||||
|
describe '#name' do
|
||||||
|
it 'should be dummy' do
|
||||||
|
expect(subject.name).to eq('dummy')
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#vms_in_pool' do
|
||||||
|
it 'should return [] when pool does not exist' do
|
||||||
|
vm_list = subject.vms_in_pool('missing_pool')
|
||||||
|
|
||||||
|
expect(vm_list).to eq([])
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return an array of VMs when pool exists' do
|
||||||
|
vm_list = subject.vms_in_pool(pool_name)
|
||||||
|
|
||||||
|
expect(vm_list.count).to eq(1)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#get_vm_host' do
|
||||||
|
it 'should return the hostname when VM exists' do
|
||||||
|
expect(subject.get_vm_host(pool_name, vm_name)).to eq('HOST1')
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should error when VM does not exist' do
|
||||||
|
expect{subject.get_vm_host(pool_name, 'doesnotexist')}.to raise_error(RuntimeError)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#find_least_used_compatible_host' do
|
||||||
|
it 'should return the current host' do
|
||||||
|
new_host = subject.find_least_used_compatible_host(pool_name, vm_name)
|
||||||
|
expect(new_host).to eq('HOST1')
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'using migratevm_couldmove_percent' do
|
||||||
|
describe 'of zero' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['migratevm_couldmove_percent'] = 0
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return the current host' do
|
||||||
|
new_host = subject.find_least_used_compatible_host(pool_name, vm_name)
|
||||||
|
expect(new_host).to eq('HOST1')
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe 'of 100' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['migratevm_couldmove_percent'] = 100
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return a different host' do
|
||||||
|
new_host = subject.find_least_used_compatible_host(pool_name, vm_name)
|
||||||
|
expect(new_host).to_not eq('HOST1')
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#migrate_vm_to_host' do
|
||||||
|
it 'should move to the new host' do
|
||||||
|
expect(subject.migrate_vm_to_host(pool_name, 'vm1','NEWHOST')).to eq(true)
|
||||||
|
expect(subject.get_vm_host(pool_name, 'vm1')).to eq('NEWHOST')
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'using migratevm_fail_percent' do
|
||||||
|
describe 'of zero' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['migratevm_fail_percent'] = 0
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should move to the new host' do
|
||||||
|
expect(subject.migrate_vm_to_host(pool_name, 'vm1','NEWHOST')).to eq(true)
|
||||||
|
expect(subject.get_vm_host(pool_name, 'vm1')).to eq('NEWHOST')
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe 'of 100' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['migratevm_fail_percent'] = 100
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise an error' do
|
||||||
|
expect{subject.migrate_vm_to_host(pool_name, 'vm1','NEWHOST')}.to raise_error(/migratevm_fail_percent/)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#get_vm' do
|
||||||
|
it 'should return the VM when VM exists' do
|
||||||
|
vm = subject.get_vm(pool_name, vm_name)
|
||||||
|
expect(vm['name']).to eq(vm_name)
|
||||||
|
expect(vm['powerstate']).to eq('PoweredOn')
|
||||||
|
expect(vm['hostname']).to eq(vm['name'])
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return nil when VM does not exist' do
|
||||||
|
expect(subject.get_vm(pool_name, 'doesnotexist')).to eq(nil)
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'using getvm_poweroff_percent' do
|
||||||
|
describe 'of zero' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['getvm_poweroff_percent'] = 0
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'will not power off a VM' do
|
||||||
|
vm = subject.get_vm(pool_name, vm_name)
|
||||||
|
expect(vm['name']).to eq(vm_name)
|
||||||
|
expect(vm['powerstate']).to eq('PoweredOn')
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe 'of 100' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['getvm_poweroff_percent'] = 100
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'will power off a VM' do
|
||||||
|
vm = subject.get_vm(pool_name, vm_name)
|
||||||
|
expect(vm['name']).to eq(vm_name)
|
||||||
|
expect(vm['powerstate']).to eq('PoweredOff')
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'using getvm_rename_percent' do
|
||||||
|
describe 'of zero' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['getvm_rename_percent'] = 0
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'will not rename a VM' do
|
||||||
|
vm = subject.get_vm(pool_name, vm_name)
|
||||||
|
expect(vm['name']).to eq(vm_name)
|
||||||
|
expect(vm['hostname']).to eq(vm['name'])
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe 'of 100' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['getvm_rename_percent'] = 100
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'will rename a VM' do
|
||||||
|
vm = subject.get_vm(pool_name, vm_name)
|
||||||
|
expect(vm['name']).to eq(vm_name)
|
||||||
|
expect(vm['hostname']).to_not eq(vm['name'])
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#create_vm' do
|
||||||
|
let(:new_vm_name) { 'newvm' }
|
||||||
|
|
||||||
|
it 'should return a new VM' do
|
||||||
|
expect(subject.create_vm(pool_name, new_vm_name)['name']).to eq(new_vm_name)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should increase the number of VMs in the pool' do
|
||||||
|
old_pool_count = subject.vms_in_pool(pool_name).count
|
||||||
|
|
||||||
|
new_vm = subject.create_vm(pool_name, new_vm_name)
|
||||||
|
|
||||||
|
expect(subject.vms_in_pool(pool_name).count).to eq(old_pool_count + 1)
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'using createvm_fail_percent' do
|
||||||
|
describe 'of zero' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['createvm_fail_percent'] = 0
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return a new VM' do
|
||||||
|
expect(subject.create_vm(pool_name, new_vm_name)['name']).to eq(new_vm_name)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe 'of 100' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['createvm_fail_percent'] = 100
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise an error' do
|
||||||
|
expect{subject.create_vm(pool_name, new_vm_name)}.to raise_error(/createvm_fail_percent/)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'new VM should not exist' do
|
||||||
|
begin
|
||||||
|
subject.create_vm(pool_name, new_vm_name)
|
||||||
|
rescue
|
||||||
|
end
|
||||||
|
expect(subject.get_vm(pool_name, new_vm_name)).to eq(nil)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#create_disk' do
|
||||||
|
let(:disk_size) { 10 }
|
||||||
|
|
||||||
|
it 'should return true when the disk is created' do
|
||||||
|
expect(subject.create_disk(pool_name, vm_name,disk_size)).to be true
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise an error when VM does not exist' do
|
||||||
|
expect{ subject.create_disk(pool_name, 'doesnotexist',disk_size) }.to raise_error(/VM doesnotexist does not exist/)
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'using createdisk_fail_percent' do
|
||||||
|
describe 'of zero' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['createdisk_fail_percent'] = 0
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return true when the disk is created' do
|
||||||
|
expect(subject.create_disk(pool_name, vm_name,disk_size)).to be true
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe 'of 100' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['createdisk_fail_percent'] = 100
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise an error' do
|
||||||
|
expect{subject.create_disk(pool_name, vm_name,disk_size)}.to raise_error(/createdisk_fail_percent/)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#create_snapshot' do
|
||||||
|
let(:snapshot_name) { 'newsnapshot' }
|
||||||
|
|
||||||
|
it 'should return true when the snapshot is created' do
|
||||||
|
expect(subject.create_snapshot(pool_name, vm_name, snapshot_name)).to be true
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise an error when VM does not exist' do
|
||||||
|
expect{ subject.create_snapshot(pool_name, 'doesnotexist', snapshot_name) }.to raise_error(/VM doesnotexist does not exist/)
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'using createsnapshot_fail_percent' do
|
||||||
|
describe 'of zero' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['createsnapshot_fail_percent'] = 0
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return true when the disk is created' do
|
||||||
|
expect(subject.create_snapshot(pool_name, vm_name, snapshot_name)).to be true
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe 'of 100' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['createsnapshot_fail_percent'] = 100
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise an error' do
|
||||||
|
expect{ subject.create_snapshot(pool_name, vm_name, snapshot_name) }.to raise_error(/createsnapshot_fail_percent/)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#revert_snapshot' do
|
||||||
|
let(:snapshot_name) { 'newsnapshot' }
|
||||||
|
|
||||||
|
before(:each) do
|
||||||
|
# Create a snapshot
|
||||||
|
subject.create_snapshot(pool_name, vm_name, snapshot_name)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return true when the snapshot is reverted' do
|
||||||
|
expect(subject.revert_snapshot(pool_name, vm_name, snapshot_name)).to be true
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise an error when VM does not exist' do
|
||||||
|
expect{ subject.revert_snapshot(pool_name, 'doesnotexist', snapshot_name) }.to raise_error(/VM doesnotexist does not exist/)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return false when the snapshot does not exist' do
|
||||||
|
expect(subject.revert_snapshot(pool_name, vm_name, 'doesnotexist')).to be false
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'using revertsnapshot_fail_percent' do
|
||||||
|
describe 'of zero' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['revertsnapshot_fail_percent'] = 0
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return true when the snapshot is reverted' do
|
||||||
|
expect(subject.revert_snapshot(pool_name, vm_name, snapshot_name)).to be true
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe 'of 100' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['revertsnapshot_fail_percent'] = 100
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise an error when VM does not exist' do
|
||||||
|
expect{ subject.revert_snapshot(pool_name, vm_name, snapshot_name) }.to raise_error(/revertsnapshot_fail_percent/)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#destroy_vm' do
|
||||||
|
it 'should return true when destroyed' do
|
||||||
|
expect(subject.destroy_vm(pool_name, vm_name)).to eq(true)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should log if the VM is powered off' do
|
||||||
|
allow(logger).to receive(:log)
|
||||||
|
expect(logger).to receive(:log).with('d', "[ ] [pool1] 'vm1' is being shut down")
|
||||||
|
expect(subject.destroy_vm(pool_name, vm_name)).to eq(true)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return false if VM does not exist' do
|
||||||
|
expect(subject.destroy_vm('doesnotexist',vm_name)).to eq(false)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return false if VM is not in the correct pool' do
|
||||||
|
expect(subject.destroy_vm(other_pool_name, vm_name)).to eq(false)
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'using destroyvm_fail_percent' do
|
||||||
|
describe 'of zero' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['destroyvm_fail_percent'] = 0
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return true when destroyed' do
|
||||||
|
expect(subject.destroy_vm(pool_name, vm_name)).to eq(true)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe 'of 100' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['destroyvm_fail_percent'] = 100
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise an error' do
|
||||||
|
expect{subject.destroy_vm(pool_name, vm_name)}.to raise_error(/migratevm_fail_percent/)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#vm_ready?' do
|
||||||
|
before(:each) do
|
||||||
|
# Speed up tests and ignore sleeping
|
||||||
|
allow(subject).to receive(:sleep)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return true if ready' do
|
||||||
|
expect(subject.vm_ready?(pool_name, vm_name)).to eq(true)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return false if VM does not exist' do
|
||||||
|
expect(subject.vm_ready?(pool_name, 'doesnotexist')).to eq(false)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return false if VM is not in the correct pool' do
|
||||||
|
expect(subject.vm_ready?(other_pool_name, vm_name)).to eq(false)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise an error if timeout expires' do
|
||||||
|
expect{subject.vm_ready?(other_pool_name, running_vm_name)}.to raise_error(Timeout::Error)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return true if VM becomes ready' do
|
||||||
|
expect(subject.vm_ready?(other_pool_name, notready_vm_name)).to eq(true)
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'using vmready_fail_percent' do
|
||||||
|
describe 'of zero' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['vmready_fail_percent'] = 0
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return true if VM becomes ready' do
|
||||||
|
expect(subject.vm_ready?(other_pool_name, notready_vm_name)).to eq(true)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe 'of 100' do
|
||||||
|
before(:each) do
|
||||||
|
config[:providers][:dummy]['vmready_fail_percent'] = 100
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise an error' do
|
||||||
|
expect{subject.vm_ready?(other_pool_name, notready_vm_name)}.to raise_error(/vmready_fail_percent/)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#vm_exists?' do
|
||||||
|
it 'should return true when VM exists' do
|
||||||
|
expect(subject.vm_exists?(pool_name, vm_name)).to eq(true)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return true when VM does not exist' do
|
||||||
|
expect(subject.vm_exists?(pool_name, 'doesnotexist')).to eq(false)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
@ -47,11 +47,14 @@ describe 'Vmpooler::PoolManager::Provider::VSphere' do
|
||||||
:config:
|
:config:
|
||||||
max_tries: 3
|
max_tries: 3
|
||||||
retry_factor: 10
|
retry_factor: 10
|
||||||
:vsphere:
|
:providers:
|
||||||
server: "vcenter.domain.local"
|
:vsphere:
|
||||||
username: "vcenter_user"
|
server: "vcenter.domain.local"
|
||||||
password: "vcenter_password"
|
username: "vcenter_user"
|
||||||
insecure: true
|
password: "vcenter_password"
|
||||||
|
insecure: true
|
||||||
|
# Drop the connection pool timeout way down for spec tests so they fail fast
|
||||||
|
connection_pool_timeout: 1
|
||||||
:pools:
|
:pools:
|
||||||
- name: '#{poolname}'
|
- name: '#{poolname}'
|
||||||
alias: [ 'mockpool' ]
|
alias: [ 'mockpool' ]
|
||||||
|
|
@ -66,14 +69,16 @@ EOT
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
let(:credentials) { config[:vsphere] }
|
|
||||||
|
|
||||||
let(:connection_options) {{}}
|
let(:connection_options) {{}}
|
||||||
let(:connection) { mock_RbVmomi_VIM_Connection(connection_options) }
|
let(:connection) { mock_RbVmomi_VIM_Connection(connection_options) }
|
||||||
let(:vmname) { 'vm1' }
|
let(:vmname) { 'vm1' }
|
||||||
|
|
||||||
subject { Vmpooler::PoolManager::Provider::VSphere.new(config, logger, metrics, 'vsphere', provider_options) }
|
subject { Vmpooler::PoolManager::Provider::VSphere.new(config, logger, metrics, 'vsphere', provider_options) }
|
||||||
|
|
||||||
|
before(:each) do
|
||||||
|
allow(subject).to receive(:vsphere_connection_ok?).and_return(true)
|
||||||
|
end
|
||||||
|
|
||||||
describe '#name' do
|
describe '#name' do
|
||||||
it 'should be vsphere' do
|
it 'should be vsphere' do
|
||||||
expect(subject.name).to eq('vsphere')
|
expect(subject.name).to eq('vsphere')
|
||||||
|
|
@ -85,7 +90,7 @@ EOT
|
||||||
let(:pool_config) { config[:pools][0] }
|
let(:pool_config) { config[:pools][0] }
|
||||||
|
|
||||||
before(:each) do
|
before(:each) do
|
||||||
allow(subject).to receive(:get_connection).and_return(connection)
|
allow(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
end
|
end
|
||||||
|
|
||||||
context 'Given a pool folder that is missing' do
|
context 'Given a pool folder that is missing' do
|
||||||
|
|
@ -94,7 +99,7 @@ EOT
|
||||||
end
|
end
|
||||||
|
|
||||||
it 'should get a connection' do
|
it 'should get a connection' do
|
||||||
expect(subject).to receive(:get_connection).and_return(connection)
|
expect(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
|
|
||||||
subject.vms_in_pool(poolname)
|
subject.vms_in_pool(poolname)
|
||||||
end
|
end
|
||||||
|
|
@ -112,7 +117,7 @@ EOT
|
||||||
end
|
end
|
||||||
|
|
||||||
it 'should get a connection' do
|
it 'should get a connection' do
|
||||||
expect(subject).to receive(:get_connection).and_return(connection)
|
expect(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
|
|
||||||
subject.vms_in_pool(poolname)
|
subject.vms_in_pool(poolname)
|
||||||
end
|
end
|
||||||
|
|
@ -141,7 +146,7 @@ EOT
|
||||||
end
|
end
|
||||||
|
|
||||||
it 'should get a connection' do
|
it 'should get a connection' do
|
||||||
expect(subject).to receive(:get_connection).and_return(connection)
|
expect(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
|
|
||||||
subject.vms_in_pool(poolname)
|
subject.vms_in_pool(poolname)
|
||||||
end
|
end
|
||||||
|
|
@ -156,7 +161,7 @@ EOT
|
||||||
|
|
||||||
describe '#get_vm_host' do
|
describe '#get_vm_host' do
|
||||||
before(:each) do
|
before(:each) do
|
||||||
allow(subject).to receive(:get_connection).and_return(connection)
|
allow(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
expect(subject).to receive(:find_vm).with(vmname,connection).and_return(vm_object)
|
expect(subject).to receive(:find_vm).with(vmname,connection).and_return(vm_object)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|
@ -164,7 +169,7 @@ EOT
|
||||||
let(:vm_object) { nil }
|
let(:vm_object) { nil }
|
||||||
|
|
||||||
it 'should get a connection' do
|
it 'should get a connection' do
|
||||||
expect(subject).to receive(:get_connection).and_return(connection)
|
expect(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
|
|
||||||
subject.get_vm_host(poolname,vmname)
|
subject.get_vm_host(poolname,vmname)
|
||||||
end
|
end
|
||||||
|
|
@ -186,7 +191,7 @@ EOT
|
||||||
end
|
end
|
||||||
|
|
||||||
it 'should get a connection' do
|
it 'should get a connection' do
|
||||||
expect(subject).to receive(:get_connection).and_return(connection)
|
expect(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
|
|
||||||
subject.get_vm_host(poolname,vmname)
|
subject.get_vm_host(poolname,vmname)
|
||||||
end
|
end
|
||||||
|
|
@ -209,7 +214,7 @@ EOT
|
||||||
end
|
end
|
||||||
|
|
||||||
it 'should get a connection' do
|
it 'should get a connection' do
|
||||||
expect(subject).to receive(:get_connection).and_return(connection)
|
expect(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
|
|
||||||
subject.get_vm_host(poolname,vmname)
|
subject.get_vm_host(poolname,vmname)
|
||||||
end
|
end
|
||||||
|
|
@ -224,7 +229,7 @@ EOT
|
||||||
let(:vm_object) { nil }
|
let(:vm_object) { nil }
|
||||||
|
|
||||||
before(:each) do
|
before(:each) do
|
||||||
allow(subject).to receive(:get_connection).and_return(connection)
|
allow(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
expect(subject).to receive(:find_vm).with(vmname,connection).and_return(vm_object)
|
expect(subject).to receive(:find_vm).with(vmname,connection).and_return(vm_object)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|
@ -232,7 +237,7 @@ EOT
|
||||||
let(:vm_object) { nil }
|
let(:vm_object) { nil }
|
||||||
|
|
||||||
it 'should get a connection' do
|
it 'should get a connection' do
|
||||||
expect(subject).to receive(:get_connection).and_return(connection)
|
expect(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
|
|
||||||
subject.find_least_used_compatible_host(poolname,vmname)
|
subject.find_least_used_compatible_host(poolname,vmname)
|
||||||
end
|
end
|
||||||
|
|
@ -251,7 +256,7 @@ EOT
|
||||||
end
|
end
|
||||||
|
|
||||||
it 'should get a connection' do
|
it 'should get a connection' do
|
||||||
expect(subject).to receive(:get_connection).and_return(connection)
|
expect(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
|
|
||||||
subject.find_least_used_compatible_host(poolname,vmname)
|
subject.find_least_used_compatible_host(poolname,vmname)
|
||||||
end
|
end
|
||||||
|
|
@ -273,7 +278,7 @@ EOT
|
||||||
end
|
end
|
||||||
|
|
||||||
it 'should get a connection' do
|
it 'should get a connection' do
|
||||||
expect(subject).to receive(:get_connection).and_return(connection)
|
expect(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
|
|
||||||
subject.find_least_used_compatible_host(poolname,vmname)
|
subject.find_least_used_compatible_host(poolname,vmname)
|
||||||
end
|
end
|
||||||
|
|
@ -294,7 +299,7 @@ EOT
|
||||||
|
|
||||||
before(:each) do
|
before(:each) do
|
||||||
config[:pools][0]['clone_target'] = cluster_name
|
config[:pools][0]['clone_target'] = cluster_name
|
||||||
allow(subject).to receive(:get_connection).and_return(connection)
|
allow(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
allow(subject).to receive(:find_vm).and_return(vm_object)
|
allow(subject).to receive(:find_vm).and_return(vm_object)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|
@ -390,7 +395,7 @@ EOT
|
||||||
describe '#get_vm' do
|
describe '#get_vm' do
|
||||||
let(:vm_object) { nil }
|
let(:vm_object) { nil }
|
||||||
before(:each) do
|
before(:each) do
|
||||||
allow(subject).to receive(:get_connection).and_return(connection)
|
allow(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
expect(subject).to receive(:find_vm).with(vmname,connection).and_return(vm_object)
|
expect(subject).to receive(:find_vm).with(vmname,connection).and_return(vm_object)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|
@ -511,7 +516,7 @@ EOT
|
||||||
let(:new_vm_object) { mock_RbVmomi_VIM_VirtualMachine({ :name => vmname }) }
|
let(:new_vm_object) { mock_RbVmomi_VIM_VirtualMachine({ :name => vmname }) }
|
||||||
|
|
||||||
before(:each) do
|
before(:each) do
|
||||||
allow(subject).to receive(:get_connection).and_return(connection)
|
allow(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
allow(connection.serviceInstance).to receive(:find_datacenter).and_return(datacenter_object)
|
allow(connection.serviceInstance).to receive(:find_datacenter).and_return(datacenter_object)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|
@ -586,7 +591,7 @@ EOT
|
||||||
let(:datastorename) { 'datastore0' }
|
let(:datastorename) { 'datastore0' }
|
||||||
let(:disk_size) { 10 }
|
let(:disk_size) { 10 }
|
||||||
before(:each) do
|
before(:each) do
|
||||||
allow(subject).to receive(:get_connection).and_return(connection)
|
allow(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
allow(subject).to receive(:find_vm).with(vmname, connection).and_return(vm_object)
|
allow(subject).to receive(:find_vm).with(vmname, connection).and_return(vm_object)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|
@ -644,7 +649,7 @@ EOT
|
||||||
let(:vm_object) { mock_RbVmomi_VIM_VirtualMachine({ :name => vmname, :snapshot_tree => snapshot_tree }) }
|
let(:vm_object) { mock_RbVmomi_VIM_VirtualMachine({ :name => vmname, :snapshot_tree => snapshot_tree }) }
|
||||||
|
|
||||||
before(:each) do
|
before(:each) do
|
||||||
allow(subject).to receive(:get_connection).and_return(connection)
|
allow(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
allow(subject).to receive(:find_vm).with(vmname,connection).and_return(vm_object)
|
allow(subject).to receive(:find_vm).with(vmname,connection).and_return(vm_object)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|
@ -699,7 +704,7 @@ EOT
|
||||||
let(:vm_object) { mock_RbVmomi_VIM_VirtualMachine({ :name => vmname, :snapshot_tree => snapshot_tree }) }
|
let(:vm_object) { mock_RbVmomi_VIM_VirtualMachine({ :name => vmname, :snapshot_tree => snapshot_tree }) }
|
||||||
|
|
||||||
before(:each) do
|
before(:each) do
|
||||||
allow(subject).to receive(:get_connection).and_return(connection)
|
allow(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
allow(subject).to receive(:find_vm).with(vmname,connection).and_return(vm_object)
|
allow(subject).to receive(:find_vm).with(vmname,connection).and_return(vm_object)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|
@ -748,7 +753,7 @@ EOT
|
||||||
let(:destroy_task) { mock_RbVmomi_VIM_Task() }
|
let(:destroy_task) { mock_RbVmomi_VIM_Task() }
|
||||||
|
|
||||||
before(:each) do
|
before(:each) do
|
||||||
allow(subject).to receive(:get_connection).and_return(connection)
|
allow(subject).to receive(:connect_to_vsphere).and_return(connection)
|
||||||
end
|
end
|
||||||
|
|
||||||
context 'Given a missing VM name' do
|
context 'Given a missing VM name' do
|
||||||
|
|
@ -877,53 +882,65 @@ EOT
|
||||||
end
|
end
|
||||||
|
|
||||||
# vSphere helper methods
|
# vSphere helper methods
|
||||||
describe '#get_connection' do
|
describe '#ensured_vsphere_connection' do
|
||||||
|
let(:config) { YAML.load(<<-EOT
|
||||||
|
---
|
||||||
|
:config:
|
||||||
|
:providers:
|
||||||
|
:vsphere:
|
||||||
|
# Drop the connection pool timeout way down for spec tests so they fail fast
|
||||||
|
connection_pool_timeout: 1
|
||||||
|
connection_pool_size: 1
|
||||||
|
:pools:
|
||||||
|
EOT
|
||||||
|
)
|
||||||
|
}
|
||||||
|
let(:connection1) { mock_RbVmomi_VIM_Connection(connection_options) }
|
||||||
|
let(:connection2) { mock_RbVmomi_VIM_Connection(connection_options) }
|
||||||
|
|
||||||
before(:each) do
|
before(:each) do
|
||||||
# NOTE - Using instance_variable_set is a code smell of code that is not testable
|
allow(subject).to receive(:connect_to_vsphere).and_return(connection1)
|
||||||
subject.instance_variable_set("@connection",connection)
|
|
||||||
end
|
end
|
||||||
|
|
||||||
context 'when connection is ok' do
|
# This is to ensure that the pool_size of 1 is in effect
|
||||||
it 'should not attempt to reconnect' do
|
it 'should return the same connection object when calling the pool multiple times' do
|
||||||
expect(subject).to receive(:connect_to_vsphere).exactly(0).times
|
subject.connection_pool.with_metrics do |pool_object|
|
||||||
|
expect(pool_object[:connection]).to be(connection1)
|
||||||
subject.get_connection()
|
|
||||||
end
|
end
|
||||||
|
subject.connection_pool.with_metrics do |pool_object|
|
||||||
it 'should return a connection' do
|
expect(pool_object[:connection]).to be(connection1)
|
||||||
result = subject.get_connection()
|
end
|
||||||
|
subject.connection_pool.with_metrics do |pool_object|
|
||||||
expect(result).to be(connection)
|
expect(pool_object[:connection]).to be(connection1)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
context 'when connection has broken' do
|
context 'when the connection breaks' do
|
||||||
before(:each) do
|
before(:each) do
|
||||||
expect(connection.serviceInstance).to receive(:CurrentTime).and_raise(RuntimeError,'MockConnectionError')
|
# Emulate the connection state being good, then bad, then good again
|
||||||
|
expect(subject).to receive(:vsphere_connection_ok?).and_return(true, false, true)
|
||||||
|
expect(subject).to receive(:connect_to_vsphere).and_return(connection1, connection2)
|
||||||
end
|
end
|
||||||
|
|
||||||
it 'should not increment the connect.open metric' do
|
it 'should restore the connection' do
|
||||||
# https://github.com/puppetlabs/vmpooler/issues/195
|
subject.connection_pool.with_metrics do |pool_object|
|
||||||
expect(metrics).to receive(:increment).with('connect.open').exactly(0).times
|
# This line needs to be added to all instances of the connection_pool allocation
|
||||||
allow(subject).to receive(:connect_to_vsphere)
|
connection = subject.ensured_vsphere_connection(pool_object)
|
||||||
|
|
||||||
subject.get_connection()
|
expect(connection).to be(connection1)
|
||||||
end
|
end
|
||||||
|
|
||||||
it 'should call connect_to_vsphere to reconnect' do
|
subject.connection_pool.with_metrics do |pool_object|
|
||||||
allow(metrics).to receive(:increment)
|
connection = subject.ensured_vsphere_connection(pool_object)
|
||||||
expect(subject).to receive(:connect_to_vsphere).with(credentials)
|
# The second connection would have failed. This test ensures that a
|
||||||
|
# new connection object was created.
|
||||||
|
expect(connection).to be(connection2)
|
||||||
|
end
|
||||||
|
|
||||||
subject.get_connection()
|
subject.connection_pool.with_metrics do |pool_object|
|
||||||
end
|
connection = subject.ensured_vsphere_connection(pool_object)
|
||||||
|
expect(connection).to be(connection2)
|
||||||
it 'should return a new connection' do
|
end
|
||||||
new_connection = mock_RbVmomi_VIM_Connection(connection_options)
|
|
||||||
expect(subject).to receive(:connect_to_vsphere).with(credentials).and_return(new_connection)
|
|
||||||
|
|
||||||
result = subject.get_connection()
|
|
||||||
|
|
||||||
expect(result).to be(new_connection)
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
@ -933,6 +950,8 @@ EOT
|
||||||
allow(RbVmomi::VIM).to receive(:connect).and_return(connection)
|
allow(RbVmomi::VIM).to receive(:connect).and_return(connection)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
let (:credentials) { config[:providers][:vsphere] }
|
||||||
|
|
||||||
context 'succesful connection' do
|
context 'succesful connection' do
|
||||||
it 'should use the supplied credentials' do
|
it 'should use the supplied credentials' do
|
||||||
expect(RbVmomi::VIM).to receive(:connect).with({
|
expect(RbVmomi::VIM).to receive(:connect).with({
|
||||||
|
|
@ -941,7 +960,7 @@ EOT
|
||||||
:password => credentials['password'],
|
:password => credentials['password'],
|
||||||
:insecure => credentials['insecure']
|
:insecure => credentials['insecure']
|
||||||
}).and_return(connection)
|
}).and_return(connection)
|
||||||
subject.connect_to_vsphere(credentials)
|
subject.connect_to_vsphere
|
||||||
end
|
end
|
||||||
|
|
||||||
it 'should honor the insecure setting' do
|
it 'should honor the insecure setting' do
|
||||||
|
|
@ -954,11 +973,11 @@ EOT
|
||||||
:password => credentials['password'],
|
:password => credentials['password'],
|
||||||
:insecure => false,
|
:insecure => false,
|
||||||
}).and_return(connection)
|
}).and_return(connection)
|
||||||
subject.connect_to_vsphere(credentials)
|
subject.connect_to_vsphere
|
||||||
end
|
end
|
||||||
|
|
||||||
it 'should default to an insecure connection' do
|
it 'should default to an insecure connection' do
|
||||||
config[:vsphere][:insecure] = nil
|
config[:providers][:vsphere][:insecure] = nil
|
||||||
|
|
||||||
expect(RbVmomi::VIM).to receive(:connect).with({
|
expect(RbVmomi::VIM).to receive(:connect).with({
|
||||||
:host => credentials['server'],
|
:host => credentials['server'],
|
||||||
|
|
@ -967,18 +986,18 @@ EOT
|
||||||
:insecure => true
|
:insecure => true
|
||||||
}).and_return(connection)
|
}).and_return(connection)
|
||||||
|
|
||||||
subject.connect_to_vsphere(credentials)
|
subject.connect_to_vsphere
|
||||||
end
|
end
|
||||||
|
|
||||||
it 'should return the connection object' do
|
it 'should return the connection object' do
|
||||||
result = subject.connect_to_vsphere(credentials)
|
result = subject.connect_to_vsphere
|
||||||
|
|
||||||
expect(result).to be(connection)
|
expect(result).to be(connection)
|
||||||
end
|
end
|
||||||
|
|
||||||
it 'should increment the connect.open counter' do
|
it 'should increment the connect.open counter' do
|
||||||
expect(metrics).to receive(:increment).with('connect.open')
|
expect(metrics).to receive(:increment).with('connect.open')
|
||||||
subject.connect_to_vsphere(credentials)
|
subject.connect_to_vsphere
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|
@ -992,7 +1011,7 @@ EOT
|
||||||
end
|
end
|
||||||
|
|
||||||
it 'should return the connection object' do
|
it 'should return the connection object' do
|
||||||
result = subject.connect_to_vsphere(credentials)
|
result = subject.connect_to_vsphere
|
||||||
|
|
||||||
expect(result).to be(connection)
|
expect(result).to be(connection)
|
||||||
end
|
end
|
||||||
|
|
@ -1000,7 +1019,7 @@ EOT
|
||||||
it 'should increment the connect.fail and then connect.open counter' do
|
it 'should increment the connect.fail and then connect.open counter' do
|
||||||
expect(metrics).to receive(:increment).with('connect.fail').exactly(1).times
|
expect(metrics).to receive(:increment).with('connect.fail').exactly(1).times
|
||||||
expect(metrics).to receive(:increment).with('connect.open').exactly(1).times
|
expect(metrics).to receive(:increment).with('connect.open').exactly(1).times
|
||||||
subject.connect_to_vsphere(credentials)
|
subject.connect_to_vsphere
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|
@ -1011,7 +1030,7 @@ EOT
|
||||||
end
|
end
|
||||||
|
|
||||||
it 'should raise an error' do
|
it 'should raise an error' do
|
||||||
expect{subject.connect_to_vsphere(credentials)}.to raise_error(RuntimeError,'MockError')
|
expect{subject.connect_to_vsphere}.to raise_error(RuntimeError,'MockError')
|
||||||
end
|
end
|
||||||
|
|
||||||
it 'should retry the connection attempt config.max_tries times' do
|
it 'should retry the connection attempt config.max_tries times' do
|
||||||
|
|
@ -1020,7 +1039,7 @@ EOT
|
||||||
|
|
||||||
begin
|
begin
|
||||||
# Swallow any errors
|
# Swallow any errors
|
||||||
subject.connect_to_vsphere(credentials)
|
subject.connect_to_vsphere
|
||||||
rescue
|
rescue
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
@ -1031,7 +1050,7 @@ EOT
|
||||||
|
|
||||||
begin
|
begin
|
||||||
# Swallow any errors
|
# Swallow any errors
|
||||||
subject.connect_to_vsphere(credentials)
|
subject.connect_to_vsphere
|
||||||
rescue
|
rescue
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
@ -1051,7 +1070,7 @@ EOT
|
||||||
|
|
||||||
begin
|
begin
|
||||||
# Swallow any errors
|
# Swallow any errors
|
||||||
subject.connect_to_vsphere(credentials)
|
subject.connect_to_vsphere
|
||||||
rescue
|
rescue
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
@ -2827,30 +2846,4 @@ EOT
|
||||||
expect(subject.migrate_vm_host(vm_object,host_object)).to eq('RELOCATE_RESULT')
|
expect(subject.migrate_vm_host(vm_object,host_object)).to eq('RELOCATE_RESULT')
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
describe '#close' do
|
|
||||||
context 'no connection has been made' do
|
|
||||||
before(:each) do
|
|
||||||
# NOTE - Using instance_variable_set is a code smell of code that is not testable
|
|
||||||
subject.instance_variable_set("@connection",nil)
|
|
||||||
end
|
|
||||||
|
|
||||||
it 'should not error' do
|
|
||||||
pending('https://github.com/puppetlabs/vmpooler/issues/211')
|
|
||||||
subject.close
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
context 'on an open connection' do
|
|
||||||
before(:each) do
|
|
||||||
# NOTE - Using instance_variable_set is a code smell of code that is not testable
|
|
||||||
subject.instance_variable_set("@connection",connection)
|
|
||||||
end
|
|
||||||
|
|
||||||
it 'should close the underlying connection object' do
|
|
||||||
expect(connection).to receive(:close)
|
|
||||||
subject.close
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
|
|
|
||||||
File diff suppressed because it is too large
Load diff
|
|
@ -1,9 +1,20 @@
|
||||||
---
|
---
|
||||||
|
:providers:
|
||||||
|
# :providers:
|
||||||
|
#
|
||||||
|
# This section contains the VM providers for VMs and Pools
|
||||||
|
# The currently supported backing services are:
|
||||||
|
# - vsphere
|
||||||
|
# - dummy
|
||||||
|
|
||||||
# :vsphere:
|
# :vsphere:
|
||||||
#
|
#
|
||||||
# This section contains the server hostname and authentication credentials
|
# This section contains the server hostname and authentication credentials
|
||||||
# needed for vmpooler to connect to VMware vSphere.
|
# needed for vmpooler to connect to VMware vSphere.
|
||||||
#
|
#
|
||||||
|
# NOTE - To support older configuration files, a :vsphere: configuration section
|
||||||
|
# will be copied into :providers:/:vsphere: if one does not already exist.
|
||||||
|
#
|
||||||
# Available configuration parameters:
|
# Available configuration parameters:
|
||||||
#
|
#
|
||||||
# - server
|
# - server
|
||||||
|
|
@ -17,13 +28,109 @@
|
||||||
# - password
|
# - password
|
||||||
# The password used to authenticate VMware vSphere.
|
# The password used to authenticate VMware vSphere.
|
||||||
# (required)
|
# (required)
|
||||||
|
#
|
||||||
|
# - insecure
|
||||||
|
# Whether to ignore any HTTPS negotiation errors (e.g. untrusted self-signed certificates)
|
||||||
|
# (optional: default true)
|
||||||
|
# Example:
|
||||||
|
|
||||||
|
:vsphere:
|
||||||
|
server: 'vsphere.company.com'
|
||||||
|
username: 'vmpooler'
|
||||||
|
password: 'swimsw1msw!m'
|
||||||
|
|
||||||
|
# :dummy:
|
||||||
|
#
|
||||||
|
# The dummy backing service is a simple text file service that can be used
|
||||||
|
# to test vmpooler operations in a development or test environment
|
||||||
|
#
|
||||||
|
# Available configuration parameters:
|
||||||
|
#
|
||||||
|
# - filename (Optional)
|
||||||
|
# The filename used to store the backing text file. If this is not specified the VM state is only
|
||||||
|
# kept in memory, and is lost when the Provider is shutdown
|
||||||
|
#
|
||||||
|
# - connection_pool_size (Optional)
|
||||||
|
# The size of the dummy connection pool. This can be used to simulate constrained provider resources e.g. 200 pools sharing on connection
|
||||||
|
# (optional; default 1)
|
||||||
|
#
|
||||||
|
# - connection_pool_timeout (Optional)
|
||||||
|
# The number of seconds to wait for a connection object from the pool. If the timeout is exceeded an error is raised
|
||||||
|
# (optional; default 10 seconds)
|
||||||
|
#
|
||||||
|
# - migratevm_couldmove_percent
|
||||||
|
# Percent chance that a VM could be moved to another host
|
||||||
|
# (optional; default 0%)
|
||||||
|
#
|
||||||
|
# - migratevm_max_time
|
||||||
|
# Maximum amount of random time a VM migration action will take in seconds
|
||||||
|
# (optional; default 0 seconds)
|
||||||
|
#
|
||||||
|
# - migratevm_fail_percent
|
||||||
|
# Percent chance that a VM migration action will fail
|
||||||
|
# (optional; default 0%)
|
||||||
|
#
|
||||||
|
# - getvm_poweroff_percent
|
||||||
|
# Percent chance that when the VM information is gathered that the VM will be powered off
|
||||||
|
# (optional; default 0%)
|
||||||
|
#
|
||||||
|
# - getvm_rename_percent
|
||||||
|
# Percent chance that when the VM information is gathered that the VM will be renamed
|
||||||
|
# (optional; default 0%)
|
||||||
|
#
|
||||||
|
# - createvm_max_time
|
||||||
|
# Maximum amount of random time a VM creation action will take, in seconds
|
||||||
|
# (optional; default 0 seconds)
|
||||||
|
#
|
||||||
|
# - createvm_fail_percent
|
||||||
|
# Percent chance that a VM creation action will fail
|
||||||
|
# (optional; default 0%)
|
||||||
|
#
|
||||||
|
# - createdisk_max_time
|
||||||
|
# Maximum amount of random time a VM create disk action will take, in seconds
|
||||||
|
# (optional; default 0 seconds)
|
||||||
|
#
|
||||||
|
# - createdisk_fail_percent
|
||||||
|
# Percent chance that a VM create disk action will fail
|
||||||
|
# (optional; default 0%)
|
||||||
|
#
|
||||||
|
# - createsnapshot_max_time
|
||||||
|
# Maximum amount of random time a VM create snapshot action will take, in seconds
|
||||||
|
# (optional; default 0 seconds)
|
||||||
|
#
|
||||||
|
# - createsnapshot_fail_percent
|
||||||
|
# Percent chance that a VM create snapshot action will fail
|
||||||
|
# (optional; default 0%)
|
||||||
|
#
|
||||||
|
# - revertsnapshot_max_time
|
||||||
|
# Maximum amount of random time a VM revert snapshot action will take, in seconds
|
||||||
|
# (optional; default 0 seconds)
|
||||||
|
#
|
||||||
|
# - revertsnapshot_fail_percent
|
||||||
|
# Percent chance that a VM revert snapshot action will fail
|
||||||
|
# (optional; default 0%)
|
||||||
|
#
|
||||||
|
# - destroyvm_max_shutdown_time
|
||||||
|
# Maximum amount of random time a VM shutdown action will take during destroy, in seconds
|
||||||
|
# (optional; default 0 seconds)
|
||||||
|
#
|
||||||
|
# - destroyvm_max_time
|
||||||
|
# Maximum amount of random time a VM destroy action will take, in seconds
|
||||||
|
# (optional; default 0 seconds)
|
||||||
|
#
|
||||||
|
# - destroyvm_fail_percent
|
||||||
|
# Percent chance that a VM destroy action will fail
|
||||||
|
# (optional; default 0%)
|
||||||
|
#
|
||||||
|
# - vmready_fail_percent
|
||||||
|
# Percent chance that an error is raised when vm_ready? is called
|
||||||
|
# (optional; default 0%)
|
||||||
|
|
||||||
# Example:
|
# Example:
|
||||||
|
|
||||||
:vsphere:
|
:dummy:
|
||||||
server: 'vsphere.company.com'
|
filename: '/tmp/dummy-backing.yaml'
|
||||||
username: 'vmpooler'
|
|
||||||
password: 'swimsw1msw!m'
|
|
||||||
|
|
||||||
# :redis:
|
# :redis:
|
||||||
#
|
#
|
||||||
|
|
@ -54,58 +161,58 @@
|
||||||
server: 'redis.company.com'
|
server: 'redis.company.com'
|
||||||
|
|
||||||
|
|
||||||
# :graphs:
|
# :graphs:
|
||||||
#
|
#
|
||||||
# This section contains the server and prefix information for a graphite-
|
# This section contains the server and prefix information for a graphite-
|
||||||
# compatible web front-end where graphs may be viewed. This is used by the
|
# compatible web front-end where graphs may be viewed. This is used by the
|
||||||
# vmpooler dashboard to retrieve statistics and graphs for a given instance.
|
# vmpooler dashboard to retrieve statistics and graphs for a given instance.
|
||||||
#
|
#
|
||||||
# NOTE: This is not the endpoint for publishing metrics data. See `graphite:`
|
# NOTE: This is not the endpoint for publishing metrics data. See `graphite:`
|
||||||
# and `statsd:` below.
|
# and `statsd:` below.
|
||||||
#
|
#
|
||||||
# NOTE: If `graphs:` is not set, for legacy compatibility, `graphite:` will be
|
# NOTE: If `graphs:` is not set, for legacy compatibility, `graphite:` will be
|
||||||
# consulted for `server`/`prefix` information to use in locating a
|
# consulted for `server`/`prefix` information to use in locating a
|
||||||
# graph server for our dashboard. `graphs:` is recommended over
|
# graph server for our dashboard. `graphs:` is recommended over
|
||||||
# `graphite:`
|
# `graphite:`
|
||||||
#
|
#
|
||||||
#
|
#
|
||||||
# Available configuration parameters:
|
# Available configuration parameters:
|
||||||
#
|
#
|
||||||
#
|
#
|
||||||
# - server
|
# - server
|
||||||
# The FQDN hostname of the statsd daemon.
|
# The FQDN hostname of the statsd daemon.
|
||||||
# (required)
|
# (required)
|
||||||
#
|
#
|
||||||
# - prefix
|
# - prefix
|
||||||
# The prefix to use while storing statsd data.
|
# The prefix to use while storing statsd data.
|
||||||
# (optional; default: 'vmpooler')
|
# (optional; default: 'vmpooler')
|
||||||
|
|
||||||
# :statsd:
|
# :statsd:
|
||||||
#
|
#
|
||||||
# This section contains the connection information required to store
|
# This section contains the connection information required to store
|
||||||
# historical data via statsd. This is mutually exclusive with graphite
|
# historical data via statsd. This is mutually exclusive with graphite
|
||||||
# and takes precedence.
|
# and takes precedence.
|
||||||
#
|
#
|
||||||
# Available configuration parameters:
|
# Available configuration parameters:
|
||||||
#
|
#
|
||||||
# - server
|
# - server
|
||||||
# The FQDN hostname of the statsd daemon.
|
# The FQDN hostname of the statsd daemon.
|
||||||
# (required)
|
# (required)
|
||||||
#
|
#
|
||||||
# - prefix
|
# - prefix
|
||||||
# The prefix to use while storing statsd data.
|
# The prefix to use while storing statsd data.
|
||||||
# (optional; default: 'vmpooler')
|
# (optional; default: 'vmpooler')
|
||||||
#
|
#
|
||||||
# - port
|
# - port
|
||||||
# The UDP port to communicate with the statsd daemon.
|
# The UDP port to communicate with the statsd daemon.
|
||||||
# (optional; default: 8125)
|
# (optional; default: 8125)
|
||||||
|
|
||||||
# Example:
|
# Example:
|
||||||
|
|
||||||
:statsd:
|
:statsd:
|
||||||
server: 'statsd.company.com'
|
server: 'statsd.company.com'
|
||||||
prefix: 'vmpooler'
|
prefix: 'vmpooler'
|
||||||
port: 8125
|
port: 8125
|
||||||
|
|
||||||
# :graphite:
|
# :graphite:
|
||||||
#
|
#
|
||||||
|
|
@ -217,7 +324,7 @@
|
||||||
# (optional; default: same cluster/host as origin template)
|
# (optional; default: same cluster/host as origin template)
|
||||||
#
|
#
|
||||||
# - task_limit
|
# - task_limit
|
||||||
# The number of concurrent VMware vSphere tasks to perform.
|
# The number of concurrent VM creation tasks to perform.
|
||||||
# (optional; default: '10')
|
# (optional; default: '10')
|
||||||
#
|
#
|
||||||
# - timeout
|
# - timeout
|
||||||
|
|
@ -249,21 +356,23 @@
|
||||||
#
|
#
|
||||||
# - migration_limit
|
# - migration_limit
|
||||||
# When set to any value greater than 0 enable VM migration at checkout.
|
# When set to any value greater than 0 enable VM migration at checkout.
|
||||||
# When enabled this capability will evaluate a VM for migration when it is requested
|
# When enabled this capability will evaluate a VM for migration to a different host when it is requested
|
||||||
# in an effort to maintain a more even distribution of load across compute resources.
|
# in an effort to maintain a more even distribution of load across compute resources.
|
||||||
# The migration_limit ensures that no more than n migrations will be evaluated at any one time
|
# The migration_limit ensures that no more than the specified number of migrations will be evaluated at any one time
|
||||||
# and greatly reduces the possibility of VMs ending up bunched together on a particular host.
|
# and greatly reduces the possibility of VMs ending up bunched together on a particular host.
|
||||||
#
|
#
|
||||||
# - max_tries
|
# - max_tries
|
||||||
# Set the max number of times a connection should retry in vsphere helper.
|
# Set the max number of times a connection should retry in VM providers.
|
||||||
# This optional setting allows a user to dial in retry limits to
|
# This optional setting allows a user to dial in retry limits to
|
||||||
# suit your environment.
|
# suit your environment.
|
||||||
|
# (optional; default: 3)
|
||||||
#
|
#
|
||||||
# - retry_factor
|
# - retry_factor
|
||||||
# When retrying, each attempt sleeps for the try count * retry_factor.
|
# When retrying, each attempt sleeps for the try count * retry_factor.
|
||||||
# Increase this number to lengthen the delay between retry attempts.
|
# Increase this number to lengthen the delay between retry attempts.
|
||||||
# This is particularly useful for instances with a large number of pools
|
# This is particularly useful for instances with a large number of pools
|
||||||
# to prevent a thundering herd when retrying connections.
|
# to prevent a thundering herd when retrying connections.
|
||||||
|
# (optional; default: 10)
|
||||||
|
|
||||||
# Example:
|
# Example:
|
||||||
|
|
||||||
|
|
@ -300,18 +409,15 @@
|
||||||
# The template or virtual machine target to spawn clones from.
|
# The template or virtual machine target to spawn clones from.
|
||||||
# (required)
|
# (required)
|
||||||
#
|
#
|
||||||
# - folder
|
|
||||||
# The vSphere 'folder' destination for spawned clones.
|
|
||||||
# (required)
|
|
||||||
#
|
|
||||||
# - datastore
|
|
||||||
# The vSphere 'datastore' destination for spawned clones.
|
|
||||||
# (required)
|
|
||||||
#
|
|
||||||
# - size
|
# - size
|
||||||
# The number of waiting VMs to keep in a pool.
|
# The number of waiting VMs to keep in a pool.
|
||||||
# (required)
|
# (required)
|
||||||
#
|
#
|
||||||
|
# - provider
|
||||||
|
# The name of the VM provider which manages this pool. This should match
|
||||||
|
# a name in the :providers: section above e.g. vsphere
|
||||||
|
# (optional; defaults to vsphere for backwards compatibility)
|
||||||
|
#
|
||||||
# - clone_target
|
# - clone_target
|
||||||
# Per-pool option to override the global 'clone_target' cluster.
|
# Per-pool option to override the global 'clone_target' cluster.
|
||||||
# (optional)
|
# (optional)
|
||||||
|
|
@ -323,8 +429,18 @@
|
||||||
#
|
#
|
||||||
# - ready_ttl
|
# - ready_ttl
|
||||||
# How long (in minutes) to keep VMs in 'ready' queues before destroying.
|
# How long (in minutes) to keep VMs in 'ready' queues before destroying.
|
||||||
# (optional)
|
# (optional; default: no limit)
|
||||||
|
#
|
||||||
|
# Provider specific pool settings
|
||||||
|
# vSphere provider
|
||||||
|
# - folder
|
||||||
|
# The vSphere 'folder' destination for spawned clones.
|
||||||
|
# (required)
|
||||||
|
#
|
||||||
|
# - datastore
|
||||||
|
# The vSphere 'datastore' destination for spawned clones.
|
||||||
|
# (required)
|
||||||
|
#
|
||||||
# Example:
|
# Example:
|
||||||
|
|
||||||
:pools:
|
:pools:
|
||||||
|
|
@ -336,6 +452,7 @@
|
||||||
size: 5
|
size: 5
|
||||||
timeout: 15
|
timeout: 15
|
||||||
ready_ttl: 1440
|
ready_ttl: 1440
|
||||||
|
provider: vsphere
|
||||||
- name: 'debian-7-x86_64'
|
- name: 'debian-7-x86_64'
|
||||||
alias: [ 'debian-7-64', 'debian-7-amd64' ]
|
alias: [ 'debian-7-64', 'debian-7-amd64' ]
|
||||||
template: 'Templates/debian-7-x86_64'
|
template: 'Templates/debian-7-x86_64'
|
||||||
|
|
@ -344,3 +461,4 @@
|
||||||
size: 5
|
size: 5
|
||||||
timeout: 15
|
timeout: 15
|
||||||
ready_ttl: 1440
|
ready_ttl: 1440
|
||||||
|
provider: vsphere
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue