Fix Rubocop "safe" auto-corrections
Generated using `bundle exec rubocop --safe --auto-correct`
parent 252a2c2344, commit 29519006fa
14 changed files with 327 additions and 302 deletions
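
For reference, `rubocop --safe --auto-correct` applies only corrections that RuboCop classifies as safe, i.e. mechanical rewrites that preserve behaviour. The sketch below distills the recurring patterns from the hunks that follow into runnable Ruby; the cop names in the comments are annotations based on common RuboCop cop naming, and the helper method names are invented for illustration — neither is part of the commit itself.

# Style/RescueStandardError: a bare `rescue` (or `rescue => err`) already
# catches StandardError, not Exception; naming the class makes that explicit.
def load_settings
  parse_settings_file
rescue StandardError => e            # was: rescue => err
  warn "failed to load settings: #{e}"
  {}
end

# Style/For: `for` leaks its loop variable into the surrounding scope,
# so an `each` block is preferred.
def pool_index(pools)
  pools_hash = {}
  index = 0
  pools.each do |pool|               # was: for pool in pools
    pools_hash[pool['name']] = index
    index += 1
  end
  pools_hash
end

# Style/SafeNavigation: a nil guard plus method call collapses to `&.`.
def record_available(metrics, count)
  metrics&.gauge('pool.available', count)  # was: metrics.gauge(...) unless metrics.nil?
end

# Style/RandomWithOffset: `1 + rand(100)` becomes a range argument.
def percent_roll
  rand(1..100)                       # was: 1 + rand(100)
end

def parse_settings_file
  {}
end

The real corrections in this commit touch vmpooler's own methods, shown verbatim in the diff below.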
@@ -58,7 +58,7 @@ module Vmpooler
 parsed_config[:config]['task_limit'] = string_to_int(ENV['TASK_LIMIT']) || parsed_config[:config]['task_limit'] || 10
 parsed_config[:config]['migration_limit'] = string_to_int(ENV['MIGRATION_LIMIT']) if ENV['MIGRATION_LIMIT']
 parsed_config[:config]['vm_checktime'] = string_to_int(ENV['VM_CHECKTIME']) || parsed_config[:config]['vm_checktime'] || 1
-parsed_config[:config]['vm_lifetime'] = string_to_int(ENV['VM_LIFETIME']) || parsed_config[:config]['vm_lifetime'] || 24
+parsed_config[:config]['vm_lifetime'] = string_to_int(ENV['VM_LIFETIME']) || parsed_config[:config]['vm_lifetime'] || 24
 parsed_config[:config]['prefix'] = ENV['PREFIX'] || parsed_config[:config]['prefix'] || ''

 parsed_config[:config]['logfile'] = ENV['LOGFILE'] if ENV['LOGFILE']

@@ -130,10 +130,8 @@ module Vmpooler
 end
 end

-if parsed_config[:tagfilter]
-parsed_config[:tagfilter].keys.each do |tag|
-parsed_config[:tagfilter][tag] = Regexp.new(parsed_config[:tagfilter][tag])
-end
-end
+parsed_config[:tagfilter]&.keys&.each do |tag|
+parsed_config[:tagfilter][tag] = Regexp.new(parsed_config[:tagfilter][tag])
+end

 parsed_config[:uptime] = Time.now

@@ -179,7 +177,7 @@ module Vmpooler
 def self.pool_index(pools)
 pools_hash = {}
 index = 0
-for pool in pools
+pools.each do |pool|
 pools_hash[pool['name']] = index
 index += 1
 end

@@ -190,11 +188,12 @@ module Vmpooler
 # Returns a integer if input is a string
 return if s.nil?
 return unless s =~ /\d/
-return Integer(s)
+
+Integer(s)
 end

 def self.true?(obj)
-obj.to_s.downcase == "true"
+obj.to_s.downcase == 'true'
 end

 def self.set_linked_clone(parsed_config)

@@ -1,7 +1,6 @@
 module Vmpooler
 class API
 class Dashboard < Sinatra::Base
-
 helpers do
 include Vmpooler::API::Helpers
 end

@@ -21,9 +20,11 @@ module Vmpooler

 if config[:graphs]
 return false unless config[:graphs]['server']
+
 @graph_server = config[:graphs]['server']
 elsif config[:graphite]
 return false unless config[:graphite]['server']
+
 @graph_server = config[:graphite]['server']
 else
 false

@@ -36,9 +37,11 @@ module Vmpooler

 if config[:graphs]
 return 'vmpooler' unless config[:graphs]['prefix']
+
 @graph_prefix = config[:graphs]['prefix']
 elsif config[:graphite]
 return false unless config[:graphite]['prefix']
+
 @graph_prefix = config[:graphite]['prefix']
 else
 false

@@ -48,12 +51,14 @@ module Vmpooler
 # what is the base URL for viewable graphs?
 def graph_url
 return false unless graph_server && graph_prefix
+
 @graph_url ||= "http://#{graph_server}/render?target=#{graph_prefix}"
 end

 # return a full URL to a viewable graph for a given metrics target (graphite syntax)
 def graph_link(target = '')
 return '' unless graph_url
+
 graph_url + target
 end

@@ -100,7 +105,7 @@ module Vmpooler
 end
 end
 end
-rescue
+rescue StandardError
 end
 else
 pools.each do |pool|

@@ -147,7 +152,7 @@ module Vmpooler
 end
 end
 end
-rescue
+rescue StandardError
 end
 end
 end

@@ -71,6 +71,7 @@ module Vmpooler
 )

 return true if ldap.bind
+
 return false
 end

@@ -90,7 +91,7 @@ module Vmpooler
 auth[:ldap]['user_object'],
 search_base,
 username_str,
-password_str,
+password_str
 )
 return true if result == true
 end

@@ -101,7 +102,7 @@ module Vmpooler
 auth[:ldap]['user_object'],
 ldap_base,
 username_str,
-password_str,
+password_str
 )
 return result
 end

@@ -124,6 +125,7 @@ module Vmpooler

 tags.each_pair do |tag, value|
 next unless filter = Vmpooler::API.settings.config[:tagfilter][tag]
+
 tags[tag] = value.match(filter).captures.join if value.match(filter)
 end

@@ -161,7 +163,7 @@ module Vmpooler
 backend.scard(key + pool['name'])
 end
 end
-res.inject(0){ |m, x| m+x }.to_i
+res.inject(0) { |m, x| m + x }.to_i
 end

 # Takes the pools and a key to run scard on

@@ -201,12 +203,12 @@ module Vmpooler
 def get_capacity_metrics(pools, backend)
 capacity = {
 current: 0,
-total: 0,
+total: 0,
 percent: 0
 }

 pools.each do |pool|
-capacity[:total] += pool['size'].to_i
+capacity[:total] += pool['size'].to_i
 end

 capacity[:current] = get_total_across_pools_redis_scard(pools, 'vmpooler__ready__', backend)

@@ -220,16 +222,16 @@ module Vmpooler

 def get_queue_metrics(pools, backend)
 queue = {
-pending: 0,
-cloning: 0,
-booting: 0,
-ready: 0,
-running: 0,
+pending: 0,
+cloning: 0,
+booting: 0,
+ready: 0,
+running: 0,
 completed: 0,
-total: 0
+total: 0
 }

-queue[:pending] = get_total_across_pools_redis_scard(pools,'vmpooler__pending__', backend)
+queue[:pending] = get_total_across_pools_redis_scard(pools, 'vmpooler__pending__', backend)
 queue[:ready] = get_total_across_pools_redis_scard(pools, 'vmpooler__ready__', backend)
 queue[:running] = get_total_across_pools_redis_scard(pools, 'vmpooler__running__', backend)
 queue[:completed] = get_total_across_pools_redis_scard(pools, 'vmpooler__completed__', backend)

@@ -306,11 +308,11 @@ module Vmpooler
 task = {
 duration: {
 average: 0,
-min: 0,
-max: 0,
-total: 0
+min: 0,
+max: 0,
+total: 0
 },
-count: {
+count: {
 total: 0
 }
 }

@@ -450,7 +452,7 @@ module Vmpooler
 def pool_index(pools)
 pools_hash = {}
 index = 0
-for pool in pools
+pools.each do |pool|
 pools_hash[pool['name']] = index
 index += 1
 end

@@ -461,13 +463,14 @@ module Vmpooler
 prepared_template = backend.hget('vmpooler__template__prepared', pool['name'])
 return false if prepared_template.nil?
 return true if pool['template'] == prepared_template
+
 return false
 end

 def is_integer?(x)
 Integer(x)
 true
-rescue
+rescue StandardError
 false
 end

@@ -487,7 +490,7 @@ module Vmpooler
 def vm_ready?(vm_name, domain = nil)
 begin
 open_socket(vm_name, domain)
-rescue => _err
+rescue StandardError => _e
 return false
 end

@@ -37,22 +37,24 @@ module Vmpooler
 end

 def checkoutlock
-Vmpooler::API::settings.checkoutlock
+Vmpooler::API.settings.checkoutlock
 end

 def fetch_single_vm(template)
 template_backends = [template]
 aliases = Vmpooler::API.settings.config[:alias]
 if aliases
-template_backends = template_backends + aliases[template] if aliases[template].is_a?(Array)
+template_backends += aliases[template] if aliases[template].is_a?(Array)
 template_backends << aliases[template] if aliases[template].is_a?(String)
 pool_index = pool_index(pools)
 weighted_pools = {}
 template_backends.each do |t|
 next unless pool_index.key? t
+
 index = pool_index[t]
 clone_target = pools[index]['clone_target'] || config['clone_target']
 next unless config.key?('backend_weight')
+
 weight = config['backend_weight'][clone_target]
 if weight
 weighted_pools[t] = weight

@@ -75,6 +77,7 @@ module Vmpooler
 template_backends.each do |template_backend|
 vms = backend.smembers("vmpooler__ready__#{template_backend}")
 next if vms.empty?
+
 vms.reverse.each do |vm|
 ready = vm_ready?(vm, config['domain'])
 if ready

@@ -104,7 +107,7 @@ module Vmpooler

 backend.hset('vmpooler__vm__' + vm, 'token:token', request.env['HTTP_X_AUTH_TOKEN'])
 backend.hset('vmpooler__vm__' + vm, 'token:user',
-backend.hget('vmpooler__token__' + request.env['HTTP_X_AUTH_TOKEN'], 'user')
+backend.hget('vmpooler__token__' + request.env['HTTP_X_AUTH_TOKEN'], 'user')
 )

 if config['vm_lifetime_auth'].to_i > 0

@@ -136,14 +139,14 @@ module Vmpooler
 metrics.increment('checkout.empty.' + requested)
 break
 else
-vms << [ vmpool, vmname, vmtemplate ]
+vms << [vmpool, vmname, vmtemplate]
 metrics.increment('checkout.success.' + vmtemplate)
 end
 end
 end

 if failed
-vms.each do |(vmpool, vmname, vmtemplate)|
+vms.each do |(vmpool, vmname, _vmtemplate)|
 return_vm_to_ready_state(vmpool, vmname)
 end
 status 503

@@ -203,7 +206,7 @@ module Vmpooler
 def reset_pool(payload)
 result = { 'ok' => false }

-payload.each do |poolname, count|
+payload.each do |poolname, _count|
 backend.sadd('vmpooler__poolreset', poolname)
 end
 status 201

@@ -234,42 +237,36 @@ module Vmpooler
 def sync_pool_templates
 pool_index = pool_index(pools)
 template_configs = backend.hgetall('vmpooler__config__template')
-unless template_configs.nil?
-template_configs.each do |poolname, template|
+template_configs&.each do |poolname, template|
 if pool_index.include? poolname
 unless pools[pool_index[poolname]]['template'] == template
 pools[pool_index[poolname]]['template'] = template
 end
 end
 end
-end
 end

 def sync_pool_sizes
 pool_index = pool_index(pools)
 poolsize_configs = backend.hgetall('vmpooler__config__poolsize')
-unless poolsize_configs.nil?
-poolsize_configs.each do |poolname, size|
+poolsize_configs&.each do |poolname, size|
 if pool_index.include? poolname
 unless pools[pool_index[poolname]]['size'] == size.to_i
 pools[pool_index[poolname]]['size'] == size.to_i
 end
 end
 end
-end
 end

 def sync_clone_targets
 pool_index = pool_index(pools)
 clone_target_configs = backend.hgetall('vmpooler__config__clone_target')
-unless clone_target_configs.nil?
-clone_target_configs.each do |poolname, clone_target|
+clone_target_configs&.each do |poolname, clone_target|
 if pool_index.include? poolname
 unless pools[pool_index[poolname]]['clone_target'] == clone_target
 pools[pool_index[poolname]]['clone_target'] == clone_target
 end
 end
 end
-end
 end

@@ -368,36 +365,38 @@ module Vmpooler
 pending_hash = get_list_across_pools_redis_scard(pools, 'vmpooler__pending__', backend)
 lastBoot_hash = get_list_across_pools_redis_hget(pools, 'vmpooler__lastboot', backend)

-pools.each do |pool|
-# REMIND: move this out of the API and into the back-end
-ready = ready_hash[pool['name']]
-running = running_hash[pool['name']]
-pending = pending_hash[pool['name']]
-max = pool['size']
-lastBoot = lastBoot_hash[pool['name']]
-aka = pool['alias']
-
-result[:pools][pool['name']] = {
-ready: ready,
-running: running,
-pending: pending,
-max: max,
-lastBoot: lastBoot
-}
-
-if aka
-result[:pools][pool['name']][:alias] = aka
-end
-
-# for backwards compatibility, include separate "empty" stats in "status" block
-if ready == 0
-result[:status][:empty] ||= []
-result[:status][:empty].push(pool['name'])
-
-result[:status][:ok] = false
-result[:status][:message] = "Found #{result[:status][:empty].length} empty pools."
-end
-end unless views and not views.include?("pools")
+unless views and not views.include?("pools")
+pools.each do |pool|
+# REMIND: move this out of the API and into the back-end
+ready = ready_hash[pool['name']]
+running = running_hash[pool['name']]
+pending = pending_hash[pool['name']]
+max = pool['size']
+lastBoot = lastBoot_hash[pool['name']]
+aka = pool['alias']
+
+result[:pools][pool['name']] = {
+ready: ready,
+running: running,
+pending: pending,
+max: max,
+lastBoot: lastBoot
+}
+
+if aka
+result[:pools][pool['name']][:alias] = aka
+end
+
+# for backwards compatibility, include separate "empty" stats in "status" block
+if ready == 0
+result[:status][:empty] ||= []
+result[:status][:empty].push(pool['name'])
+
+result[:status][:ok] = false
+result[:status][:message] = "Found #{result[:status][:empty].length} empty pools."
+end
+end
+end

 result[:status][:uptime] = (Time.now - Vmpooler::API.settings.config[:uptime]).round(1) if Vmpooler::API.settings.config[:uptime]

@@ -442,7 +441,6 @@ module Vmpooler
 if aka
 result[:pools][pool['name']][:alias] = aka
 end
-
 end

 ready_hash = get_list_across_pools_redis_scard(poolscopy, 'vmpooler__ready__', backend)

@@ -456,7 +454,7 @@ module Vmpooler
 get "#{api_prefix}/totalrunning/?" do
 content_type :json
 queue = {
-running: 0,
+running: 0
 }

 queue[:running] = get_total_across_pools_redis_scard(pools, 'vmpooler__running__', backend)

@@ -753,7 +751,7 @@ module Vmpooler

 def invalid_pool(payload)
 invalid = []
-payload.each do |pool, clone_target|
+payload.each do |pool, _clone_target|
 invalid << pool unless pool_exists?(pool)
 end
 invalid

@@ -837,7 +835,7 @@ module Vmpooler
 # Look up IP address of the hostname
 begin
 ipAddress = TCPSocket.gethostbyname(params[:hostname])[3]
-rescue
+rescue StandardError
 ipAddress = ""
 end

@@ -893,7 +891,7 @@ module Vmpooler
 if backend.exists('vmpooler__vm__' + params[:hostname])
 begin
 jdata = JSON.parse(request.body.read)
-rescue
+rescue StandardError
 halt 400, JSON.pretty_generate(result)
 end

@@ -1,6 +1,5 @@
 module Vmpooler
 class Dashboard < Sinatra::Base
-
 def config
 Vmpooler.config
 end

@@ -20,15 +20,15 @@ module Vmpooler
 start = Time.now
 conn = checkout(options)
 timespan_ms = ((Time.now - start) * 1000).to_i
-@metrics.gauge(@metric_prefix + '.available', @available.length) unless @metrics.nil?
-@metrics.timing(@metric_prefix + '.waited', timespan_ms) unless @metrics.nil?
+@metrics&.gauge(@metric_prefix + '.available', @available.length)
+@metrics&.timing(@metric_prefix + '.waited', timespan_ms)
 begin
 Thread.handle_interrupt(Exception => :immediate) do
 yield conn
 end
 ensure
 checkin
-@metrics.gauge(@metric_prefix + '.available', @available.length) unless @metrics.nil?
+@metrics&.gauge(@metric_prefix + '.available', @available.length)
 end
 end
 end

@@ -38,13 +38,13 @@ module Vmpooler
 start = Time.now
 conn = checkout(options)
 timespan_ms = ((Time.now - start) * 1000).to_i
-@metrics.gauge(@metric_prefix + '.available', @available.length) unless @metrics.nil?
-@metrics.timing(@metric_prefix + '.waited', timespan_ms) unless @metrics.nil?
+@metrics&.gauge(@metric_prefix + '.available', @available.length)
+@metrics&.timing(@metric_prefix + '.waited', timespan_ms)
 begin
 yield conn
 ensure
 checkin
-@metrics.gauge(@metric_prefix + '.available', @available.length) unless @metrics.nil?
+@metrics&.gauge(@metric_prefix + '.available', @available.length)
 end
 end
 end

@@ -5,9 +5,7 @@ module Vmpooler
 attr_reader :server, :port, :prefix

 def initialize(params = {})
-if params['server'].nil? || params['server'].empty?
-raise ArgumentError, "Graphite server is required. Config: #{params.inspect}"
-end
+raise ArgumentError, "Graphite server is required. Config: #{params.inspect}" if params['server'].nil? || params['server'].empty?

 @server = params['server']
 @port = params['port'] || 2003

@@ -35,8 +33,8 @@ module Vmpooler
 socket.close
 end
 end
-rescue => err
-$stderr.puts "Failure logging #{path} to graphite server [#{server}:#{port}]: #{err}"
+rescue StandardError => e
+warn "Failure logging #{path} to graphite server [#{server}:#{port}]: #{e}"
 end
 end
 end

@@ -63,7 +63,7 @@ module Vmpooler
 $redis.del("vmpooler__pool__#{pool}")
 end
 end
-return
+nil
 end

 # Check the state of a VM

@@ -71,8 +71,8 @@ module Vmpooler
 Thread.new do
 begin
 _check_pending_vm(vm, pool, timeout, provider)
-rescue => err
-$logger.log('s', "[!] [#{pool}] '#{vm}' #{timeout} #{provider} errored while checking a pending vm : #{err}")
+rescue StandardError => e
+$logger.log('s', "[!] [#{pool}] '#{vm}' #{timeout} #{provider} errored while checking a pending vm : #{e}")
 fail_pending_vm(vm, pool, timeout)
 raise
 end

@@ -82,6 +82,7 @@ module Vmpooler
 def _check_pending_vm(vm, pool, timeout, provider)
 mutex = vm_mutex(vm)
 return if mutex.locked?
+
 mutex.synchronize do
 if provider.vm_ready?(pool, vm)
 move_pending_vm_to_ready(vm, pool)

@@ -111,8 +112,8 @@ module Vmpooler
 end
 end
 true
-rescue => err
-$logger.log('d', "Fail pending VM failed with an error: #{err}")
+rescue StandardError => e
+$logger.log('d', "Fail pending VM failed with an error: #{e}")
 false
 end

@@ -134,8 +135,9 @@ module Vmpooler
 def vm_still_ready?(pool_name, vm_name, provider)
 # Check if the VM is still ready/available
 return true if provider.vm_ready?(pool_name, vm_name)
+
 raise("VM #{vm_name} is not ready")
-rescue
+rescue StandardError
 move_vm_queue(pool_name, vm_name, 'ready', 'completed', "is unreachable, removed from 'ready' queue")
 end

@@ -143,8 +145,8 @@ module Vmpooler
 Thread.new do
 begin
 _check_ready_vm(vm, pool_name, ttl, provider)
-rescue => err
-$logger.log('s', "[!] [#{pool_name}] '#{vm}' failed while checking a ready vm : #{err}")
+rescue StandardError => e
+$logger.log('s', "[!] [#{pool_name}] '#{vm}' failed while checking a ready vm : #{e}")
 raise
 end
 end

@@ -154,6 +156,7 @@ module Vmpooler
 # Periodically check that the VM is available
 mutex = vm_mutex(vm)
 return if mutex.locked?
+
 mutex.synchronize do
 check_stamp = $redis.hget('vmpooler__vm__' + vm, 'check')
 return if check_stamp && (((Time.now - Time.parse(check_stamp)) / 60) <= $config[:config]['vm_checktime'])

@@ -201,22 +204,24 @@ module Vmpooler
 # Check if the hostname has magically changed from underneath Pooler
 vm_hash = provider.get_vm(pool_name, vm)
 return unless vm_hash.is_a? Hash
+
 hostname = vm_hash['hostname']

 return if hostname.nil?
 return if hostname.empty?
 return if hostname == vm
+
 $redis.smove('vmpooler__ready__' + pool_name, 'vmpooler__completed__' + pool_name, vm)
 $logger.log('d', "[!] [#{pool_name}] '#{vm}' has mismatched hostname #{hostname}, removed from 'ready' queue")
-return true
+true
 end

 def check_running_vm(vm, pool, ttl, provider)
 Thread.new do
 begin
 _check_running_vm(vm, pool, ttl, provider)
-rescue => err
-$logger.log('s', "[!] [#{pool}] '#{vm}' failed while checking VM with an error: #{err}")
+rescue StandardError => e
+$logger.log('s', "[!] [#{pool}] '#{vm}' failed while checking VM with an error: #{e}")
 raise
 end
 end

@@ -225,6 +230,7 @@ module Vmpooler
 def _check_running_vm(vm, pool, ttl, provider)
 mutex = vm_mutex(vm)
 return if mutex.locked?
+
 mutex.synchronize do
 # Check that VM is within defined lifetime
 checkouttime = $redis.hget('vmpooler__active__' + pool, vm)

@@ -245,7 +251,7 @@ module Vmpooler
 if host
 return
 else
-move_vm_queue(pool, vm, 'running', 'completed', "is no longer in inventory, removing from running")
+move_vm_queue(pool, vm, 'running', 'completed', 'is no longer in inventory, removing from running')
 end
 end
 end

@@ -261,14 +267,14 @@ module Vmpooler
 Thread.new do
 begin
 _clone_vm(pool_name, provider)
-rescue => err
-$logger.log('s', "[!] [#{pool_name}] failed while cloning VM with an error: #{err}")
+rescue StandardError => e
+$logger.log('s', "[!] [#{pool_name}] failed while cloning VM with an error: #{e}")
 raise
 end
 end
 end

-def generate_and_check_hostname(pool_name)
+def generate_and_check_hostname(_pool_name)
 # Generate a randomized hostname. The total name must no longer than 15
 # character including the hyphen. The shortest adjective in the corpus is
 # three characters long. Therefore, we can technically select a noun up to 11

@@ -278,12 +284,12 @@ module Vmpooler
 # inviting more conflicts. We favor selecting a longer noun rather than a
 # longer adjective because longer adjectives tend to be less fun.
 noun = @name_generator.noun(max: 10)
-adjective = @name_generator.adjective(max: 14-noun.length)
+adjective = @name_generator.adjective(max: 14 - noun.length)
 random_name = [adjective, noun].join('-')
 hostname = $config[:config]['prefix'] + random_name
 available = $redis.hlen('vmpooler__vm__' + hostname) == 0

-return hostname, available
+[hostname, available]
 end

 def find_unique_hostname(pool_name)

@@ -297,9 +303,9 @@ module Vmpooler
 $metrics.increment("errors.duplicatehostname.#{pool_name}")
 $logger.log('s', "[!] [#{pool_name}] Generated hostname #{hostname} was not unique (attempt \##{hostname_retries} of #{max_hostname_retries})")
 end

 raise "Unable to generate a unique hostname after #{hostname_retries} attempts. The last hostname checked was #{hostname}" unless available

 hostname
 end

@@ -322,11 +328,11 @@ module Vmpooler
 $logger.log('s', "[+] [#{pool_name}] '#{new_vmname}' cloned in #{finish} seconds")

 $metrics.timing("clone.#{pool_name}", finish)
-rescue => _err
+rescue StandardError => _e
 $redis.srem("vmpooler__pending__#{pool_name}", new_vmname)
 expiration_ttl = $config[:redis]['data_ttl'].to_i * 60 * 60
 $redis.expire("vmpooler__vm__#{new_vmname}", expiration_ttl)
-raise _err
+raise _e
 ensure
 $redis.decr('vmpooler__tasks__clone')
 end

@@ -337,8 +343,8 @@ module Vmpooler
 Thread.new do
 begin
 _destroy_vm(vm, pool, provider)
-rescue => err
-$logger.log('d', "[!] [#{pool}] '#{vm}' failed while destroying the VM with an error: #{err}")
+rescue StandardError => e
+$logger.log('d', "[!] [#{pool}] '#{vm}' failed while destroying the VM with an error: #{e}")
 raise
 end
 end

@@ -347,6 +353,7 @@ module Vmpooler
 def _destroy_vm(vm, pool, provider)
 mutex = vm_mutex(vm)
 return if mutex.locked?
+
 mutex.synchronize do
 $redis.hdel('vmpooler__active__' + pool, vm)
 $redis.hset('vmpooler__vm__' + vm, 'destroy', Time.now)

@@ -370,11 +377,13 @@ module Vmpooler

 def get_vm_usage_labels(vm)
 return unless $config[:config]['usage_stats']
+
 checkout = $redis.hget("vmpooler__vm__#{vm}", 'checkout')
 return if checkout.nil?
+
 jenkins_build_url = $redis.hget("vmpooler__vm__#{vm}", 'tag:jenkins_build_url')
 user = $redis.hget("vmpooler__vm__#{vm}", 'token:user') || 'unauthenticated'
-poolname = $redis.hget("vmpooler__vm__#{vm}", "template")
+poolname = $redis.hget("vmpooler__vm__#{vm}", 'template')

 unless jenkins_build_url
 user = user.gsub('.', '_')

@@ -404,25 +413,24 @@ module Vmpooler
 poolname
 ]

-metric_parts = metric_parts.reject { |s| s.nil? }
+metric_parts = metric_parts.reject(&:nil?)
 metric_parts = metric_parts.map { |s| s.gsub('.', '_') }

 $metrics.increment(metric_parts.join('.'))
-rescue => err
-logger.log('d', "[!] [#{poolname}] failed while evaluating usage labels on '#{vm}' with an error: #{err}")
+rescue StandardError => e
+logger.log('d', "[!] [#{poolname}] failed while evaluating usage labels on '#{vm}' with an error: #{e}")
 end

 def component_to_test(match, labels_string)
 return if labels_string.nil?
+
 labels_string_parts = labels_string.split(',')
 labels_string_parts.each do |part|
 key, value = part.split('=')
 next if value.nil?
-if key == match
-return value
-end
+return value if key == match
 end
-return
+nil
 end

 def purge_unused_vms_and_folders

@@ -435,13 +443,13 @@ module Vmpooler
 Thread.new do
 begin
 purge_vms_and_folders($providers[provider.to_s])
-rescue => err
-$logger.log('s', "[!] failed while purging provider #{provider.to_s} VMs and folders with an error: #{err}")
+rescue StandardError => e
+$logger.log('s', "[!] failed while purging provider #{provider} VMs and folders with an error: #{e}")
 end
 end
 end
 end
-return
+nil
 end

 # Return a list of pool folders

@@ -450,6 +458,7 @@ module Vmpooler
 folders = {}
 $config[:pools].each do |pool|
 next unless pool['provider'] == provider_name
+
 folder_parts = pool['folder'].split('/')
 datacenter = provider.get_target_datacenter_from_config(pool['name'])
 folders[folder_parts.pop] = "#{datacenter}/vm/#{folder_parts.join('/')}"

@@ -459,7 +468,7 @@ module Vmpooler

 def get_base_folders(folders)
 base = []
-folders.each do |key, value|
+folders.each do |_key, value|
 base << value
 end
 base.uniq

@@ -476,8 +485,8 @@ module Vmpooler
 Thread.new do
 begin
 _create_vm_disk(pool_name, vm, disk_size, provider)
-rescue => err
-$logger.log('d', "[!] [#{pool_name}] '#{vm}' failed while creating disk: #{err}")
+rescue StandardError => e
+$logger.log('d', "[!] [#{pool_name}] '#{vm}' failed while creating disk: #{e}")
 raise
 end
 end

@@ -512,8 +521,8 @@ module Vmpooler
 Thread.new do
 begin
 _create_vm_snapshot(pool_name, vm, snapshot_name, provider)
-rescue => err
-$logger.log('d', "[!] [#{pool_name}] '#{vm}' failed while creating snapshot: #{err}")
+rescue StandardError => e
+$logger.log('d', "[!] [#{pool_name}] '#{vm}' failed while creating snapshot: #{e}")
 raise
 end
 end

@@ -541,8 +550,8 @@ module Vmpooler
 Thread.new do
 begin
 _revert_vm_snapshot(pool_name, vm, snapshot_name, provider)
-rescue => err
-$logger.log('d', "[!] [#{pool_name}] '#{vm}' failed while reverting snapshot: #{err}")
+rescue StandardError => e
+$logger.log('d', "[!] [#{pool_name}] '#{vm}' failed while reverting snapshot: #{e}")
 raise
 end
 end

@@ -574,14 +583,14 @@ module Vmpooler
 # ie. ["vsphere", "dummy"]
 def used_providers
 pools = config[:pools] || []
-@used_providers ||= (pools.map { |pool| pool[:provider] || pool['provider'] }.compact + default_providers ).uniq
+@used_providers ||= (pools.map { |pool| pool[:provider] || pool['provider'] }.compact + default_providers).uniq
 end

 # @return [Array] - returns a list of providers that should always be loaded
 # note: vsphere is the default if user does not specify although this should not be
 # if vsphere is to no longer be loaded by default please remove
 def default_providers
-@default_providers ||= %w( vsphere dummy )
+@default_providers ||= %w[vsphere dummy]
 end

 def get_pool_name_for_vm(vm_name)

@@ -594,6 +603,7 @@ module Vmpooler
 def get_provider_for_pool(pool_name)
 pool = $config[:pools].find { |pool| pool['name'] == pool_name }
 return nil unless pool
+
 provider_name = pool.fetch('provider', nil)
 $providers[provider_name]
 end

@@ -609,6 +619,7 @@ module Vmpooler

 unless maxloop.zero?
 break if loop_count >= maxloop
+
 loop_count += 1
 end
 end

@@ -627,8 +638,8 @@ module Vmpooler
 raise("Missing Provider for vm #{vm_name} in pool #{pool_name}") if provider.nil?

 create_vm_disk(pool_name, vm_name, disk_size, provider)
-rescue => err
-$logger.log('s', "[!] [disk_manager] disk creation appears to have failed: #{err}")
+rescue StandardError => e
+$logger.log('s', "[!] [disk_manager] disk creation appears to have failed: #{e}")
 end
 end
 end

@@ -644,6 +655,7 @@ module Vmpooler

 unless maxloop.zero?
 break if loop_count >= maxloop
+
 loop_count += 1
 end
 end

@@ -663,8 +675,8 @@ module Vmpooler
 raise("Missing Provider for vm #{vm_name} in pool #{pool_name}") if provider.nil?

 create_vm_snapshot(pool_name, vm_name, snapshot_name, provider)
-rescue => err
-$logger.log('s', "[!] [snapshot_manager] snapshot create appears to have failed: #{err}")
+rescue StandardError => e
+$logger.log('s', "[!] [snapshot_manager] snapshot create appears to have failed: #{e}")
 end
 end

@@ -680,8 +692,8 @@ module Vmpooler
 raise("Missing Provider for vm #{vm_name} in pool #{pool_name}") if provider.nil?

 revert_vm_snapshot(pool_name, vm_name, snapshot_name, provider)
-rescue => err
-$logger.log('s', "[!] [snapshot_manager] snapshot revert appears to have failed: #{err}")
+rescue StandardError => e
+$logger.log('s', "[!] [snapshot_manager] snapshot revert appears to have failed: #{e}")
 end
 end
 end

@@ -694,8 +706,8 @@ module Vmpooler
 $redis.srem("vmpooler__migrating__#{pool_name}", vm_name)
 provider.migrate_vm(pool_name, vm_name)
 end
-rescue => err
-$logger.log('s', "[x] [#{pool_name}] '#{vm_name}' migration failed with an error: #{err}")
+rescue StandardError => e
+$logger.log('s', "[x] [#{pool_name}] '#{vm_name}' migration failed with an error: #{e}")
 end
 end
 end

@@ -724,17 +736,11 @@ module Vmpooler
 wakeup_by = Time.now + wakeup_period
 return if time_passed?(:exit_by, exit_by)

-if options[:pool_size_change]
-initial_ready_size = $redis.scard("vmpooler__ready__#{options[:poolname]}")
-end
+initial_ready_size = $redis.scard("vmpooler__ready__#{options[:poolname]}") if options[:pool_size_change]

-if options[:clone_target_change]
-initial_clone_target = $redis.hget("vmpooler__pool__#{options[:poolname]}", options[:clone_target])
-end
+initial_clone_target = $redis.hget("vmpooler__pool__#{options[:poolname]}", options[:clone_target]) if options[:clone_target_change]

-if options[:pool_template_change]
-initial_template = $redis.hget('vmpooler__template__prepared', options[:poolname])
-end
+initial_template = $redis.hget('vmpooler__template__prepared', options[:poolname]) if options[:pool_template_change]

 loop do
 sleep(1)

@@ -751,7 +757,7 @@ module Vmpooler
 end

 if options[:clone_target_change]
-clone_target = $redis.hget("vmpooler__config__clone_target}", options[:poolname])
+clone_target = $redis.hget('vmpooler__config__clone_target}', options[:poolname])
 if clone_target
 break unless clone_target == initial_clone_target
 end

@@ -795,6 +801,7 @@ module Vmpooler
 loop_delay = loop_delay_min
 provider = get_provider_for_pool(pool['name'])
 raise("Could not find provider '#{pool['provider']}") if provider.nil?
+
 sync_pool_template(pool)
 loop do
 result = _check_pool(pool, provider)

@@ -809,11 +816,12 @@ module Vmpooler

 unless maxloop.zero?
 break if loop_count >= maxloop
+
 loop_count += 1
 end
 end
-rescue => err
-$logger.log('s', "[!] [#{pool['name']}] Error while checking the pool: #{err}")
+rescue StandardError => e
+$logger.log('s', "[!] [#{pool['name']}] Error while checking the pool: #{e}")
 raise
 end
 end

@@ -828,19 +836,13 @@ module Vmpooler
 end

 def dereference_mutex(vmname)
-if @vm_mutex.delete(vmname)
-return true
-else
-return
-end
+true if @vm_mutex.delete(vmname)
 end

 def sync_pool_template(pool)
 pool_template = $redis.hget('vmpooler__config__template', pool['name'])
 if pool_template
-unless pool['template'] == pool_template
-pool['template'] = pool_template
-end
+pool['template'] = pool_template unless pool['template'] == pool_template
 end
 end

@@ -850,8 +852,8 @@ module Vmpooler
 begin
 provider.create_template_delta_disks(pool)
 $redis.sadd('vmpooler__template__deltas', pool['template'])
-rescue => err
-$logger.log('s', "[!] [#{pool['name']}] failed while preparing a template with an error. As a result vmpooler could not create the template delta disks. Either a template delta disk already exists, or the template delta disk creation failed. The error is: #{err}")
+rescue StandardError => e
+$logger.log('s', "[!] [#{pool['name']}] failed while preparing a template with an error. As a result vmpooler could not create the template delta disks. Either a template delta disk already exists, or the template delta disk creation failed. The error is: #{e}")
 end
 end
 end

@@ -863,6 +865,7 @@ module Vmpooler
 prepared_template = $redis.hget('vmpooler__template__prepared', pool['name'])
 configured_template = $redis.hget('vmpooler__config__template', pool['name'])
 return if mutex.locked?
+
 if prepared_template.nil?
 mutex.synchronize do
 prepare_template(pool, provider)

@@ -878,6 +881,7 @@ module Vmpooler
 end
 return if configured_template.nil?
 return if configured_template == prepared_template
+
 mutex.synchronize do
 update_pool_template(pool, provider, configured_template, prepared_template)
 end

@@ -913,9 +917,11 @@ module Vmpooler
 def update_clone_target(pool)
 mutex = pool_mutex(pool['name'])
 return if mutex.locked?
+
 clone_target = $redis.hget('vmpooler__config__clone_target', pool['name'])
 return if clone_target.nil?
 return if clone_target == pool['clone_target']
+
 $logger.log('s', "[*] [#{pool['name']}] clone updated from #{pool['clone_target']} to #{clone_target}")
 mutex.synchronize do
 pool['clone_target'] = clone_target

@@ -930,9 +936,11 @@ module Vmpooler
 total = $redis.scard("vmpooler__pending__#{pool['name']}") + ready
 return if total.nil?
 return if total == 0
+
 mutex = pool_mutex(pool['name'])
 return if mutex.locked?
 return unless ready > pool['size']
+
 mutex.synchronize do
 difference = ready - pool['size']
 difference.times do

@@ -950,10 +958,13 @@ module Vmpooler
 def update_pool_size(pool)
 mutex = pool_mutex(pool['name'])
 return if mutex.locked?
+
 poolsize = $redis.hget('vmpooler__config__poolsize', pool['name'])
 return if poolsize.nil?
+
 poolsize = Integer(poolsize)
 return if poolsize == pool['size']
+
 mutex.synchronize do
 pool['size'] = poolsize
 end

@@ -962,6 +973,7 @@ module Vmpooler
 def reset_pool(pool)
 poolname = pool['name']
 return unless $redis.sismember('vmpooler__poolreset', poolname)
+
 $redis.srem('vmpooler__poolreset', poolname)
 mutex = pool_mutex(poolname)
 mutex.synchronize do

@@ -992,9 +1004,9 @@ module Vmpooler
 inventory[vm['name']] = 1
 end
 end
-rescue => err
-$logger.log('s', "[!] [#{pool['name']}] _check_pool failed with an error while running create_inventory: #{err}")
-raise(err)
+rescue StandardError => e
+$logger.log('s', "[!] [#{pool['name']}] _check_pool failed with an error while running create_inventory: #{e}")
+raise(e)
 end
 inventory
 end

@@ -1006,8 +1018,8 @@ module Vmpooler
 vm_lifetime = $redis.hget('vmpooler__vm__' + vm, 'lifetime') || $config[:config]['vm_lifetime'] || 12
 pool_check_response[:checked_running_vms] += 1
 check_running_vm(vm, pool_name, vm_lifetime, provider)
-rescue => err
-$logger.log('d', "[!] [#{pool_name}] _check_pool with an error while evaluating running VMs: #{err}")
+rescue StandardError => e
+$logger.log('d', "[!] [#{pool_name}] _check_pool with an error while evaluating running VMs: #{e}")
 end
 else
 move_vm_queue(pool_name, vm, 'running', 'completed', 'is a running VM but is missing from inventory. Marking as completed.')

@@ -1021,8 +1033,8 @@ module Vmpooler
 begin
 pool_check_response[:checked_ready_vms] += 1
 check_ready_vm(vm, pool_name, pool_ttl || 0, provider)
-rescue => err
-$logger.log('d', "[!] [#{pool_name}] _check_pool failed with an error while evaluating ready VMs: #{err}")
+rescue StandardError => e
+$logger.log('d', "[!] [#{pool_name}] _check_pool failed with an error while evaluating ready VMs: #{e}")
 end
 else
 move_vm_queue(pool_name, vm, 'ready', 'completed', 'is a ready VM but is missing from inventory. Marking as completed.')

@@ -1037,8 +1049,8 @@ module Vmpooler
 begin
 pool_check_response[:checked_pending_vms] += 1
 check_pending_vm(vm, pool_name, pool_timeout, provider)
-rescue => err
-$logger.log('d', "[!] [#{pool_name}] _check_pool failed with an error while evaluating pending VMs: #{err}")
+rescue StandardError => e
+$logger.log('d', "[!] [#{pool_name}] _check_pool failed with an error while evaluating pending VMs: #{e}")
 end
 else
 fail_pending_vm(vm, pool_name, pool_timeout, false)

@@ -1052,11 +1064,11 @@ module Vmpooler
 begin
 pool_check_response[:destroyed_vms] += 1
 destroy_vm(vm, pool_name, provider)
-rescue => err
+rescue StandardError => e
 $redis.srem("vmpooler__completed__#{pool_name}", vm)
 $redis.hdel("vmpooler__active__#{pool_name}", vm)
 $redis.del("vmpooler__vm__#{vm}")
-$logger.log('d', "[!] [#{pool_name}] _check_pool failed with an error while evaluating completed VMs: #{err}")
+$logger.log('d', "[!] [#{pool_name}] _check_pool failed with an error while evaluating completed VMs: #{e}")
 end
 else
 $logger.log('s', "[!] [#{pool_name}] '#{vm}' not found in inventory, removed from 'completed' queue")

@@ -1068,22 +1080,18 @@ module Vmpooler
 end

 def check_discovered_pool_vms(pool_name)
-begin
-$redis.smembers("vmpooler__discovered__#{pool_name}").reverse.each do |vm|
-%w[pending ready running completed].each do |queue|
-if $redis.sismember("vmpooler__#{queue}__#{pool_name}", vm)
-$logger.log('d', "[!] [#{pool_name}] '#{vm}' found in '#{queue}', removed from 'discovered' queue")
-$redis.srem("vmpooler__discovered__#{pool_name}", vm)
-end
-end
-
-if $redis.sismember("vmpooler__discovered__#{pool_name}", vm)
-$redis.smove("vmpooler__discovered__#{pool_name}", "vmpooler__completed__#{pool_name}", vm)
-end
-end
-rescue => err
-$logger.log('d', "[!] [#{pool_name}] _check_pool failed with an error while evaluating discovered VMs: #{err}")
-end
+$redis.smembers("vmpooler__discovered__#{pool_name}").reverse.each do |vm|
+%w[pending ready running completed].each do |queue|
+if $redis.sismember("vmpooler__#{queue}__#{pool_name}", vm)
+$logger.log('d', "[!] [#{pool_name}] '#{vm}' found in '#{queue}', removed from 'discovered' queue")
+$redis.srem("vmpooler__discovered__#{pool_name}", vm)
+end
+end
+
+$redis.smove("vmpooler__discovered__#{pool_name}", "vmpooler__completed__#{pool_name}", vm) if $redis.sismember("vmpooler__discovered__#{pool_name}", vm)
+end
+rescue StandardError => e
+$logger.log('d', "[!] [#{pool_name}] _check_pool failed with an error while evaluating discovered VMs: #{e}")
 end

 def check_migrating_pool_vms(pool_name, provider, pool_check_response, inventory)

@@ -1092,8 +1100,8 @@ module Vmpooler
 begin
 pool_check_response[:migrated_vms] += 1
 migrate_vm(vm, pool_name, provider)
-rescue => err
-$logger.log('s', "[x] [#{pool_name}] '#{vm}' failed to migrate: #{err}")
+rescue StandardError => e
+$logger.log('s', "[x] [#{pool_name}] '#{vm}' failed to migrate: #{e}")
 end
 end
 end

@@ -1101,6 +1109,7 @@ module Vmpooler

 def repopulate_pool_vms(pool_name, provider, pool_check_response, pool_size)
 return if pool_mutex(pool_name).locked?
+
 ready = $redis.scard("vmpooler__ready__#{pool_name}")
 total = $redis.scard("vmpooler__pending__#{pool_name}") + ready

@@ -1120,8 +1129,8 @@ module Vmpooler
 $redis.incr('vmpooler__tasks__clone')
 pool_check_response[:cloned_vms] += 1
 clone_vm(pool_name, provider)
-rescue => err
-$logger.log('s', "[!] [#{pool_name}] clone failed during check_pool with an error: #{err}")
+rescue StandardError => e
+$logger.log('s', "[!] [#{pool_name}] clone failed during check_pool with an error: #{e}")
 $redis.decr('vmpooler__tasks__clone')
 raise
 end

@@ -1142,7 +1151,7 @@ module Vmpooler

 begin
 inventory = create_inventory(pool, provider, pool_check_response)
-rescue => err
+rescue StandardError => e
 return(pool_check_response)
 end

@@ -1195,6 +1204,7 @@ module Vmpooler
 provider_klass = Vmpooler::PoolManager::Provider
 provider_klass.constants.each do |classname|
 next unless classname.to_s.casecmp(provider_class) == 0
+
 return provider_klass.const_get(classname).new(config, logger, metrics, provider_name, options)
 end
 raise("Provider '#{provider_class}' is unknown for pool with provider name '#{provider_name}'") if provider.nil?

@@ -1257,8 +1267,8 @@ module Vmpooler
 end
 begin
 $providers[provider_name] = create_provider_object($config, $logger, $metrics, provider_class, provider_name, {}) if $providers[provider_name].nil?
-rescue => err
-$logger.log('s', "Error while creating provider for pool #{pool['name']}: #{err}")
+rescue StandardError => e
+$logger.log('s', "Error while creating provider for pool #{pool['name']}: #{e}")
 raise
 end
 end

@@ -1294,6 +1304,7 @@ module Vmpooler

 unless maxloop.zero?
 break if loop_count >= maxloop
+
 loop_count += 1
 end
 end

@@ -2,26 +2,25 @@ require 'pathname'

 module Vmpooler
 class Providers
-
 # @param names [Array] - an array of names or string name of a provider
 # @return [Array] - list of provider files loaded
 # ie. ["lib/vmpooler/providers/base.rb", "lib/vmpooler/providers/dummy.rb", "lib/vmpooler/providers/vsphere.rb"]
 def self.load_by_name(names)
 names = Array(names)
-instance = self.new
-names.map {|name| instance.load_from_gems(name)}.flatten
+instance = new
+names.map { |name| instance.load_from_gems(name) }.flatten
 end

 # @return [Array] - array of provider files
 # ie. ["lib/vmpooler/providers/base.rb", "lib/vmpooler/providers/dummy.rb", "lib/vmpooler/providers/vsphere.rb"]
 # although these files can come from any gem
 def self.load_all_providers
-self.new.load_from_gems
+new.load_from_gems
 end

 # @return [Array] - returns an array of gem names that contain a provider
 def self.installed_providers
-self.new.vmpooler_provider_gem_list.map(&:name)
+new.vmpooler_provider_gem_list.map(&:name)
 end

 # @return [Array] returns a list of vmpooler providers gem plugin specs

@@ -50,7 +49,7 @@ module Vmpooler
 # @return [String] - the relative path to the vmpooler provider dir
 # this is used when searching gems for this path
 def provider_path
-File.join('lib','vmpooler','providers')
+File.join('lib', 'vmpooler', 'providers')
 end

 # Add constants to array to skip over classes, ie. Vmpooler::PoolManager::Provider::Dummy

@@ -81,8 +80,6 @@ module Vmpooler
 @plugin_map ||= Hash[plugin_classes.map { |gem| [gem.send(:name), gem] }]
 end

-
-
 # Internal: Retrieve a list of available gem paths from RubyGems.
 #
 # Returns an Array of Pathname objects.

@@ -90,11 +87,11 @@ module Vmpooler
 dirs = []
 if has_rubygems?
 dirs = gemspecs.map do |spec|
-lib_path = File.expand_path(File.join(spec.full_gem_path,provider_path))
-lib_path if File.exists? lib_path
+lib_path = File.expand_path(File.join(spec.full_gem_path, provider_path))
+lib_path if File.exist? lib_path
 end + included_lib_dirs
 end
-dirs.reject { |dir| dir.nil? }.uniq
+dirs.reject(&:nil?).uniq
 end

 # Internal: Check if RubyGems is loaded and available.

@@ -114,6 +111,5 @@ module Vmpooler
 Gem.searcher.init_gemspecs
 end
 end
-
 end
 end

@@ -222,7 +222,7 @@ module Vmpooler
 # [Hash] pool : Configuration for the pool
 # returns
 # nil when successful. Raises error when encountered
-def create_template_delta_disks(pool)
+def create_template_delta_disks(_pool)
 raise("#{self.class.name} does not implement create_template_delta_disks")
 end

@@ -230,11 +230,11 @@ module Vmpooler
 # [String] provider_name : Name of the provider
 # returns
 # Hash of folders
-def get_target_datacenter_from_config(provider_name)
+def get_target_datacenter_from_config(_provider_name)
 raise("#{self.class.name} does not implement get_target_datacenter_from_config")
 end

-def purge_unconfigured_folders(base_folders, configured_folders, whitelist)
+def purge_unconfigured_folders(_base_folders, _configured_folders, _whitelist)
 raise("#{self.class.name} does not implement purge_unconfigured_folders")
 end
 end

@@ -73,10 +73,10 @@ module Vmpooler
 return current_vm['vm_host'] if provider_config['migratevm_couldmove_percent'].nil?

 # Only migrate if migratevm_couldmove_percent is met
-return current_vm['vm_host'] if 1 + rand(100) > provider_config['migratevm_couldmove_percent']
+return current_vm['vm_host'] if rand(1..100) > provider_config['migratevm_couldmove_percent']

 # Simulate a 10 node cluster and randomly pick a different one
-new_host = 'HOST' + (1 + rand(10)).to_s while new_host == current_vm['vm_host']
+new_host = 'HOST' + rand(1..10).to_s while new_host == current_vm['vm_host']

 new_host
 end

@@ -93,7 +93,7 @@ module Vmpooler

 # Inject clone failure
 unless provider_config['migratevm_fail_percent'].nil?
-raise('Dummy Failure for migratevm_fail_percent') if 1 + rand(100) <= provider_config['migratevm_fail_percent']
+raise('Dummy Failure for migratevm_fail_percent') if rand(1..100) <= provider_config['migratevm_fail_percent']
 end

 @write_lock.synchronize do

@@ -114,7 +114,7 @@ module Vmpooler

 # Randomly power off the VM
 unless dummy['powerstate'] != 'PoweredOn' || provider_config['getvm_poweroff_percent'].nil?
-if 1 + rand(100) <= provider_config['getvm_poweroff_percent']
+if rand(1..100) <= provider_config['getvm_poweroff_percent']
 @write_lock.synchronize do
 dummy = get_dummy_vm(pool_name, vm_name)
 dummy['powerstate'] = 'PoweredOff'

@@ -126,7 +126,7 @@ module Vmpooler

 # Randomly rename the host
 unless dummy['hostname'] != dummy['name'] || provider_config['getvm_rename_percent'].nil?
-if 1 + rand(100) <= provider_config['getvm_rename_percent']
+if rand(1..100) <= provider_config['getvm_rename_percent']
 @write_lock.synchronize do
 dummy = get_dummy_vm(pool_name, vm_name)
 dummy['hostname'] = 'DUMMY' + dummy['name']

@@ -194,7 +194,7 @@ module Vmpooler
 begin
 # Inject clone failure
 unless provider_config['createvm_fail_percent'].nil?
-raise('Dummy Failure for createvm_fail_percent') if 1 + rand(100) <= provider_config['createvm_fail_percent']
+raise('Dummy Failure for createvm_fail_percent') if rand(1..100) <= provider_config['createvm_fail_percent']
 end

 # Assert the VM is ready for use

@@ -202,7 +202,7 @@ module Vmpooler
 vm['dummy_state'] = 'RUNNING'
 write_backing_file
 end
-rescue => _err
+rescue StandardError => _e
 @write_lock.synchronize do
 remove_dummy_vm(pool_name, dummy_hostname)
 write_backing_file

@@ -227,7 +227,7 @@ module Vmpooler

 # Inject create failure
 unless provider_config['createdisk_fail_percent'].nil?
-raise('Dummy Failure for createdisk_fail_percent') if 1 + rand(100) <= provider_config['createdisk_fail_percent']
+raise('Dummy Failure for createdisk_fail_percent') if rand(1..100) <= provider_config['createdisk_fail_percent']
 end

 @write_lock.synchronize do

@@ -253,7 +253,7 @@ module Vmpooler

 # Inject create failure
 unless provider_config['createsnapshot_fail_percent'].nil?
-raise('Dummy Failure for createsnapshot_fail_percent') if 1 + rand(100) <= provider_config['createsnapshot_fail_percent']
+raise('Dummy Failure for createsnapshot_fail_percent') if rand(1..100) <= provider_config['createsnapshot_fail_percent']
 end

 @write_lock.synchronize do

@@ -280,7 +280,7 @@ module Vmpooler

 # Inject create failure
 unless provider_config['revertsnapshot_fail_percent'].nil?
-raise('Dummy Failure for revertsnapshot_fail_percent') if 1 + rand(100) <= provider_config['revertsnapshot_fail_percent']
+raise('Dummy Failure for revertsnapshot_fail_percent') if rand(1..100) <= provider_config['revertsnapshot_fail_percent']
 end
 end

@@ -318,7 +318,7 @@ module Vmpooler

 # Inject destroy VM failure
 unless provider_config['destroyvm_fail_percent'].nil?
-raise('Dummy Failure for migratevm_fail_percent') if 1 + rand(100) <= provider_config['destroyvm_fail_percent']
+raise('Dummy Failure for migratevm_fail_percent') if rand(1..100) <= provider_config['destroyvm_fail_percent']
 end

 # 'Destroy' the VM

@@ -352,7 +352,7 @@ module Vmpooler
 sleep(2)

 unless provider_config['vmready_fail_percent'].nil?
-raise('Dummy Failure for vmready_fail_percent') if 1 + rand(100) <= provider_config['vmready_fail_percent']
+raise('Dummy Failure for vmready_fail_percent') if rand(1..100) <= provider_config['vmready_fail_percent']
 end

 @write_lock.synchronize do

@@ -370,6 +370,7 @@ module Vmpooler

 def remove_dummy_vm(pool_name, vm_name)
 return if @dummylist['pool'][pool_name].nil?
+
 new_poollist = @dummylist['pool'][pool_name].delete_if { |vm| vm['name'] == vm_name }
 @dummylist['pool'][pool_name] = new_poollist
 end

@@ -395,6 +396,7 @@ module Vmpooler
 def write_backing_file
 dummyfilename = provider_config['filename']
 return if dummyfilename.nil?
+
 File.open(dummyfilename, 'w') { |file| file.write(YAML.dump(@dummylist)) }
 end
 end

@ -50,7 +50,8 @@ module Vmpooler
|
|||
end
|
||||
return false unless configured_folders.keys.include?(folder_title)
|
||||
return false unless configured_folders[folder_title] == base_folder
|
||||
return true
|
||||
|
||||
true
|
||||
end
|
||||
|
||||
def destroy_vm_and_log(vm_name, vm_object, pool, data_ttl)
|
||||
|
|
@ -69,7 +70,7 @@ module Vmpooler
|
|||
logger.log('s', "[!] [#{pool}] '#{vm_name}' is a folder, bailing on destroying")
|
||||
raise('Expected VM, but received a folder object')
|
||||
end
|
||||
vm_object.PowerOffVM_Task.wait_for_completion if vm_object.runtime && vm_object.runtime.powerState && vm_object.runtime.powerState == 'poweredOn'
|
||||
vm_object.PowerOffVM_Task.wait_for_completion if vm_object.runtime&.powerState && vm_object.runtime.powerState == 'poweredOn'
|
||||
vm_object.Destroy_Task.wait_for_completion
|
||||
|
||||
finish = format('%.2f', Time.now - start)
|
||||
|
|
@ -77,9 +78,9 @@ module Vmpooler
|
|||
metrics.timing("destroy.#{pool}", finish)
|
||||
rescue RuntimeError
|
||||
raise
|
||||
rescue => err
|
||||
rescue StandardError => e
|
||||
try += 1
|
||||
logger.log('s', "[!] [#{pool}] failed to destroy '#{vm_name}' with an error: #{err}")
|
||||
logger.log('s', "[!] [#{pool}] failed to destroy '#{vm_name}' with an error: #{e}")
|
||||
try >= max_tries ? raise : retry
|
||||
end
|
||||
|
||||
|
|
@@ -104,7 +105,7 @@ module Vmpooler
max_tries = 3
logger.log('s', "[-] [#{folder_object.name}] removing unconfigured folder")
folder_object.Destroy_Task.wait_for_completion
rescue
rescue StandardError
try += 1
try >= max_tries ? raise : retry
end

@@ -118,9 +119,7 @@ module Vmpooler
unless folder_children.empty?
folder_children.each do |folder_hash|
folder_hash.each do |folder_title, folder_object|
unless folder_configured?(folder_title, base_folder, configured_folders, whitelist)
destroy_folder_and_children(folder_object)
end
destroy_folder_and_children(folder_object) unless folder_configured?(folder_title, base_folder, configured_folders, whitelist)
end
end
end
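Folding the three-line unless wrapper around a single call into a trailing modifier is Style/IfUnlessModifier, which fires whenever a one-statement body fits within the configured line length. A sketch with illustrative data:

  configured = %w[pool-a pool-b]
  folders = %w[pool-a stray-1 pool-b stray-2]

  folders.each do |folder|
    # Modifier form of the same guard the hunk collapses.
    puts "would destroy #{folder}" unless configured.include?(folder)
  end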
@@ -132,8 +131,8 @@ module Vmpooler
folders = []

propSpecs = {
:entity => self,
:inventoryPath => folder_name
entity: self,
inventoryPath: folder_name
}
folder_object = connection.searchIndex.FindByInventoryPath(propSpecs)
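Rewriting the hashrocket pairs as entity: self and inventoryPath: folder_name is Style/HashSyntax: for symbol keys, the Ruby 1.9 shorthand builds the identical hash, so callers such as FindByInventoryPath see no difference. A quick check:

  rocket    = { :entity => 'vm-folder', :inventoryPath => 'dc1/vm/pool' }
  shorthand = { entity: 'vm-folder', inventoryPath: 'dc1/vm/pool' }
  puts rocket == shorthand # => true; only the notation differs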
@@ -141,6 +140,7 @@ module Vmpooler

folder_object.childEntity.each do |folder|
next unless folder.is_a? RbVmomi::VIM::Folder

folders << { folder.name => folder }
end

@@ -171,9 +171,9 @@ module Vmpooler
target[dc]['checking'] = true
hosts_hash = find_least_used_hosts(cluster, datacenter, percentage)
target[dc] = hosts_hash
rescue => _err
rescue StandardError => _e
target[dc] = {}
raise(_err)
raise(_e)
ensure
target[dc]['check_time_finished'] = Time.now
end

@@ -188,36 +188,36 @@ module Vmpooler
cluster = get_target_cluster_from_config(pool_name)
raise("cluster for pool #{pool_name} cannot be identified") if cluster.nil?
raise("datacenter for pool #{pool_name} cannot be identified") if datacenter.nil?

dc = "#{datacenter}_#{cluster}"
unless target.key?(dc)
select_target_hosts(target, cluster, datacenter)
return
end
if target[dc].key?('checking')
wait_for_host_selection(dc, target, loop_delay, max_age)
end
wait_for_host_selection(dc, target, loop_delay, max_age) if target[dc].key?('checking')
if target[dc].key?('check_time_finished')
if now - target[dc]['check_time_finished'] > max_age
select_target_hosts(target, cluster, datacenter)
end
select_target_hosts(target, cluster, datacenter) if now - target[dc]['check_time_finished'] > max_age
end
end

def wait_for_host_selection(dc, target, maxloop = 0, loop_delay = 1, max_age = 60)
loop_count = 1
until target.key?(dc) and target[dc].key?('check_time_finished')
until target.key?(dc) && target[dc].key?('check_time_finished')
sleep(loop_delay)
unless maxloop.zero?
break if loop_count >= maxloop

loop_count += 1
end
end
return unless target[dc].key?('check_time_finished')

loop_count = 1
while Time.now - target[dc]['check_time_finished'] > max_age
sleep(loop_delay)
unless maxloop.zero?
break if loop_count >= maxloop

loop_count += 1
end
end
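The until condition changing from "and" to && is Style/AndOr. The keyword forms bind more loosely than assignment, which makes them easy to misread outside pure control flow; the operator forms avoid the trap. A two-line demonstration of the precedence difference:

  x = true and false # parsed as (x = true) and false
  puts x             # => true

  y = true && false  # parsed as y = (true && false)
  puts y             # => false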
@@ -228,10 +228,12 @@ module Vmpooler
cluster = get_target_cluster_from_config(pool_name)
raise("cluster for pool #{pool_name} cannot be identified") if cluster.nil?
raise("datacenter for pool #{pool_name} cannot be identified") if datacenter.nil?

dc = "#{datacenter}_#{cluster}"
@provider_hosts_lock.synchronize do
if architecture
raise("there is no candidate in vcenter that meets all the required conditions, that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory") unless target[dc].key?('architectures')

host = target[dc]['architectures'][architecture].shift
target[dc]['architectures'][architecture] << host
if target[dc]['hosts'].include?(host)

@@ -241,12 +243,11 @@ module Vmpooler
return host
else
raise("there is no candidate in vcenter that meets all the required conditions, that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory") unless target[dc].key?('hosts')

host = target[dc]['hosts'].shift
target[dc]['hosts'] << host
target[dc]['architectures'].each do |arch|
if arch.include?(host)
target[dc]['architectures'][arch] = arch.partition { |v| v != host }.flatten
end
target[dc]['architectures'][arch] = arch.partition { |v| v != host }.flatten if arch.include?(host)
end
return host
end

@@ -258,11 +259,13 @@ module Vmpooler
cluster = get_target_cluster_from_config(pool_name)
raise("cluster for pool #{pool_name} cannot be identified") if cluster.nil?
raise("datacenter for pool #{pool_name} cannot be identified") if datacenter.nil?

dc = "#{datacenter}_#{cluster}"
raise("there is no candidate in vcenter that meets all the required conditions, that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory") unless target[dc].key?('hosts')
return true if target[dc]['hosts'].include?(parent_host)
return true if target[dc]['architectures'][architecture].include?(parent_host)
return false

false
end

def get_vm(pool_name, vm_name)

@@ -280,6 +283,7 @@ module Vmpooler
def create_vm(pool_name, new_vmname)
pool = pool_config(pool_name)
raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?

vm_hash = nil
@connection_pool.with_metrics do |pool_object|
connection = ensured_vsphere_connection(pool_object)

@@ -322,7 +326,7 @@ module Vmpooler
host_object = find_host_by_dnsname(connection, target_host)
relocate_spec.host = host_object
else
# Choose a cluster/host to place the new VM on
# Choose a cluster/host to place the new VM on
target_cluster_object = find_cluster(target_cluster_name, connection, target_datacenter_name)
relocate_spec.pool = target_cluster_object.resourcePool
end

@@ -337,14 +341,12 @@ module Vmpooler

begin
vm_target_folder = find_vm_folder(pool_name, connection)
if vm_target_folder.nil? and @config[:config].key?('create_folders') and @config[:config]['create_folders'] == true
vm_target_folder = create_folder(connection, target_folder_path, target_datacenter_name)
end
rescue => _err
if @config[:config].key?('create_folders') and @config[:config]['create_folders'] == true
vm_target_folder = create_folder(connection, target_folder_path, target_datacenter_name) if vm_target_folder.nil? && @config[:config].key?('create_folders') && (@config[:config]['create_folders'] == true)
rescue StandardError => _e
if @config[:config].key?('create_folders') && (@config[:config]['create_folders'] == true)
vm_target_folder = create_folder(connection, target_folder_path, target_datacenter_name)
else
raise(_err)
raise(_e)
end
end

@@ -418,7 +420,7 @@ module Vmpooler
return true if vm_object.nil?

# Poweroff the VM if it's running
vm_object.PowerOffVM_Task.wait_for_completion if vm_object.runtime && vm_object.runtime.powerState && vm_object.runtime.powerState == 'poweredOn'
vm_object.PowerOffVM_Task.wait_for_completion if vm_object.runtime&.powerState && vm_object.runtime.powerState == 'poweredOn'

# Kill it with fire
vm_object.Destroy_Task.wait_for_completion

@@ -429,7 +431,7 @@ module Vmpooler
def vm_ready?(_pool_name, vm_name)
begin
open_socket(vm_name, global_config[:config]['domain'])
rescue => _err
rescue StandardError => _e
return false
end

@@ -464,9 +466,9 @@ module Vmpooler
pool_configuration = pool_config(pool_name)
return nil if pool_configuration.nil?

hostname = vm_object.summary.guest.hostName if vm_object.summary && vm_object.summary.guest && vm_object.summary.guest.hostName
boottime = vm_object.runtime.bootTime if vm_object.runtime && vm_object.runtime.bootTime
powerstate = vm_object.runtime.powerState if vm_object.runtime && vm_object.runtime.powerState
hostname = vm_object.summary.guest.hostName if vm_object.summary&.guest && vm_object.summary.guest.hostName
boottime = vm_object.runtime.bootTime if vm_object.runtime&.bootTime
powerstate = vm_object.runtime.powerState if vm_object.runtime&.powerState

hash = {
'name' => vm_object.name,

@@ -474,7 +476,7 @@ module Vmpooler
'template' => pool_configuration['template'],
'poolname' => pool_name,
'boottime' => boottime,
'powerstate' => powerstate,
'powerstate' => powerstate
}

hash
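The only change in the last hunk is the comma dropped after the final 'powerstate' => powerstate pair, which appears to be Style/TrailingCommaInHashLiteral under its default no-comma style; both literals build the same hash. A quick check:

  with_comma = {
    'boottime' => nil,
    'powerstate' => 'poweredOn',
  }
  without_comma = {
    'boottime' => nil,
    'powerstate' => 'poweredOn'
  }
  puts with_comma == without_comma # => true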
@@ -492,9 +494,9 @@ module Vmpooler

def vsphere_connection_ok?(connection)
_result = connection.serviceInstance.CurrentTime
return true
rescue
return false
true
rescue StandardError
false
end

def connect_to_vsphere

@@ -507,10 +509,11 @@ module Vmpooler
password: provider_config['password'],
insecure: provider_config['insecure'] || false
metrics.increment('connect.open')
return connection
rescue => err
connection
rescue StandardError => e
metrics.increment('connect.fail')
raise err if try >= max_tries
raise e if try >= max_tries

sleep(try * retry_factor)
try += 1
retry

@@ -610,6 +613,7 @@ module Vmpooler
def find_datastore(datastorename, connection, datacentername)
datacenter = connection.serviceInstance.find_datacenter(datacentername)
raise("Datacenter #{datacentername} does not exist") if datacenter.nil?

datacenter.find_datastore(datastorename)
end

@@ -625,9 +629,7 @@ module Vmpooler
devices = find_disk_devices(vm)

devices.keys.sort.each do |device|
if devices[device]['children'].length < 15
return find_device(vm, devices[device]['device'].deviceInfo.label)
end
return find_device(vm, devices[device]['device'].deviceInfo.label) if devices[device]['children'].length < 15
end

nil

@@ -667,6 +669,7 @@ module Vmpooler

devices.keys.sort.each do |c|
next unless controller.key == devices[c]['device'].key

used_unit_numbers.push(devices[c]['device'].scsiCtlrUnitNumber)
devices[c]['children'].each do |disk|
used_unit_numbers.push(disk.unitNumber)

@@ -674,12 +677,10 @@ module Vmpooler
end

(0..15).each do |scsi_id|
if used_unit_numbers.grep(scsi_id).length <= 0
available_unit_numbers.push(scsi_id)
end
available_unit_numbers.push(scsi_id) if used_unit_numbers.grep(scsi_id).length <= 0
end

available_unit_numbers.sort[0]
available_unit_numbers.min
end

# Finds a folder object by inventory path
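The sort[0] to min rewrite in the last hunk looks like the Style/UnneededSort cop (renamed Style/RedundantSort in later Rubocop releases): the first element of a sorted array is simply the minimum, and min skips building the sorted copy. A quick check:

  available_unit_numbers = [7, 2, 11, 5]
  puts available_unit_numbers.sort[0] # => 2
  puts available_unit_numbers.min     # => 2, same result without the intermediate array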
@@ -692,17 +693,19 @@ module Vmpooler
# Returns nil when the object found is not a folder
pool_configuration = pool_config(pool_name)
return nil if pool_configuration.nil?

folder = pool_configuration['folder']
datacenter = get_target_datacenter_from_config(pool_name)
return nil if datacenter.nil?

propSpecs = {
:entity => self,
:inventoryPath => "#{datacenter}/vm/#{folder}"
entity: self,
inventoryPath: "#{datacenter}/vm/#{folder}"
}

folder_object = connection.searchIndex.FindByInventoryPath(propSpecs)
return nil unless folder_object.class == RbVmomi::VIM::Folder

folder_object
end

@@ -752,6 +755,7 @@ module Vmpooler
def cpu_utilization_for(host)
cpu_usage = host.summary.quickStats.overallCpuUsage
return nil if cpu_usage.nil?

cpu_size = host.summary.hardware.cpuMhz * host.summary.hardware.numCpuCores
(cpu_usage.to_f / cpu_size.to_f) * 100
end

@@ -759,6 +763,7 @@ module Vmpooler
def memory_utilization_for(host)
memory_usage = host.summary.quickStats.overallMemoryUsage
return nil if memory_usage.nil?

memory_size = host.summary.hardware.memorySize / 1024 / 1024
(memory_usage.to_f / memory_size.to_f) * 100
end

@@ -769,13 +774,13 @@ module Vmpooler
end

def build_compatible_hosts_lists(hosts, percentage)
hosts_with_arch_versions = hosts.map { |h|
hosts_with_arch_versions = hosts.map do |h|
{
'utilization' => h[0],
'host_object' => h[1],
'architecture' => get_host_cpu_arch_version(h[1])
'utilization' => h[0],
'host_object' => h[1],
'architecture' => get_host_cpu_arch_version(h[1])
}
}
end
versions = hosts_with_arch_versions.map { |host| host['architecture'] }.uniq
architectures = {}
versions.each do |version|
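Switching the multi-line map braces to do...end in the last hunk is Style/BlockDelimiters under its default rule: braces for single-line blocks, do...end once the block spans lines. The mapped result is identical. A sketch with illustrative host tuples:

  hosts = [[12.5, 'esx-01'], [48.0, 'esx-02']]

  host_records = hosts.map do |h|
    {
      'utilization' => h[0],
      'host_object' => h[1]
    }
  end
  puts host_records.first['host_object'] # => esx-01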
@@ -796,12 +801,13 @@ module Vmpooler

def select_least_used_hosts(hosts, percentage)
raise('Provided hosts list to select_least_used_hosts is empty') if hosts.empty?

average_utilization = get_average_cluster_utilization(hosts)
least_used_hosts = []
hosts.each do |host|
least_used_hosts << host if host[0] <= average_utilization
end
hosts_to_select = (hosts.count * (percentage / 100.0)).to_int
hosts_to_select = (hosts.count * (percentage / 100.0)).to_int
hosts_to_select = hosts.count - 1 if percentage == 100
least_used_hosts.sort[0..hosts_to_select].map { |host| host[1].name }
end

@@ -811,8 +817,10 @@ module Vmpooler
connection = ensured_vsphere_connection(pool_object)
cluster_object = find_cluster(cluster, connection, datacentername)
raise("Cluster #{cluster} cannot be found") if cluster_object.nil?

target_hosts = get_cluster_host_utilization(cluster_object)
raise("there is no candidate in vcenter that meets all the required conditions, that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory'") if target_hosts.empty?

architectures = build_compatible_hosts_lists(target_hosts, percentage)
least_used_hosts = select_least_used_hosts(target_hosts, percentage)
{

@@ -825,6 +833,7 @@ module Vmpooler
def find_host_by_dnsname(connection, dnsname)
host_object = connection.searchIndex.FindByDnsName(dnsName: dnsname, vmSearch: false)
return nil if host_object.nil?

host_object
end

@@ -832,7 +841,8 @@ module Vmpooler
cluster_object = find_cluster(cluster, connection, datacentername)
target_hosts = get_cluster_host_utilization(cluster_object)
raise("There is no host candidate in vcenter that meets all the required conditions, check that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory'") if target_hosts.empty?
least_used_host = target_hosts.sort[0][1]

least_used_host = target_hosts.min[1]
least_used_host
end

@@ -848,7 +858,7 @@ module Vmpooler
cv = connection.serviceContent.viewManager.CreateContainerView(
container: datacenter.hostFolder,
type: ['ComputeResource', 'ClusterComputeResource'],
recursive: true,
recursive: true
)
cluster = cv.view.find { |cluster_object| cluster_object.name == cluster }
cv.DestroyView

@@ -870,7 +880,8 @@ module Vmpooler
cluster = source_host.parent
target_hosts = get_cluster_host_utilization(cluster, model)
raise("There is no host candidate in vcenter that meets all the required conditions, check that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory'") if target_hosts.empty?
target_host = target_hosts.sort[0][1]

target_host = target_hosts.min[1]
[target_host, target_host.name]
end

@@ -891,13 +902,14 @@ module Vmpooler
# Returns nil when a VM, or pool configuration, cannot be found
pool_configuration = pool_config(pool_name)
return nil if pool_configuration.nil?

folder = pool_configuration['folder']
datacenter = get_target_datacenter_from_config(pool_name)
return nil if datacenter.nil?

propSpecs = {
:entity => self,
:inventoryPath => "#{datacenter}/vm/#{folder}/#{vmname}"
entity: self,
inventoryPath: "#{datacenter}/vm/#{folder}/#{vmname}"
}

connection.searchIndex.FindByInventoryPath(propSpecs)

@@ -929,8 +941,10 @@ module Vmpooler
def get_vm_details(pool_name, vm_name, connection)
vm_object = find_vm(pool_name, vm_name, connection)
return nil if vm_object.nil?
parent_host_object = vm_object.summary.runtime.host if vm_object.summary && vm_object.summary.runtime && vm_object.summary.runtime.host

parent_host_object = vm_object.summary.runtime.host if vm_object.summary&.runtime && vm_object.summary.runtime.host
raise('Unable to determine which host the VM is running on') if parent_host_object.nil?

parent_host = parent_host_object.name
architecture = get_host_cpu_arch_version(parent_host_object)
{

@@ -944,6 +958,7 @@ module Vmpooler
migration_limit = config[:config]['migration_limit']
return false unless migration_limit.is_a? Integer
return true if migration_limit > 0

false
end

@@ -969,9 +984,9 @@ module Vmpooler
else
logger.log('s', "[ ] [#{pool_name}] '#{vm_name}' is running on #{vm_hash['host_name']}")
end
rescue => _err
rescue StandardError => _e
logger.log('s', "[!] [#{pool_name}] '#{vm_name}' is running on #{vm_hash['host_name']}")
raise _err
raise _e
end
end
end

@@ -1008,8 +1023,9 @@ module Vmpooler

def create_folder(connection, new_folder, datacenter)
dc = connection.serviceInstance.find_datacenter(datacenter)
folder_object = dc.vmFolder.traverse(new_folder, type=RbVmomi::VIM::Folder, create=true)
folder_object = dc.vmFolder.traverse(new_folder, type = RbVmomi::VIM::Folder, create = true)
raise("Cannot create folder #{new_folder}") if folder_object.nil?

folder_object
end

@@ -1018,8 +1034,8 @@ module Vmpooler
raise('cannot find datacenter') if datacenter.nil?

propSpecs = {
:entity => self,
:inventoryPath => "#{datacenter}/vm/#{pool['template']}"
entity: self,
inventoryPath: "#{datacenter}/vm/#{pool['template']}"
}

template_vm_object = connection.searchIndex.FindByInventoryPath(propSpecs)

@@ -1041,12 +1057,14 @@ module Vmpooler
return false unless template.include?('/')
return false if template[0] == '/'
return false if template[-1] == '/'
return true

true
end

def get_disk_backing(pool)
return :moveChildMostDiskBacking if linked_clone?(pool)
return :moveAllDiskBackingsAndConsolidate

:moveAllDiskBackingsAndConsolidate
end

def linked_clone?(pool)
@@ -6,9 +6,7 @@ module Vmpooler
attr_reader :server, :port, :prefix

def initialize(params = {})
if params['server'].nil? || params['server'].empty?
raise ArgumentError, "Statsd server is required. Config: #{params.inspect}"
end
raise ArgumentError, "Statsd server is required. Config: #{params.inspect}" if params['server'].nil? || params['server'].empty?

host = params['server']
@port = params['port'] || 8125

@@ -18,20 +16,20 @@ module Vmpooler

def increment(label)
server.increment(prefix + '.' + label)
rescue => err
$stderr.puts "Failure incrementing #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{err}"
rescue StandardError => e
warn "Failure incrementing #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{e}"
end

def gauge(label, value)
server.gauge(prefix + '.' + label, value)
rescue => err
$stderr.puts "Failure updating gauge #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{err}"
rescue StandardError => e
warn "Failure updating gauge #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{e}"
end

def timing(label, duration)
server.timing(prefix + '.' + label, duration)
rescue => err
$stderr.puts "Failure updating timing #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{err}"
rescue StandardError => e
warn "Failure updating timing #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{e}"
end
end
end
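The statsd helpers swap $stderr.puts for warn, the Style/StderrPuts correction; Kernel#warn writes the same line to standard error without naming the global variable directly. A minimal check:

  prefix = 'vmpooler'
  label = 'connect.fail'

  $stderr.puts "Failure incrementing #{prefix}.#{label}" # old form
  warn "Failure incrementing #{prefix}.#{label}"         # same output on stderr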