Merge pull request #423 from puppetlabs/pooler-192

(POOLER-192) Use Rubocop 1.0
This commit is contained in:
John O'Connor 2020-10-26 20:34:27 +00:00 committed by GitHub
commit 96ceba1954
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
15 changed files with 182 additions and 225 deletions

View file

@ -53,17 +53,13 @@ module Vmpooler
# Bail out if someone attempts to start vmpooler with dummy authentication
# without enabling debug mode.
if parsed_config.key? :auth
if parsed_config[:auth]['provider'] == 'dummy'
unless ENV['VMPOOLER_DEBUG']
warning = [
'Dummy authentication should not be used outside of debug mode',
'please set environment variable VMPOOLER_DEBUG to \'true\' if you want to use dummy authentication'
]
if parsed_config.key?(:auth) && parsed_config[:auth]['provider'] == 'dummy' && !ENV['VMPOOLER_DEBUG']
warning = [
'Dummy authentication should not be used outside of debug mode',
'please set environment variable VMPOOLER_DEBUG to \'true\' if you want to use dummy authentication'
]
raise warning.join(";\s")
end
end
raise warning.join(";\s")
end
# Set some configuration defaults
@ -140,14 +136,14 @@ module Vmpooler
parsed_config[:pool_names] << pool['name']
pool['ready_ttl'] ||= parsed_config[:config]['ready_ttl']
if pool['alias']
if pool['alias'].is_a?(Array)
if pool['alias'].instance_of?(Array)
pool['alias'].each do |pool_alias|
parsed_config[:alias] ||= {}
parsed_config[:alias][pool_alias] = [pool['name']] unless parsed_config[:alias].key? pool_alias
parsed_config[:alias][pool_alias] << pool['name'] unless parsed_config[:alias][pool_alias].include? pool['name']
parsed_config[:pool_names] << pool_alias
end
elsif pool['alias'].is_a?(String)
elsif pool['alias'].instance_of?(String)
parsed_config[:alias][pool['alias']] = pool['name']
parsed_config[:pool_names] << pool['alias']
end

View file

@ -128,34 +128,32 @@ module Vmpooler
pools.each do |pool|
running = running_hash[pool['name']]
pool['major'] = Regexp.last_match[1] if pool['name'] =~ /^(\w+)\-/
pool['major'] = Regexp.last_match[1] if pool['name'] =~ /^(\w+)-/
result[pool['major']] ||= {}
result[pool['major']]['running'] = result[pool['major']]['running'].to_i + running.to_i
end
if params[:history]
if graph_url
begin
buffer = URI.parse(graph_link('.running.*&from=-1hour&format=json')).read
JSON.parse(buffer).each do |pool|
if pool['target'] =~ /.*\.(.*)$/
pool['name'] = Regexp.last_match[1]
pool['major'] = Regexp.last_match[1] if pool['name'] =~ /^(\w+)\-/
result[pool['major']]['history'] ||= []
if params[:history] && graph_url
begin
buffer = URI.parse(graph_link('.running.*&from=-1hour&format=json')).read
JSON.parse(buffer).each do |pool|
if pool['target'] =~ /.*\.(.*)$/
pool['name'] = Regexp.last_match[1]
pool['major'] = Regexp.last_match[1] if pool['name'] =~ /^(\w+)-/
result[pool['major']]['history'] ||= []
for i in 0..pool['datapoints'].length
if pool['datapoints'][i] && pool['datapoints'][i][0]
pool['last'] = pool['datapoints'][i][0]
result[pool['major']]['history'][i] ||= 0
result[pool['major']]['history'][i] = result[pool['major']]['history'][i].to_i + pool['datapoints'][i][0].to_i
else
result[pool['major']]['history'][i] = result[pool['major']]['history'][i].to_i + pool['last'].to_i
end
for i in 0..pool['datapoints'].length
if pool['datapoints'][i] && pool['datapoints'][i][0]
pool['last'] = pool['datapoints'][i][0]
result[pool['major']]['history'][i] ||= 0
result[pool['major']]['history'][i] = result[pool['major']]['history'][i].to_i + pool['datapoints'][i][0].to_i
else
result[pool['major']]['history'][i] = result[pool['major']]['history'][i].to_i + pool['last'].to_i
end
end
end
rescue StandardError
end
rescue StandardError
end
end
JSON.pretty_generate(result)

View file

@ -13,12 +13,12 @@ module Vmpooler
def valid_token?(backend)
return false unless has_token?
backend.exists?('vmpooler__token__' + request.env['HTTP_X_AUTH_TOKEN']) ? true : false
backend.exists?("vmpooler__token__#{request.env['HTTP_X_AUTH_TOKEN']}") ? true : false
end
def validate_token(backend)
if valid_token?(backend)
backend.hset('vmpooler__token__' + request.env['HTTP_X_AUTH_TOKEN'], 'last', Time.now)
backend.hset("vmpooler__token__#{request.env['HTTP_X_AUTH_TOKEN']}last#{Time.now}")
return true
end
@ -118,8 +118,8 @@ module Vmpooler
tags.each_pair do |tag, value|
next if value.nil? or value.empty?
backend.hset('vmpooler__vm__' + hostname, 'tag:' + tag, value)
backend.hset('vmpooler__tag__' + Date.today.to_s, hostname + ':' + tag, value)
backend.hset("vmpooler__vm__#{hostname}", "tag:#{tag}", value)
backend.hset("vmpooler__tag__#{Date.today}", "#{hostname}:#{tag}", value)
end
end
end
@ -147,7 +147,7 @@ module Vmpooler
def hostname_shorten(hostname, domain=nil)
if domain && hostname =~ /^[\w-]+\.#{domain}$/
hostname = hostname[/[^\.]+/]
hostname = hostname[/[^.]+/]
end
hostname
@ -253,17 +253,15 @@ module Vmpooler
tags = {}
backend.hgetall('vmpooler__tag__' + date_str).each do |key, value|
backend.hgetall("vmpooler__tag__#{date_str}").each do |key, value|
hostname = 'unknown'
tag = 'unknown'
if key =~ /\:/
if key =~ /:/
hostname, tag = key.split(':', 2)
end
if opts[:only]
next unless tag == opts[:only]
end
next if opts[:only] && tag != opts[:only]
tags[tag] ||= {}
tags[tag][value] ||= 0
@ -321,7 +319,7 @@ module Vmpooler
}
}
task[:count][:total] = backend.hlen('vmpooler__' + task_str + '__' + date_str).to_i
task[:count][:total] = backend.hlen("vmpooler__#{task_str}__#{date_str}").to_i
if task[:count][:total] > 0
if opts[:bypool] == true
@ -330,11 +328,11 @@ module Vmpooler
task[:count][:pool] = {}
task[:duration][:pool] = {}
backend.hgetall('vmpooler__' + task_str + '__' + date_str).each do |key, value|
backend.hgetall("vmpooler__#{task_str}__#{date_str}").each do |key, value|
pool = 'unknown'
hostname = 'unknown'
if key =~ /\:/
if key =~ /:/
pool, hostname = key.split(':')
else
hostname = key

View file

@ -174,13 +174,13 @@ module Vmpooler
if Vmpooler::API.settings.config[:auth] and has_token?
validate_token(backend)
backend.hset('vmpooler__vm__' + vm, 'token:token', request.env['HTTP_X_AUTH_TOKEN'])
backend.hset('vmpooler__vm__' + vm, 'token:user',
backend.hget('vmpooler__token__' + request.env['HTTP_X_AUTH_TOKEN'], 'user')
backend.hset("vmpooler__vm__#{vm}", 'token:token', request.env['HTTP_X_AUTH_TOKEN'])
backend.hset("vmpooler__vm__#{vm}", 'token:user',
backend.hget("vmpooler__token__#{request.env['HTTP_X_AUTH_TOKEN']}", 'user')
)
if config['vm_lifetime_auth'].to_i > 0
backend.hset('vmpooler__vm__' + vm, 'lifetime', config['vm_lifetime_auth'].to_i)
backend.hset("vmpooler__vm__#{vm}", 'lifetime', config['vm_lifetime_auth'].to_i)
end
end
end
@ -205,11 +205,11 @@ module Vmpooler
vmname, vmpool, vmtemplate = fetch_single_vm(requested)
if !vmname
failed = true
metrics.increment('checkout.empty.' + requested)
metrics.increment("checkout.empty.#{requested}")
break
else
vms << [vmpool, vmname, vmtemplate]
metrics.increment('checkout.success.' + vmtemplate)
metrics.increment("checkout.success.#{vmtemplate}")
end
end
end
@ -337,7 +337,7 @@ module Vmpooler
payload&.each do |poolname, count|
next unless count.to_i > config['max_ondemand_instances_per_request']
metrics.increment('ondemandrequest_fail.toomanyrequests.' + poolname)
metrics.increment("ondemandrequest_fail.toomanyrequests.#{poolname}")
return true
end
false
@ -380,7 +380,7 @@ module Vmpooler
if Vmpooler::API.settings.config[:auth] and has_token?
backend.hset("vmpooler__odrequest__#{request_id}", 'token:token', request.env['HTTP_X_AUTH_TOKEN'])
backend.hset("vmpooler__odrequest__#{request_id}", 'token:user',
backend.hget('vmpooler__token__' + request.env['HTTP_X_AUTH_TOKEN'], 'user'))
backend.hget("vmpooler__token__#{request.env['HTTP_X_AUTH_TOKEN']}", 'user'))
end
result['domain'] = config['domain'] if config['domain']
@ -542,9 +542,9 @@ module Vmpooler
if subpool.include?(p['name'])
true
elsif !p['alias'].nil?
if p['alias'].is_a?(Array)
if p['alias'].instance_of?(Array)
(p['alias'] & subpool).any?
elsif p['alias'].is_a?(String)
elsif p['alias'].instance_of?(String)
subpool.include?(p['alias'])
end
end
@ -727,14 +727,14 @@ module Vmpooler
result = { 'ok' => false }
if Vmpooler::API.settings.config[:auth]
token = backend.hgetall('vmpooler__token__' + params[:token])
token = backend.hgetall("vmpooler__token__#{params[:token]}")
if not token.nil? and not token.empty?
status 200
pools.each do |pool|
backend.smembers('vmpooler__running__' + pool['name']).each do |vm|
if backend.hget('vmpooler__vm__' + vm, 'token:token') == params[:token]
backend.smembers("vmpooler__running__#{pool['name']}").each do |vm|
if backend.hget("vmpooler__vm__#{vm}", 'token:token') == params[:token]
token['vms'] ||= {}
token['vms']['running'] ||= []
token['vms']['running'].push(vm)
@ -760,7 +760,7 @@ module Vmpooler
need_auth!
if backend.del('vmpooler__token__' + params[:token]).to_i > 0
if backend.del("vmpooler__token__#{params[:token]}").to_i > 0
status 200
result['ok'] = true
end
@ -783,8 +783,8 @@ module Vmpooler
o = [('a'..'z'), ('0'..'9')].map(&:to_a).flatten
result['token'] = o[rand(25)] + (0...31).map { o[rand(o.length)] }.join
backend.hset('vmpooler__token__' + result['token'], 'user', @auth.username)
backend.hset('vmpooler__token__' + result['token'], 'created', Time.now)
backend.hset("vmpooler__token__#{result['token']}", 'user', @auth.username)
backend.hset("vmpooler__token__#{result['token']}", 'created', Time.now)
status 200
result['ok'] = true
@ -823,7 +823,7 @@ module Vmpooler
else
result[:bad_templates] = invalid
invalid.each do |bad_template|
metrics.increment('ondemandrequest_fail.invalid.' + bad_template)
metrics.increment("ondemandrequest_fail.invalid.#{bad_template}")
end
status 404
end
@ -858,7 +858,7 @@ module Vmpooler
else
result[:bad_templates] = invalid
invalid.each do |bad_template|
metrics.increment('ondemandrequest_fail.invalid.' + bad_template)
metrics.increment("ondemandrequest_fail.invalid.#{bad_template}")
end
status 404
end
@ -904,7 +904,7 @@ module Vmpooler
result = atomically_allocate_vms(payload)
else
invalid.each do |bad_template|
metrics.increment('checkout.invalid.' + bad_template)
metrics.increment("checkout.invalid.#{bad_template}")
end
status 404
end
@ -980,7 +980,8 @@ module Vmpooler
result['ok'] = true
status 202
if request_hash['status'] == 'ready'
case request_hash['status']
when 'ready'
result['ready'] = true
Parsing.get_platform_pool_count(request_hash['requested']) do |platform_alias, pool, _count|
instances = backend.smembers("vmpooler__#{request_id}__#{platform_alias}__#{pool}")
@ -993,10 +994,10 @@ module Vmpooler
end
result['domain'] = config['domain'] if config['domain']
status 200
elsif request_hash['status'] == 'failed'
when 'failed'
result['message'] = "The request failed to provision instances within the configured ondemand_request_ttl '#{config['ondemand_request_ttl']}'"
status 200
elsif request_hash['status'] == 'deleted'
when 'deleted'
result['message'] = 'The request has been deleted'
status 200
else
@ -1059,7 +1060,7 @@ module Vmpooler
result = atomically_allocate_vms(payload)
else
invalid.each do |bad_template|
metrics.increment('checkout.invalid.' + bad_template)
metrics.increment("checkout.invalid.#{bad_template}")
end
status 404
end
@ -1082,7 +1083,7 @@ module Vmpooler
params[:hostname] = hostname_shorten(params[:hostname], config['domain'])
rdata = backend.hgetall('vmpooler__vm__' + params[:hostname])
rdata = backend.hgetall("vmpooler__vm__#{params[:hostname]}")
unless rdata.empty?
status 200
result['ok'] = true
@ -1155,12 +1156,12 @@ module Vmpooler
params[:hostname] = hostname_shorten(params[:hostname], config['domain'])
rdata = backend.hgetall('vmpooler__vm__' + params[:hostname])
rdata = backend.hgetall("vmpooler__vm__#{params[:hostname]}")
unless rdata.empty?
need_token! if rdata['token:token']
if backend.srem('vmpooler__running__' + rdata['template'], params[:hostname])
backend.sadd('vmpooler__completed__' + rdata['template'], params[:hostname])
if backend.srem("vmpooler__running__#{rdata['template']}", params[:hostname])
backend.sadd("vmpooler__completed__#{rdata['template']}", params[:hostname])
status 200
result['ok'] = true
@ -1184,7 +1185,7 @@ module Vmpooler
params[:hostname] = hostname_shorten(params[:hostname], config['domain'])
if backend.exists?('vmpooler__vm__' + params[:hostname])
if backend.exists?("vmpooler__vm__#{params[:hostname]}")
begin
jdata = JSON.parse(request.body.read)
rescue StandardError
@ -1212,13 +1213,8 @@ module Vmpooler
end
when 'tags'
unless arg.is_a?(Hash)
failure.push("You provided tags (#{arg}) as something other than a hash.")
end
if config['allowed_tags']
failure.push("You provided unsuppored tags (#{arg}).") if not (arg.keys - config['allowed_tags']).empty?
end
failure.push("You provided tags (#{arg}) as something other than a hash.") unless arg.is_a?(Hash)
failure.push("You provided unsuppored tags (#{arg}).") if config['allowed_tags'] && !(arg.keys - config['allowed_tags']).empty?
else
failure.push("Unknown argument #{arg}.")
end
@ -1235,7 +1231,7 @@ module Vmpooler
arg = arg.to_i
backend.hset('vmpooler__vm__' + params[:hostname], param, arg)
backend.hset("vmpooler__vm__#{params[:hostname]}", param, arg)
when 'tags'
filter_tags(arg)
export_tags(backend, params[:hostname], arg)
@ -1261,11 +1257,11 @@ module Vmpooler
params[:hostname] = hostname_shorten(params[:hostname], config['domain'])
if ((params[:size].to_i > 0 )and (backend.exists?('vmpooler__vm__' + params[:hostname])))
if ((params[:size].to_i > 0 )and (backend.exists?("vmpooler__vm__#{params[:hostname]}")))
result[params[:hostname]] = {}
result[params[:hostname]]['disk'] = "+#{params[:size]}gb"
backend.sadd('vmpooler__tasks__disk', params[:hostname] + ':' + params[:size])
backend.sadd('vmpooler__tasks__disk', "#{params[:hostname]}:#{params[:size]}")
status 202
result['ok'] = true
@ -1285,13 +1281,13 @@ module Vmpooler
params[:hostname] = hostname_shorten(params[:hostname], config['domain'])
if backend.exists?('vmpooler__vm__' + params[:hostname])
if backend.exists?("vmpooler__vm__#{params[:hostname]}")
result[params[:hostname]] = {}
o = [('a'..'z'), ('0'..'9')].map(&:to_a).flatten
result[params[:hostname]]['snapshot'] = o[rand(25)] + (0...31).map { o[rand(o.length)] }.join
backend.sadd('vmpooler__tasks__snapshot', params[:hostname] + ':' + result[params[:hostname]]['snapshot'])
backend.sadd('vmpooler__tasks__snapshot', "#{params[:hostname]}:#{result[params[:hostname]]['snapshot']}")
status 202
result['ok'] = true
@ -1311,8 +1307,8 @@ module Vmpooler
params[:hostname] = hostname_shorten(params[:hostname], config['domain'])
unless backend.hget('vmpooler__vm__' + params[:hostname], 'snapshot:' + params[:snapshot]).to_i.zero?
backend.sadd('vmpooler__tasks__snapshot-revert', params[:hostname] + ':' + params[:snapshot])
unless backend.hget("vmpooler__vm__#{params[:hostname]}", "snapshot:#{params[:snapshot]}").to_i.zero?
backend.sadd('vmpooler__tasks__snapshot-revert', "#{params[:hostname]}:#{params[:snapshot]}")
status 202
result['ok'] = true

View file

@ -5,9 +5,6 @@ module Vmpooler
class DummyStatsd < Metrics
attr_reader :server, :port, :prefix
def initialize(*)
end
def increment(*)
true
end

View file

@ -7,6 +7,7 @@ module Vmpooler
class Graphite < Metrics
attr_reader :server, :port, :prefix
# rubocop:disable Lint/MissingSuper
def initialize(logger, params = {})
raise ArgumentError, "Graphite server is required. Config: #{params.inspect}" if params['server'].nil? || params['server'].empty?
@ -15,6 +16,7 @@ module Vmpooler
@prefix = params['prefix'] || 'vmpooler'
@logger = logger
end
# rubocop:enable Lint/MissingSuper
def increment(label)
log label, 1

View file

@ -23,6 +23,7 @@ module Vmpooler
@p_metrics = {}
@torun = []
# rubocop:disable Lint/MissingSuper
def initialize(logger, params = {})
@prefix = params['prefix'] || 'vmpooler'
@prometheus_prefix = params['prometheus_prefix'] || 'vmpooler'
@ -32,6 +33,7 @@ module Vmpooler
# Setup up prometheus registry and data structures
@prometheus = Prometheus::Client.registry
end
# rubocop:enable Lint/MissingSuper
=begin # rubocop:disable Style/BlockComments
The Metrics table is used to register metrics and translate/interpret the incoming metrics.

View file

@ -8,6 +8,7 @@ module Vmpooler
class Statsd < Metrics
attr_reader :server, :port, :prefix
# rubocop:disable Lint/MissingSuper
def initialize(logger, params = {})
raise ArgumentError, "Statsd server is required. Config: #{params.inspect}" if params['server'].nil? || params['server'].empty?
@ -17,21 +18,22 @@ module Vmpooler
@server = ::Statsd.new(host, @port)
@logger = logger
end
# rubocop:enable Lint/MissingSuper
def increment(label)
server.increment(prefix + '.' + label)
server.increment("#{prefix}.#{label}")
rescue StandardError => e
@logger.log('s', "[!] Failure incrementing #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{e}")
end
def gauge(label, value)
server.gauge(prefix + '.' + label, value)
server.gauge("#{prefix}.#{label}", value)
rescue StandardError => e
@logger.log('s', "[!] Failure updating gauge #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{e}")
end
def timing(label, duration)
server.timing(prefix + '.' + label, duration)
server.timing("#{prefix}.#{label}", duration)
rescue StandardError => e
@logger.log('s', "[!] Failure updating timing #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{e}")
end

View file

@ -108,7 +108,7 @@ module Vmpooler
$logger.log('d', "[!] [#{pool}] '#{vm}' no longer exists. Removing from pending.")
end
def fail_pending_vm(vm, pool, timeout, redis, exists = true)
def fail_pending_vm(vm, pool, timeout, redis, exists: true)
clone_stamp = redis.hget("vmpooler__vm__#{vm}", 'clone')
time_since_clone = (Time.now - Time.parse(clone_stamp)) / 60
@ -117,7 +117,7 @@ module Vmpooler
request_id = redis.hget("vmpooler__vm__#{vm}", 'request_id')
pool_alias = redis.hget("vmpooler__vm__#{vm}", 'pool_alias') if request_id
redis.multi
redis.smove('vmpooler__pending__' + pool, 'vmpooler__completed__' + pool, vm)
redis.smove("vmpooler__pending__#{pool}", "vmpooler__completed__#{pool}", vm)
redis.zadd('vmpooler__odcreate__task', 1, "#{pool_alias}:#{pool}:1:#{request_id}") if request_id
redis.exec
$metrics.increment("errors.markedasfailed.#{pool}")
@ -133,15 +133,16 @@ module Vmpooler
end
def move_pending_vm_to_ready(vm, pool, redis, request_id = nil)
clone_time = redis.hget('vmpooler__vm__' + vm, 'clone')
clone_time = redis.hget("vmpooler__vm__#{vm}", 'clone')
finish = format('%<time>.2f', time: Time.now - Time.parse(clone_time))
if request_id
ondemandrequest_hash = redis.hgetall("vmpooler__odrequest__#{request_id}")
if ondemandrequest_hash['status'] == 'failed'
case ondemandrequest_hash['status']
when 'failed'
move_vm_queue(pool, vm, 'pending', 'completed', redis, "moved to completed queue. '#{request_id}' could not be filled in time")
return nil
elsif ondemandrequest_hash['status'] == 'deleted'
when 'deleted'
move_vm_queue(pool, vm, 'pending', 'completed', redis, "moved to completed queue. '#{request_id}' has been deleted")
return nil
end
@ -160,11 +161,11 @@ module Vmpooler
move_vm_queue(pool, vm, 'pending', 'running', redis)
check_ondemand_request_ready(request_id, redis)
else
redis.smove('vmpooler__pending__' + pool, 'vmpooler__ready__' + pool, vm)
redis.smove("vmpooler__pending__#{pool}", "vmpooler__ready__#{pool}", vm)
end
redis.pipelined do
redis.hset('vmpooler__boot__' + Date.today.to_s, pool + ':' + vm, finish) # maybe remove as this is never used by vmpooler itself?
redis.hset("vmpooler__boot__#{Date.today}", "#{pool}:#{vm}", finish) # maybe remove as this is never used by vmpooler itself?
redis.hset("vmpooler__vm__#{vm}", 'ready', Time.now)
# last boot time is displayed in API, and used by alarming script
@ -203,11 +204,11 @@ module Vmpooler
mutex.synchronize do
@redis.with_metrics do |redis|
check_stamp = redis.hget('vmpooler__vm__' + vm, 'check')
check_stamp = redis.hget("vmpooler__vm__#{vm}", 'check')
last_checked_too_soon = ((Time.now - Time.parse(check_stamp)).to_i < $config[:config]['vm_checktime'] * 60) if check_stamp
break if check_stamp && last_checked_too_soon
redis.hset('vmpooler__vm__' + vm, 'check', Time.now)
redis.hset("vmpooler__vm__#{vm}", 'check', Time.now)
# Check if the hosts TTL has expired
# if 'boottime' is nil, set bootime to beginning of unix epoch, forces TTL to be assumed expired
boottime = redis.hget("vmpooler__vm__#{vm}", 'ready')
@ -217,7 +218,7 @@ module Vmpooler
boottime = Time.at(0)
end
if (Time.now - boottime).to_i > ttl * 60
redis.smove('vmpooler__ready__' + pool_name, 'vmpooler__completed__' + pool_name, vm)
redis.smove("vmpooler__ready__#{pool_name}", "vmpooler__completed__#{pool_name}", vm)
$logger.log('d', "[!] [#{pool_name}] '#{vm}' reached end of TTL after #{ttl} minutes, removed from 'ready' queue")
return nil
@ -256,7 +257,7 @@ module Vmpooler
return if hostname.empty?
return if hostname == vm
redis.smove('vmpooler__ready__' + pool_name, 'vmpooler__completed__' + pool_name, vm)
redis.smove("vmpooler__ready__#{pool_name}", "vmpooler__completed__#{pool_name}", vm)
$logger.log('d', "[!] [#{pool_name}] '#{vm}' has mismatched hostname #{hostname}, removed from 'ready' queue")
true
end
@ -280,7 +281,7 @@ module Vmpooler
catch :stop_checking do
@redis.with_metrics do |redis|
# Check that VM is within defined lifetime
checkouttime = redis.hget('vmpooler__active__' + pool, vm)
checkouttime = redis.hget("vmpooler__active__#{pool}", vm)
if checkouttime
time_since_checkout = Time.now - Time.parse(checkouttime)
running = time_since_checkout / 60 / 60
@ -341,7 +342,7 @@ module Vmpooler
adjective = @name_generator.adjective(max: 14 - noun.length)
random_name = [adjective, noun].join('-')
hostname = $config[:config]['prefix'] + random_name
available = redis.hlen('vmpooler__vm__' + hostname) == 0
available = redis.hlen("vmpooler__vm__#{hostname}") == 0
[hostname, available]
end
@ -395,12 +396,12 @@ module Vmpooler
@redis.with_metrics do |redis|
# Add VM to Redis inventory ('pending' pool)
redis.multi
redis.sadd('vmpooler__pending__' + pool_name, new_vmname)
redis.hset('vmpooler__vm__' + new_vmname, 'clone', Time.now)
redis.hset('vmpooler__vm__' + new_vmname, 'template', pool_name) # This value is used to represent the pool.
redis.hset('vmpooler__vm__' + new_vmname, 'pool', pool_name)
redis.hset('vmpooler__vm__' + new_vmname, 'request_id', request_id) if request_id
redis.hset('vmpooler__vm__' + new_vmname, 'pool_alias', pool_alias) if pool_alias
redis.sadd("vmpooler__pending__#{pool_name}", new_vmname)
redis.hset("vmpooler__vm__#{new_vmname}", 'clone', Time.now)
redis.hset("vmpooler__vm__#{new_vmname}", 'template', pool_name) # This value is used to represent the pool.
redis.hset("vmpooler__vm__#{new_vmname}", 'pool', pool_name)
redis.hset("vmpooler__vm__#{new_vmname}", 'request_id', request_id) if request_id
redis.hset("vmpooler__vm__#{new_vmname}", 'pool_alias', pool_alias) if pool_alias
redis.exec
end
@ -412,8 +413,8 @@ module Vmpooler
@redis.with_metrics do |redis|
redis.pipelined do
redis.hset('vmpooler__clone__' + Date.today.to_s, pool_name + ':' + new_vmname, finish)
redis.hset('vmpooler__vm__' + new_vmname, 'clone_time', finish)
redis.hset("vmpooler__clone__#{Date.today}", "#{pool_name}:#{new_vmname}", finish)
redis.hset("vmpooler__vm__#{new_vmname}", 'clone_time', finish)
end
end
$logger.log('s', "[+] [#{pool_name}] '#{new_vmname}' cloned in #{finish} seconds")
@ -456,18 +457,18 @@ module Vmpooler
mutex.synchronize do
@redis.with_metrics do |redis|
redis.pipelined do
redis.hdel('vmpooler__active__' + pool, vm)
redis.hset('vmpooler__vm__' + vm, 'destroy', Time.now)
redis.hdel("vmpooler__active__#{pool}", vm)
redis.hset("vmpooler__vm__#{vm}", 'destroy', Time.now)
# Auto-expire metadata key
redis.expire('vmpooler__vm__' + vm, ($config[:redis]['data_ttl'].to_i * 60 * 60))
redis.expire("vmpooler__vm__#{vm}", ($config[:redis]['data_ttl'].to_i * 60 * 60))
end
start = Time.now
provider.destroy_vm(pool, vm)
redis.srem('vmpooler__completed__' + pool, vm)
redis.srem("vmpooler__completed__#{pool}", vm)
finish = format('%<time>.2f', time: Time.now - start)
$logger.log('s', "[-] [#{pool}] '#{vm}' destroyed in #{finish} seconds")
@ -603,10 +604,10 @@ module Vmpooler
if result
@redis.with_metrics do |redis|
rdisks = redis.hget('vmpooler__vm__' + vm_name, 'disk')
rdisks = redis.hget("vmpooler__vm__#{vm_name}", 'disk')
disks = rdisks ? rdisks.split(':') : []
disks.push("+#{disk_size}gb")
redis.hset('vmpooler__vm__' + vm_name, 'disk', disks.join(':'))
redis.hset("vmpooler__vm__#{vm_name}", 'disk', disks.join(':'))
end
$logger.log('s', "[+] [disk_manager] '#{vm_name}' attached #{disk_size}gb disk in #{finish} seconds")
@ -638,7 +639,7 @@ module Vmpooler
if result
@redis.with_metrics do |redis|
redis.hset('vmpooler__vm__' + vm_name, 'snapshot:' + snapshot_name, Time.now.to_s)
redis.hset("vmpooler__vm__#{vm_name}", "snapshot:#{snapshot_name}", Time.now.to_s)
end
$logger.log('s', "[+] [snapshot_manager] '#{vm_name}' snapshot created in #{finish} seconds")
else
@ -697,7 +698,7 @@ module Vmpooler
def get_pool_name_for_vm(vm_name, redis)
# the 'template' is a bad name. Should really be 'poolname'
redis.hget('vmpooler__vm__' + vm_name, 'template')
redis.hget("vmpooler__vm__#{vm_name}", 'template')
end
# @param pool_name [String] - the name of the pool
@ -867,16 +868,12 @@ module Vmpooler
if options[:clone_target_change]
clone_target = redis.hget('vmpooler__config__clone_target}', options[:poolname])
if clone_target
break unless clone_target == initial_clone_target
end
break if clone_target && clone_target != initial_clone_target
end
if options[:pool_template_change]
configured_template = redis.hget('vmpooler__config__template', options[:poolname])
if configured_template
break unless initial_template == configured_template
end
break if configured_template && initial_template != configured_template
end
if options[:pool_reset]
@ -970,21 +967,17 @@ module Vmpooler
def sync_pool_template(pool)
@redis.with_metrics do |redis|
pool_template = redis.hget('vmpooler__config__template', pool['name'])
if pool_template
pool['template'] = pool_template unless pool['template'] == pool_template
end
pool['template'] = pool_template if pool_template && pool['template'] != pool_template
end
end
def prepare_template(pool, provider, redis)
if $config[:config]['create_template_delta_disks']
unless redis.sismember('vmpooler__template__deltas', pool['template'])
begin
provider.create_template_delta_disks(pool)
redis.sadd('vmpooler__template__deltas', pool['template'])
rescue StandardError => e
$logger.log('s', "[!] [#{pool['name']}] failed while preparing a template with an error. As a result vmpooler could not create the template delta disks. Either a template delta disk already exists, or the template delta disk creation failed. The error is: #{e}")
end
if $config[:config]['create_template_delta_disks'] && !redis.sismember('vmpooler__template__deltas', pool['template'])
begin
provider.create_template_delta_disks(pool)
redis.sadd('vmpooler__template__deltas', pool['template'])
rescue StandardError => e
$logger.log('s', "[!] [#{pool['name']}] failed while preparing a template with an error. As a result vmpooler could not create the template delta disks. Either a template delta disk already exists, or the template delta disk creation failed. The error is: #{e}")
end
end
redis.hset('vmpooler__template__prepared', pool['name'], pool['template'])
@ -1135,15 +1128,15 @@ module Vmpooler
mutex.synchronize do
@redis.with_metrics do |redis|
provider.vms_in_pool(pool['name']).each do |vm|
if !redis.sismember('vmpooler__running__' + pool['name'], vm['name']) &&
!redis.sismember('vmpooler__ready__' + pool['name'], vm['name']) &&
!redis.sismember('vmpooler__pending__' + pool['name'], vm['name']) &&
!redis.sismember('vmpooler__completed__' + pool['name'], vm['name']) &&
!redis.sismember('vmpooler__discovered__' + pool['name'], vm['name']) &&
!redis.sismember('vmpooler__migrating__' + pool['name'], vm['name'])
if !redis.sismember("vmpooler__running__#{pool['name']}", vm['name']) &&
!redis.sismember("vmpooler__ready__#{pool['name']}", vm['name']) &&
!redis.sismember("vmpooler__pending__#{pool['name']}", vm['name']) &&
!redis.sismember("vmpooler__completed__#{pool['name']}", vm['name']) &&
!redis.sismember("vmpooler__discovered__#{pool['name']}", vm['name']) &&
!redis.sismember("vmpooler__migrating__#{pool['name']}", vm['name'])
pool_check_response[:discovered_vms] += 1
redis.sadd('vmpooler__discovered__' + pool['name'], vm['name'])
redis.sadd("vmpooler__discovered__#{pool['name']}", vm['name'])
$logger.log('s', "[?] [#{pool['name']}] '#{vm['name']}' added to 'discovered' queue")
end
@ -1164,7 +1157,7 @@ module Vmpooler
redis.smembers("vmpooler__running__#{pool_name}").each do |vm|
if inventory[vm]
begin
vm_lifetime = redis.hget('vmpooler__vm__' + vm, 'lifetime') || $config[:config]['vm_lifetime'] || 12
vm_lifetime = redis.hget("vmpooler__vm__#{vm}", 'lifetime') || $config[:config]['vm_lifetime'] || 12
pool_check_response[:checked_running_vms] += 1
check_running_vm(vm, pool_name, vm_lifetime, provider)
rescue StandardError => e
@ -1206,7 +1199,7 @@ module Vmpooler
$logger.log('d', "[!] [#{pool_name}] _check_pool failed with an error while evaluating pending VMs: #{e}")
end
else
fail_pending_vm(vm, pool_name, pool_timeout, redis, false)
fail_pending_vm(vm, pool_name, pool_timeout, redis, exists: false)
end
end
end

View file

@ -79,7 +79,7 @@ module Vmpooler
return current_vm['vm_host'] if rand(1..100) > provider_config['migratevm_couldmove_percent']
# Simulate a 10 node cluster and randomly pick a different one
new_host = 'HOST' + rand(1..10).to_s while new_host == current_vm['vm_host']
new_host = "HOST#{rand(1..10)}" while new_host == current_vm['vm_host']
new_host
end
@ -95,9 +95,7 @@ module Vmpooler
end
# Inject clone failure
unless provider_config['migratevm_fail_percent'].nil?
raise('Dummy Failure for migratevm_fail_percent') if rand(1..100) <= provider_config['migratevm_fail_percent']
end
raise('Dummy Failure for migratevm_fail_percent') if !provider_config['migratevm_fail_percent'].nil? && rand(1..100) <= provider_config['migratevm_fail_percent']
@write_lock.synchronize do
current_vm = get_dummy_vm(pool_name, vm_name)
@ -116,27 +114,23 @@ module Vmpooler
return nil if dummy.nil?
# Randomly power off the VM
unless dummy['powerstate'] != 'PoweredOn' || provider_config['getvm_poweroff_percent'].nil?
if rand(1..100) <= provider_config['getvm_poweroff_percent']
@write_lock.synchronize do
dummy = get_dummy_vm(pool_name, vm_name)
dummy['powerstate'] = 'PoweredOff'
write_backing_file
end
logger.log('d', "[ ] [#{dummy['poolname']}] '#{dummy['name']}' is being Dummy Powered Off")
if !(dummy['powerstate'] != 'PoweredOn' || provider_config['getvm_poweroff_percent'].nil?) && rand(1..100) <= provider_config['getvm_poweroff_percent']
@write_lock.synchronize do
dummy = get_dummy_vm(pool_name, vm_name)
dummy['powerstate'] = 'PoweredOff'
write_backing_file
end
logger.log('d', "[ ] [#{dummy['poolname']}] '#{dummy['name']}' is being Dummy Powered Off")
end
# Randomly rename the host
unless dummy['hostname'] != dummy['name'] || provider_config['getvm_rename_percent'].nil?
if rand(1..100) <= provider_config['getvm_rename_percent']
@write_lock.synchronize do
dummy = get_dummy_vm(pool_name, vm_name)
dummy['hostname'] = 'DUMMY' + dummy['name']
write_backing_file
end
logger.log('d', "[ ] [#{dummy['poolname']}] '#{dummy['name']}' is being Dummy renamed")
if !(dummy['hostname'] != dummy['name'] || provider_config['getvm_rename_percent'].nil?) && rand(1..100) <= provider_config['getvm_rename_percent']
@write_lock.synchronize do
dummy = get_dummy_vm(pool_name, vm_name)
dummy['hostname'] = "DUMMY#{dummy['name']}"
write_backing_file
end
logger.log('d', "[ ] [#{dummy['poolname']}] '#{dummy['name']}' is being Dummy renamed")
end
obj['name'] = dummy['name']
@ -196,9 +190,7 @@ module Vmpooler
begin
# Inject clone failure
unless provider_config['createvm_fail_percent'].nil?
raise('Dummy Failure for createvm_fail_percent') if rand(1..100) <= provider_config['createvm_fail_percent']
end
raise('Dummy Failure for createvm_fail_percent') if !provider_config['createvm_fail_percent'].nil? && rand(1..100) <= provider_config['createvm_fail_percent']
# Assert the VM is ready for use
@write_lock.synchronize do
@ -229,9 +221,7 @@ module Vmpooler
end
# Inject create failure
unless provider_config['createdisk_fail_percent'].nil?
raise('Dummy Failure for createdisk_fail_percent') if rand(1..100) <= provider_config['createdisk_fail_percent']
end
raise('Dummy Failure for createdisk_fail_percent') if !provider_config['createdisk_fail_percent'].nil? && rand(1..100) <= provider_config['createdisk_fail_percent']
@write_lock.synchronize do
vm_object = get_dummy_vm(pool_name, vm_name)
@ -255,9 +245,7 @@ module Vmpooler
end
# Inject create failure
unless provider_config['createsnapshot_fail_percent'].nil?
raise('Dummy Failure for createsnapshot_fail_percent') if rand(1..100) <= provider_config['createsnapshot_fail_percent']
end
raise('Dummy Failure for createsnapshot_fail_percent') if !provider_config['createsnapshot_fail_percent'].nil? && rand(1..100) <= provider_config['createsnapshot_fail_percent']
@write_lock.synchronize do
vm_object = get_dummy_vm(pool_name, vm_name)
@ -282,9 +270,7 @@ module Vmpooler
end
# Inject create failure
unless provider_config['revertsnapshot_fail_percent'].nil?
raise('Dummy Failure for revertsnapshot_fail_percent') if rand(1..100) <= provider_config['revertsnapshot_fail_percent']
end
raise('Dummy Failure for revertsnapshot_fail_percent') if !provider_config['revertsnapshot_fail_percent'].nil? && rand(1..100) <= provider_config['revertsnapshot_fail_percent']
end
vm_object['snapshots'].include?(snapshot_name)
@ -320,9 +306,7 @@ module Vmpooler
end
# Inject destroy VM failure
unless provider_config['destroyvm_fail_percent'].nil?
raise('Dummy Failure for migratevm_fail_percent') if rand(1..100) <= provider_config['destroyvm_fail_percent']
end
raise('Dummy Failure for migratevm_fail_percent') if !provider_config['destroyvm_fail_percent'].nil? && rand(1..100) <= provider_config['destroyvm_fail_percent']
# 'Destroy' the VM
@write_lock.synchronize do
@ -354,9 +338,7 @@ module Vmpooler
# it's ready to receive a connection
sleep(2)
unless provider_config['vmready_fail_percent'].nil?
raise('Dummy Failure for vmready_fail_percent') if rand(1..100) <= provider_config['vmready_fail_percent']
end
raise('Dummy Failure for vmready_fail_percent') if !provider_config['vmready_fail_percent'].nil? && rand(1..100) <= provider_config['vmready_fail_percent']
@write_lock.synchronize do
vm_object['ready'] = true

View file

@ -1,6 +1,8 @@
# frozen_string_literal: true
require 'vmpooler/providers/base'
require 'bigdecimal'
require 'bigdecimal/util'
module Vmpooler
class PoolManager
@ -49,9 +51,7 @@ module Vmpooler
end
def folder_configured?(folder_title, base_folder, configured_folders, whitelist)
if whitelist
return true if whitelist.include?(folder_title)
end
return true if whitelist&.include?(folder_title)
return false unless configured_folders.keys.include?(folder_title)
return false unless configured_folders[folder_title] == base_folder
@ -68,7 +68,7 @@ module Vmpooler
redis.hset("vmpooler__vm__#{vm_name}", 'destroy', Time.now)
# Auto-expire metadata key
redis.expire('vmpooler__vm__' + vm_name, (data_ttl * 60 * 60))
redis.expire("vmpooler__vm__#{vm_name}", (data_ttl * 60 * 60))
redis.exec
end
@ -203,9 +203,7 @@ module Vmpooler
return
end
wait_for_host_selection(dc, target, loop_delay, max_age) if target[dc].key?('checking')
if target[dc].key?('check_time_finished')
select_target_hosts(target, cluster, datacenter) if now - target[dc]['check_time_finished'] > max_age
end
select_target_hosts(target, cluster, datacenter) if target[dc].key?('check_time_finished') && now - target[dc]['check_time_finished'] > max_age
end
def wait_for_host_selection(dc, target, maxloop = 0, loop_delay = 1, max_age = 60)
@ -418,15 +416,15 @@ module Vmpooler
# Determine network device type
# All possible device type options here: https://vdc-download.vmware.com/vmwb-repository/dcr-public/98d63b35-d822-47fe-a87a-ddefd469df06/2e3c7b58-f2bd-486e-8bb1-a75eb0640bee/doc/vim.vm.device.VirtualEthernetCard.html
network_device =
if template_vm_network_device.is_a? RbVmomi::VIM::VirtualVmxnet2
if template_vm_network_device.instance_of? RbVmomi::VIM::VirtualVmxnet2
RbVmomi::VIM.VirtualVmxnet2
elsif template_vm_network_device.is_a? RbVmomi::VIM::VirtualVmxnet3
elsif template_vm_network_device.instance_of? RbVmomi::VIM::VirtualVmxnet3
RbVmomi::VIM.VirtualVmxnet3
elsif template_vm_network_device.is_a? RbVmomi::VIM::VirtualE1000
elsif template_vm_network_device.instance_of? RbVmomi::VIM::VirtualE1000
RbVmomi::VIM.VirtualE1000
elsif template_vm_network_device.is_a? RbVmomi::VIM::VirtualE1000e
elsif template_vm_network_device.instance_of? RbVmomi::VIM::VirtualE1000e
RbVmomi::VIM.VirtualE1000e
elsif template_vm_network_device.is_a? RbVmomi::VIM::VirtualSriovEthernetCard
elsif template_vm_network_device.instance_of? RbVmomi::VIM::VirtualSriovEthernetCard
RbVmomi::VIM.VirtualSriovEthernetCard
else
RbVmomi::VIM.VirtualPCNet32
@ -560,7 +558,7 @@ module Vmpooler
boottime = vm_object.runtime.bootTime if vm_object.runtime&.bootTime
powerstate = vm_object.runtime.powerState if vm_object.runtime&.powerState
hash = {
{
'name' => vm_object.name,
'hostname' => hostname,
'template' => pool_configuration['template'],
@ -568,8 +566,6 @@ module Vmpooler
'boottime' => boottime,
'powerstate' => powerstate
}
hash
end
# vSphere helper methods
@ -794,7 +790,7 @@ module Vmpooler
}
folder_object = connection.searchIndex.FindByInventoryPath(propSpecs) # rubocop:disable Naming/VariableName
return nil unless folder_object.class == RbVmomi::VIM::Folder
return nil unless folder_object.instance_of? RbVmomi::VIM::Folder
folder_object
end
@ -810,9 +806,7 @@ module Vmpooler
# the cpu or memory utilization is bigger than the limit param
def get_host_utilization(host, model = nil, limit = 90)
limit = @config[:config]['utilization_limit'] if @config[:config].key?('utilization_limit')
if model
return nil unless host_has_cpu_model?(host, model)
end
return nil if model && !host_has_cpu_model?(host, model)
return nil if host.runtime.inMaintenanceMode
return nil unless host.overallStatus == 'green'
return nil unless host.configIssue.empty?
@ -821,9 +815,9 @@ module Vmpooler
memory_utilization = memory_utilization_for host
return nil if cpu_utilization.nil?
return nil if cpu_utilization == 0.0
return nil if cpu_utilization.to_d == 0.0.to_d
return nil if memory_utilization.nil?
return nil if memory_utilization == 0.0
return nil if memory_utilization.to_d == 0.0.to_d
return nil if cpu_utilization > limit
return nil if memory_utilization > limit
@ -838,8 +832,7 @@ module Vmpooler
def get_host_cpu_arch_version(host)
cpu_model = host.hardware.cpuPkg[0].description
cpu_model_parts = cpu_model.split
arch_version = cpu_model_parts[4]
arch_version
cpu_model_parts[4]
end
def cpu_utilization_for(host)
@ -931,8 +924,7 @@ module Vmpooler
target_hosts = get_cluster_host_utilization(cluster_object)
raise("There is no host candidate in vcenter that meets all the required conditions, check that the cluster has available hosts in a 'green' status, not in maintenance mode and not overloaded CPU and memory'") if target_hosts.empty?
least_used_host = target_hosts.min[1]
least_used_host
target_hosts.min[1]
end
def find_cluster(cluster, connection, datacentername)
@ -979,11 +971,10 @@ module Vmpooler
end
def build_propSpecs(datacenter, folder, vmname) # rubocop:disable Naming/MethodName
propSpecs = { # rubocop:disable Naming/VariableName
{
entity => self,
:inventoryPath => "#{datacenter}/vm/#{folder}/#{vmname}"
}
propSpecs # rubocop:disable Naming/VariableName
end
def find_vm(pool_name, vmname, connection)

View file

@ -588,7 +588,7 @@ def mock_RbVmomi_VIM_VirtualVmxnet3(options = {})
mock.addressType = options[:addressType]
mock.connectable = options[:connectable]
allow(mock).to receive(:is_a?) do |expected_type|
allow(mock).to receive(:instance_of?) do |expected_type|
expected_type == RbVmomi::VIM::VirtualVmxnet3
end

View file

@ -185,7 +185,7 @@ EOT
it 'moves VM to completed queue if VM has exceeded timeout and exists' do
redis_connection_pool.with do |redis|
redis.hset("vmpooler__vm__#{vm}", 'clone',Date.new(2001,1,1).to_s)
expect(subject.fail_pending_vm(vm, pool, timeout, redis, true)).to eq(true)
expect(subject.fail_pending_vm(vm, pool, timeout, redis, exists: true)).to eq(true)
expect(redis.sismember("vmpooler__pending__#{pool}", vm)).to be(false)
expect(redis.sismember("vmpooler__completed__#{pool}", vm)).to be(true)
end
@ -195,7 +195,7 @@ EOT
redis_connection_pool.with do |redis|
redis.hset("vmpooler__vm__#{vm}", 'clone',Date.new(2001,1,1).to_s)
expect(logger).to receive(:log).with('d', "[!] [#{pool}] '#{vm}' marked as 'failed' after #{timeout} minutes")
expect(subject.fail_pending_vm(vm, pool, timeout, redis, true)).to eq(true)
expect(subject.fail_pending_vm(vm, pool, timeout, redis, exists: true)).to eq(true)
end
end
@ -203,14 +203,14 @@ EOT
redis_connection_pool.with do |redis|
redis.hset("vmpooler__vm__#{vm}", 'clone',Date.new(2001,1,1).to_s)
expect(subject).to receive(:remove_nonexistent_vm).with(vm, pool, redis)
expect(subject.fail_pending_vm(vm, pool, timeout, redis, false)).to eq(true)
expect(subject.fail_pending_vm(vm, pool, timeout, redis, exists: false)).to eq(true)
end
end
it 'swallows error if an error is raised' do
redis_connection_pool.with do |redis|
redis.hset("vmpooler__vm__#{vm}", 'clone','iamnotparsable_asdate')
expect(subject.fail_pending_vm(vm, pool, timeout, redis, true)).to eq(false)
expect(subject.fail_pending_vm(vm, pool, timeout, redis, exists: true)).to eq(false)
end
end
@ -219,7 +219,7 @@ EOT
redis.hset("vmpooler__vm__#{vm}", 'clone','iamnotparsable_asdate')
expect(logger).to receive(:log).with('d', String)
subject.fail_pending_vm(vm, pool, timeout, redis, true)
subject.fail_pending_vm(vm, pool, timeout, redis, exists: true)
end
end
@ -230,7 +230,7 @@ EOT
redis.hset("vmpooler__vm__#{vm}", 'clone',(Time.now - 900).to_s)
redis.hset("vmpooler__vm__#{vm}", 'pool_alias', pool)
redis.hset("vmpooler__vm__#{vm}", 'request_id', request_id)
subject.fail_pending_vm(vm, pool, timeout, redis, true)
subject.fail_pending_vm(vm, pool, timeout, redis, exists: true)
expect(redis.zrange('vmpooler__odcreate__task', 0, -1)).to eq(["#{pool}:#{pool}:1:#{request_id}"])
end
end
@ -3950,7 +3950,7 @@ EOT
it 'should call fail_pending_vm' do
redis_connection_pool.with do |redis|
expect(subject).to receive(:fail_pending_vm).with(vm,pool,Integer,redis,false)
expect(subject).to receive(:fail_pending_vm).with(vm, pool, Integer, redis, exists: false)
end
subject.check_pending_pool_vms(pool, provider, pool_check_response, inventory, timeout)

View file

@ -1585,7 +1585,7 @@ EOT
expect(result.location.datastore.name).to eq('datastore0')
expect(result.location.diskMoveType).to eq(:moveChildMostDiskBacking)
expect(result.config.deviceChange.first[:operation]).to eq(:edit)
expect(result.config.deviceChange.first[:device].is_a?(RbVmomi::VIM::VirtualVmxnet3)).to be true
expect(result.config.deviceChange.first[:device].instance_of?(RbVmomi::VIM::VirtualVmxnet3)).to be true
end
end
@ -1885,7 +1885,7 @@ EOT
it 'should return the folder when found' do
allow(connection.searchIndex).to receive(:FindByInventoryPath).and_return(folder_object)
allow(folder_object).to receive(:class).and_return(RbVmomi::VIM::Folder)
allow(folder_object).to receive(:instance_of?).and_return(RbVmomi::VIM::Folder)
result = subject.find_vm_folder(poolname,connection)
expect(result.name).to eq(foldername)
end
@ -1901,7 +1901,7 @@ EOT
it 'should return the folder when found' do
allow(connection.searchIndex).to receive(:FindByInventoryPath).and_return(folder_object)
allow(folder_object).to receive(:class).and_return(RbVmomi::VIM::Folder)
allow(folder_object).to receive(:instance_of?).and_return(RbVmomi::VIM::Folder)
result = subject.find_vm_folder(poolname,connection)
expect(result.name).to eq(foldername)
end
@ -1918,7 +1918,7 @@ EOT
it 'should return the folder when found' do
allow(connection.searchIndex).to receive(:FindByInventoryPath).and_return(folder_object)
allow(folder_object).to receive(:class).and_return(RbVmomi::VIM::Folder)
allow(folder_object).to receive(:instance_of?).and_return(RbVmomi::VIM::Folder)
result = subject.find_vm_folder(poolname,connection)
expect(result.name).to eq(foldername)
end

View file

@ -45,7 +45,7 @@ Gem::Specification.new do |s|
s.add_development_dependency 'pry'
s.add_development_dependency 'rack-test', '>= 0.6'
s.add_development_dependency 'rspec', '>= 3.2'
s.add_development_dependency 'rubocop', '< 1.0'
s.add_development_dependency 'rubocop', '~> 1.0'
s.add_development_dependency 'simplecov', '>= 0.11.2'
s.add_development_dependency 'yarjuf', '>= 2.0'
end