(POOLER-158) Add capability to provision VMs on demand

This change adds the capability for vmpooler to provision instances on
demand. Without this change, vmpooler only supports retrieving machines
from pre-provisioned pools.
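
For illustration only, a client workflow against the new endpoints added
below might look like the following sketch; the hostname, the /api/v1
prefix, the 'pool_a' pool name, and the VMPOOLER_TOKEN environment
variable are placeholders, not part of this change:

    require 'net/http'
    require 'json'
    require 'uri'

    base = 'http://vmpooler.example.com/api/v1'

    # Ask for two instances of an (assumed) pool named 'pool_a'.
    uri = URI("#{base}/ondemandvm")
    req = Net::HTTP::Post.new(uri, 'Content-Type' => 'application/json',
                                   'X-AUTH-TOKEN' => ENV['VMPOOLER_TOKEN'].to_s)
    req.body = { 'pool_a' => '2' }.to_json
    res = Net::HTTP.start(uri.host, uri.port) { |http| http.request(req) }
    request_id = JSON.parse(res.body)['request_id']

    # Poll the request: 202 while instances are still provisioning,
    # 200 once the request is ready (or has failed).
    check = Net::HTTP.get_response(URI("#{base}/ondemandvm/#{request_id}"))
    puts check.body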

Additionally, this change refactors redis interactions to reduce round
trips to redis: multi and pipelined redis commands are used wherever
possible so that related operations are sent in a single call.
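
The general pattern is sketched below with made-up pool and VM names; the
diff's own call sites use explicit multi/exec calls on a pooled
connection rather than a bare Redis.new:

    require 'redis'

    redis   = Redis.new
    pool    = 'example_pool' # illustrative pool and VM names
    vm_name = 'examplevm'

    # All three writes are queued client-side and sent to redis in a
    # single round trip, applied atomically on EXEC.
    redis.multi do |tx|
      tx.srem("vmpooler__completed__#{pool}", vm_name)
      tx.hdel("vmpooler__active__#{pool}", vm_name)
      tx.hset("vmpooler__vm__#{vm_name}", 'destroy', Time.now)
    end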

To support the redis refactor, redis access now goes through a
connection pool. In addition to providing multiple connections for the
pool manager to use, this makes the pool manager's redis interactions
thread safe.
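
A rough sketch of the intended usage, based on the redis_connection_pool
helper and the with_metrics block shown in this diff; the host, port,
sizing values, the nil metrics object, and the example key are
assumptions, and the checked-out object is treated as a redis connection
the way the provider code below uses it:

    require 'vmpooler'

    # Illustrative values; in vmpooler these come from the parsed redis
    # config. Arguments: host, port, password, pool size, checkout
    # timeout, metrics.
    redis_pool = Vmpooler.redis_connection_pool('localhost', 6379, nil, 10, 5, nil)

    # Each caller checks a connection out for the duration of the block,
    # so concurrent pool manager threads never share a redis connection.
    redis_pool.with_metrics do |redis|
      redis.hset('vmpooler__vm__examplevm', 'checkout', Time.now)
    end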

Ready TTL is now a global parameter that provides a default for all
pools. The previous default of 0 has been removed because it is
unreasonable default behavior: it would leave a provisioned instance in
the pool indefinitely.
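
As a minimal sketch of the resulting behavior (pool names and TTL values
are made up), the per-pool fallback works like this:

    # A pool without its own ready_ttl inherits the global value.
    config = {
      config: { 'ready_ttl' => 60 },                    # global default
      pools:  [
        { 'name' => 'pool_a' },                         # inherits 60
        { 'name' => 'pool_b', 'ready_ttl' => 5 }        # keeps its override
      ]
    }

    config[:pools].each { |pool| pool['ready_ttl'] ||= config[:config]['ready_ttl'] }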

Pool empty messages are no longer emitted when the pool size is set to 0.
Without this change, the API and pool manager would both report a pool
whose size is set to 0 as empty.
kirby@puppetlabs.com 2020-04-06 10:52:05 -07:00
parent 1f6f08d172
commit 811fd8b60f
34 changed files with 3326 additions and 1098 deletions

@@ -1,6 +1,7 @@
# frozen_string_literal: true
module Vmpooler
require 'concurrent'
require 'date'
require 'json'
require 'net/ldap'
@@ -58,9 +59,14 @@ module Vmpooler
# Set some configuration defaults
parsed_config[:config]['task_limit'] = string_to_int(ENV['TASK_LIMIT']) || parsed_config[:config]['task_limit'] || 10
parsed_config[:config]['ondemand_clone_limit'] = string_to_int(ENV['ONDEMAND_CLONE_LIMIT']) || parsed_config[:config]['ondemand_clone_limit'] || 10
parsed_config[:config]['max_ondemand_instances_per_request'] = string_to_int(ENV['MAX_ONDEMAND_INSTANCES_PER_REQUEST']) || parsed_config[:config]['max_ondemand_instances_per_request'] || 10
parsed_config[:config]['migration_limit'] = string_to_int(ENV['MIGRATION_LIMIT']) if ENV['MIGRATION_LIMIT']
parsed_config[:config]['vm_checktime'] = string_to_int(ENV['VM_CHECKTIME']) || parsed_config[:config]['vm_checktime'] || 1
parsed_config[:config]['vm_lifetime'] = string_to_int(ENV['VM_LIFETIME']) || parsed_config[:config]['vm_lifetime'] || 24
parsed_config[:config]['max_lifetime_upper_limit'] = string_to_int(ENV['MAX_LIFETIME_UPPER_LIMIT']) || parsed_config[:config]['max_lifetime_upper_limit']
parsed_config[:config]['ready_ttl'] = string_to_int(ENV['READY_TTL']) || parsed_config[:config]['ready_ttl'] || 60
parsed_config[:config]['ondemand_request_ttl'] = string_to_int(ENV['ONDEMAND_REQUEST_TTL']) || parsed_config[:config]['ondemand_request_ttl'] || 5
parsed_config[:config]['prefix'] = ENV['PREFIX'] || parsed_config[:config]['prefix'] || ''
parsed_config[:config]['logfile'] = ENV['LOGFILE'] if ENV['LOGFILE']
@@ -72,7 +78,7 @@ module Vmpooler
parsed_config[:config]['vm_lifetime_auth'] = string_to_int(ENV['VM_LIFETIME_AUTH']) if ENV['VM_LIFETIME_AUTH']
parsed_config[:config]['max_tries'] = string_to_int(ENV['MAX_TRIES']) if ENV['MAX_TRIES']
parsed_config[:config]['retry_factor'] = string_to_int(ENV['RETRY_FACTOR']) if ENV['RETRY_FACTOR']
parsed_config[:config]['create_folders'] = ENV['CREATE_FOLDERS'] if ENV['CREATE_FOLDERS']
parsed_config[:config]['create_folders'] = true?(ENV['CREATE_FOLDERS']) if ENV['CREATE_FOLDERS']
parsed_config[:config]['create_template_delta_disks'] = ENV['CREATE_TEMPLATE_DELTA_DISKS'] if ENV['CREATE_TEMPLATE_DELTA_DISKS']
set_linked_clone(parsed_config)
parsed_config[:config]['experimental_features'] = ENV['EXPERIMENTAL_FEATURES'] if ENV['EXPERIMENTAL_FEATURES']
@@ -84,6 +90,8 @@ module Vmpooler
parsed_config[:redis]['port'] = string_to_int(ENV['REDIS_PORT']) if ENV['REDIS_PORT']
parsed_config[:redis]['password'] = ENV['REDIS_PASSWORD'] if ENV['REDIS_PASSWORD']
parsed_config[:redis]['data_ttl'] = string_to_int(ENV['REDIS_DATA_TTL']) || parsed_config[:redis]['data_ttl'] || 168
parsed_config[:redis]['connection_pool_size'] = string_to_int(ENV['REDIS_CONNECTION_POOL_SIZE']) || parsed_config[:redis]['connection_pool_size'] || 10
parsed_config[:redis]['connection_pool_timeout'] = string_to_int(ENV['REDIS_CONNECTION_POOL_TIMEOUT']) || parsed_config[:redis]['connection_pool_timeout'] || 5
parsed_config[:statsd] = parsed_config[:statsd] || {} if ENV['STATSD_SERVER']
parsed_config[:statsd]['server'] = ENV['STATSD_SERVER'] if ENV['STATSD_SERVER']
@@ -117,6 +125,7 @@ module Vmpooler
parsed_config[:pools].each do |pool|
parsed_config[:pool_names] << pool['name']
pool['ready_ttl'] ||= parsed_config[:config]['ready_ttl']
if pool['alias']
if pool['alias'].is_a?(Array)
pool['alias'].each do |pool_alias|
@@ -154,6 +163,19 @@ module Vmpooler
pools
end
def self.redis_connection_pool(host, port, password, size, timeout, metrics)
Vmpooler::PoolManager::GenericConnectionPool.new(
metrics: metrics,
metric_prefix: 'redis_connection_pool',
size: size,
timeout: timeout
) do
connection = Concurrent::Hash.new
redis = new_redis(host, port, password)
connection['connection'] = redis
end
end
def self.new_redis(host = 'localhost', port = nil, password = nil)
Redis.new(host: host, port: port, password: password)
end

@@ -238,7 +238,7 @@ module Vmpooler
queue[:running] = get_total_across_pools_redis_scard(pools, 'vmpooler__running__', backend)
queue[:completed] = get_total_across_pools_redis_scard(pools, 'vmpooler__completed__', backend)
queue[:cloning] = backend.get('vmpooler__tasks__clone').to_i
queue[:cloning] = backend.get('vmpooler__tasks__clone').to_i + backend.get('vmpooler__tasks__ondemandclone').to_i
queue[:booting] = queue[:pending].to_i - queue[:cloning].to_i
queue[:booting] = 0 if queue[:booting] < 0
queue[:total] = queue[:pending].to_i + queue[:ready].to_i + queue[:running].to_i + queue[:completed].to_i

@@ -42,6 +42,68 @@ module Vmpooler
Vmpooler::API.settings.checkoutlock
end
def get_template_aliases(template)
result = []
aliases = Vmpooler::API.settings.config[:alias]
if aliases
result += aliases[template] if aliases[template].is_a?(Array)
template_backends << aliases[template] if aliases[template].is_a?(String)
end
result
end
def get_pool_weights(template_backends)
pool_index = pool_index(pools)
weighted_pools = {}
template_backends.each do |t|
next unless pool_index.key? t
index = pool_index[t]
clone_target = pools[index]['clone_target'] || config['clone_target']
next unless config.key?('backend_weight')
weight = config['backend_weight'][clone_target]
if weight
weighted_pools[t] = weight
end
end
weighted_pools
end
def count_selection(selection)
result = {}
selection.uniq.each do |poolname|
result[poolname] = selection.count(poolname)
end
result
end
def evaluate_template_aliases(template, count)
template_backends = []
template_backends << template if backend.sismember('vmpooler__pools', template)
selection = []
aliases = get_template_aliases(template)
if aliases
template_backends += aliases
weighted_pools = get_pool_weights(template_backends)
pickup = Pickup.new(weighted_pools) if weighted_pools.count == template_backends.count
count.to_i.times do
if pickup
selection << pickup.pick
else
selection << template_backends.sample
end
end
else
count.to_i.times do
selection << template
end
end
count_selection(selection)
end
def fetch_single_vm(template)
template_backends = [template]
aliases = Vmpooler::API.settings.config[:alias]
@@ -245,11 +307,9 @@ module Vmpooler
pool_index = pool_index(pools)
template_configs = backend.hgetall('vmpooler__config__template')
template_configs&.each do |poolname, template|
if pool_index.include? poolname
unless pools[pool_index[poolname]]['template'] == template
pools[pool_index[poolname]]['template'] = template
end
end
next unless pool_index.include? poolname
pools[pool_index[poolname]]['template'] = template
end
end
@@ -257,11 +317,9 @@ module Vmpooler
pool_index = pool_index(pools)
poolsize_configs = backend.hgetall('vmpooler__config__poolsize')
poolsize_configs&.each do |poolname, size|
if pool_index.include? poolname
unless pools[pool_index[poolname]]['size'] == size.to_i
pools[pool_index[poolname]]['size'] == size.to_i
end
end
next unless pool_index.include? poolname
pools[pool_index[poolname]]['size'] = size.to_i
end
end
@@ -269,14 +327,69 @@ module Vmpooler
pool_index = pool_index(pools)
clone_target_configs = backend.hgetall('vmpooler__config__clone_target')
clone_target_configs&.each do |poolname, clone_target|
if pool_index.include? poolname
unless pools[pool_index[poolname]]['clone_target'] == clone_target
pools[pool_index[poolname]]['clone_target'] == clone_target
end
end
next unless pool_index.include? poolname
pools[pool_index[poolname]]['clone_target'] = clone_target
end
end
def too_many_requested?(payload)
payload&.each do |_poolname, count|
next unless count.to_i > config['max_ondemand_instances_per_request']
return true
end
false
end
def generate_ondemand_request(payload)
result = { 'ok': false }
requested_instances = payload.reject { |k, _v| k == 'request_id' }
if too_many_requested?(requested_instances)
result['message'] = "requested amount of instances exceeds the maximum #{config['max_ondemand_instances_per_request']}"
status 403
return result
end
score = Time.now.to_i
request_id = payload['request_id']
request_id ||= generate_request_id
result['request_id'] = request_id
if backend.exists("vmpooler__odrequest__#{request_id}")
result['message'] = "request_id '#{request_id}' has already been created"
status 409
return result
end
status 201
platforms_with_aliases = []
requested_instances.each do |poolname, count|
selection = evaluate_template_aliases(poolname, count)
selection.map { |selected_pool, selected_pool_count| platforms_with_aliases << "#{poolname}:#{selected_pool}:#{selected_pool_count}" }
end
platforms_string = platforms_with_aliases.join(',')
return result unless backend.zadd('vmpooler__provisioning__request', score, request_id)
backend.hset("vmpooler__odrequest__#{request_id}", 'requested', platforms_string)
if Vmpooler::API.settings.config[:auth] and has_token?
backend.hset("vmpooler__odrequest__#{request_id}", 'token:token', request.env['HTTP_X_AUTH_TOKEN'])
backend.hset("vmpooler__odrequest__#{request_id}", 'token:user',
backend.hget('vmpooler__token__' + request.env['HTTP_X_AUTH_TOKEN'], 'user'))
end
result['domain'] = config['domain'] if config['domain']
result[:ok] = true
result
end
def generate_request_id
SecureRandom.uuid
end
get '/' do
sync_pool_sizes
redirect to('/dashboard/')
@@ -395,7 +508,7 @@ module Vmpooler
end
# for backwards compatibility, include separate "empty" stats in "status" block
if ready == 0
if ready == 0 && max != 0
result[:status][:empty] ||= []
result[:status][:empty].push(pool['name'])
@@ -689,6 +802,88 @@ module Vmpooler
JSON.pretty_generate(result)
end
post "#{api_prefix}/ondemandvm/?" do
content_type :json
need_token! if Vmpooler::API.settings.config[:auth]
result = { 'ok' => false }
begin
payload = JSON.parse(request.body.read)
if payload
invalid = invalid_templates(payload.reject { |k, _v| k == 'request_id' })
if invalid.empty?
result = generate_ondemand_request(payload)
else
result[:bad_templates] = invalid
invalid.each do |bad_template|
metrics.increment('ondemandrequest.invalid.' + bad_template)
end
status 404
end
else
metrics.increment('ondemandrequest.invalid.unknown')
status 404
end
rescue JSON::ParserError
status 400
result = {
'ok' => false,
'message' => 'JSON payload could not be parsed'
}
end
JSON.pretty_generate(result)
end
post "#{api_prefix}/ondemandvm/:template/?" do
content_type :json
result = { 'ok' => false }
need_token! if Vmpooler::API.settings.config[:auth]
payload = extract_templates_from_query_params(params[:template])
if payload
invalid = invalid_templates(payload.reject { |k, _v| k == 'request_id' })
if invalid.empty?
result = generate_ondemand_request(payload)
else
result[:bad_templates] = invalid
invalid.each do |bad_template|
metrics.increment('ondemandrequest.invalid.' + bad_template)
end
status 404
end
else
metrics.increment('ondemandrequest.invalid.unknown')
status 404
end
JSON.pretty_generate(result)
end
get "#{api_prefix}/ondemandvm/:requestid/?" do
content_type :json
status 404
result = check_ondemand_request(params[:requestid])
JSON.pretty_generate(result)
end
delete "#{api_prefix}/ondemandvm/:requestid/?" do
content_type :json
need_token! if Vmpooler::API.settings.config[:auth]
status 404
result = delete_ondemand_request(params[:requestid])
JSON.pretty_generate(result)
end
post "#{api_prefix}/vm/?" do
content_type :json
result = { 'ok' => false }
@@ -764,6 +959,78 @@ module Vmpooler
invalid
end
def check_ondemand_request(request_id)
result = { 'ok' => false }
request_hash = backend.hgetall("vmpooler__odrequest__#{request_id}")
if request_hash.empty?
result['message'] = "no request found for request_id '#{request_id}'"
return result
end
result['request_id'] = request_id
result['ready'] = false
result['ok'] = true
status 202
if request_hash['status'] == 'ready'
result['ready'] = true
platform_parts = request_hash['requested'].split(',')
platform_parts.each do |platform|
pool_alias, pool, _count = platform.split(':')
instances = backend.smembers("vmpooler__#{request_id}__#{pool_alias}__#{pool}")
result[pool_alias] = { 'hostname': instances }
end
result['domain'] = config['domain'] if config['domain']
status 200
elsif request_hash['status'] == 'failed'
result['message'] = "The request failed to provision instances within the configured ondemand_request_ttl '#{config['ondemand_request_ttl']}'"
status 200
elsif request_hash['status'] == 'deleted'
result['message'] = 'The request has been deleted'
status 200
else
platform_parts = request_hash['requested'].split(',')
platform_parts.each do |platform|
pool_alias, pool, count = platform.split(':')
instance_count = backend.scard("vmpooler__#{request_id}__#{pool_alias}__#{pool}")
result[pool_alias] = {
'ready': instance_count.to_s,
'pending': (count.to_i - instance_count.to_i).to_s
}
end
end
result
end
def delete_ondemand_request(request_id)
result = { 'ok' => false }
platforms = backend.hget("vmpooler__odrequest__#{request_id}", 'requested')
unless platforms
result['message'] = "no request found for request_id '#{request_id}'"
return result
end
if backend.hget("vmpooler__odrequest__#{request_id}", 'status') == 'deleted'
result['message'] = 'the request has already been deleted'
else
backend.hset("vmpooler__odrequest__#{request_id}", 'status', 'deleted')
platforms.split(',').each do |platform|
pool_alias, pool, _count = platform.split(':')
backend.smembers("vmpooler__#{request_id}__#{pool_alias}__#{pool}")&.each do |vm|
backend.smove("vmpooler__running__#{pool}", "vmpooler__completed__#{pool}", vm)
end
backend.del("vmpooler__#{request_id}__#{pool_alias}__#{pool}")
end
backend.expire("vmpooler__odrequest__#{request_id}", 129_600_0)
end
status 200
result['ok'] = true
result
end
post "#{api_prefix}/vm/:template/?" do
content_type :json
result = { 'ok' => false }
@@ -923,6 +1190,7 @@ module Vmpooler
unless arg.to_i > 0
failure.push("You provided a lifetime (#{arg}) but you must provide a positive number.")
end
when 'tags'
unless arg.is_a?(Hash)
failure.push("You provided tags (#{arg}) as something other than a hash.")
@@ -1047,7 +1315,7 @@ module Vmpooler
invalid.each do |bad_template|
metrics.increment("config.invalid.#{bad_template}")
end
result[:bad_templates] = invalid
result[:not_configured] = invalid
status 400
end
else

@@ -15,35 +15,17 @@ module Vmpooler
@metric_prefix = 'connectionpool' if @metric_prefix.nil? || @metric_prefix == ''
end
if Thread.respond_to?(:handle_interrupt)
# MRI
def with_metrics(options = {})
Thread.handle_interrupt(Exception => :never) do
start = Time.now
conn = checkout(options)
timespan_ms = ((Time.now - start) * 1000).to_i
@metrics&.gauge(@metric_prefix + '.available', @available.length)
@metrics&.timing(@metric_prefix + '.waited', timespan_ms)
begin
Thread.handle_interrupt(Exception => :immediate) do
yield conn
end
ensure
checkin
@metrics&.gauge(@metric_prefix + '.available', @available.length)
end
end
end
else
# jruby 1.7.x
def with_metrics(options = {})
def with_metrics(options = {})
Thread.handle_interrupt(Exception => :never) do
start = Time.now
conn = checkout(options)
timespan_ms = ((Time.now - start) * 1000).to_i
@metrics&.gauge(@metric_prefix + '.available', @available.length)
@metrics&.timing(@metric_prefix + '.waited', timespan_ms)
begin
yield conn
Thread.handle_interrupt(Exception => :immediate) do
yield conn
end
ensure
checkin
@metrics&.gauge(@metric_prefix + '.available', @available.length)

File diff suppressed because it is too large

@@ -14,10 +14,11 @@ module Vmpooler
# Provider options passed in during initialization
attr_reader :provider_options
def initialize(config, logger, metrics, name, options)
def initialize(config, logger, metrics, redis_connection_pool, name, options)
@config = config
@logger = logger
@metrics = metrics
@redis = redis_connection_pool
@provider_name = name
# Ensure that there is not a nil provider configuration

@@ -9,8 +9,8 @@ module Vmpooler
class Dummy < Vmpooler::PoolManager::Provider::Base
# Fake VM Provider for testing
def initialize(config, logger, metrics, name, options)
super(config, logger, metrics, name, options)
def initialize(config, logger, metrics, redis_connection_pool, name, options)
super(config, logger, metrics, redis_connection_pool, name, options)
dummyfilename = provider_config['filename']
# This initial_state option is only intended to be used by spec tests

@@ -9,8 +9,8 @@ module Vmpooler
# The connection_pool method is normally used only for testing
attr_reader :connection_pool
def initialize(config, logger, metrics, name, options)
super(config, logger, metrics, name, options)
def initialize(config, logger, metrics, redis_connection_pool, name, options)
super(config, logger, metrics, redis_connection_pool, name, options)
task_limit = global_config[:config].nil? || global_config[:config]['task_limit'].nil? ? 10 : global_config[:config]['task_limit'].to_i
# The default connection pool size is:
@@ -39,6 +39,7 @@ module Vmpooler
end
@provider_hosts = {}
@provider_hosts_lock = Mutex.new
@redis = redis_connection_pool
end
# name of the provider class
@@ -59,12 +60,16 @@ module Vmpooler
def destroy_vm_and_log(vm_name, vm_object, pool, data_ttl)
try = 0 if try.nil?
max_tries = 3
$redis.srem("vmpooler__completed__#{pool}", vm_name)
$redis.hdel("vmpooler__active__#{pool}", vm_name)
$redis.hset("vmpooler__vm__#{vm_name}", 'destroy', Time.now)
@redis.with_metrics do |redis|
redis.multi
redis.srem("vmpooler__completed__#{pool}", vm_name)
redis.hdel("vmpooler__active__#{pool}", vm_name)
redis.hset("vmpooler__vm__#{vm_name}", 'destroy', Time.now)
# Auto-expire metadata key
$redis.expire('vmpooler__vm__' + vm_name, (data_ttl * 60 * 60))
# Auto-expire metadata key
redis.expire('vmpooler__vm__' + vm_name, (data_ttl * 60 * 60))
redis.exec
end
start = Time.now
@@ -343,7 +348,7 @@ module Vmpooler
begin
vm_target_folder = find_vm_folder(pool_name, connection)
vm_target_folder = create_folder(connection, target_folder_path, target_datacenter_name) if vm_target_folder.nil? && @config[:config].key?('create_folders') && (@config[:config]['create_folders'] == true)
vm_target_folder ||= create_folder(connection, target_folder_path, target_datacenter_name) if @config[:config].key?('create_folders') && (@config[:config]['create_folders'] == true)
rescue StandardError
if @config[:config].key?('create_folders') && (@config[:config]['create_folders'] == true)
vm_target_folder = create_folder(connection, target_folder_path, target_datacenter_name)
@@ -351,6 +356,7 @@ module Vmpooler
raise
end
end
raise ArgumentError, "Can not find the configured folder for #{pool_name} #{target_folder_path}" unless vm_target_folder
# Create the new VM
new_vm_object = template_vm_object.CloneVM_Task(
@@ -968,22 +974,24 @@ module Vmpooler
begin
connection = ensured_vsphere_connection(pool_object)
vm_hash = get_vm_details(pool_name, vm_name, connection)
$redis.hset("vmpooler__vm__#{vm_name}", 'host', vm_hash['host_name'])
migration_limit = @config[:config]['migration_limit'] if @config[:config].key?('migration_limit')
migration_count = $redis.scard('vmpooler__migration')
if migration_enabled? @config
if migration_count >= migration_limit
logger.log('s', "[ ] [#{pool_name}] '#{vm_name}' is running on #{vm_hash['host_name']}. No migration will be evaluated since the migration_limit has been reached")
break
end
run_select_hosts(pool_name, @provider_hosts)
if vm_in_target?(pool_name, vm_hash['host_name'], vm_hash['architecture'], @provider_hosts)
logger.log('s', "[ ] [#{pool_name}] No migration required for '#{vm_name}' running on #{vm_hash['host_name']}")
@redis.with_metrics do |redis|
redis.hset("vmpooler__vm__#{vm_name}", 'host', vm_hash['host_name'])
migration_count = redis.scard('vmpooler__migration')
migration_limit = @config[:config]['migration_limit'] if @config[:config].key?('migration_limit')
if migration_enabled? @config
if migration_count >= migration_limit
logger.log('s', "[ ] [#{pool_name}] '#{vm_name}' is running on #{vm_hash['host_name']}. No migration will be evaluated since the migration_limit has been reached")
break
end
run_select_hosts(pool_name, @provider_hosts)
if vm_in_target?(pool_name, vm_hash['host_name'], vm_hash['architecture'], @provider_hosts)
logger.log('s', "[ ] [#{pool_name}] No migration required for '#{vm_name}' running on #{vm_hash['host_name']}")
else
migrate_vm_to_new_host(pool_name, vm_name, vm_hash, connection)
end
else
migrate_vm_to_new_host(pool_name, vm_name, vm_hash, connection)
logger.log('s', "[ ] [#{pool_name}] '#{vm_name}' is running on #{vm_hash['host_name']}")
end
else
logger.log('s', "[ ] [#{pool_name}] '#{vm_name}' is running on #{vm_hash['host_name']}")
end
rescue StandardError
logger.log('s', "[!] [#{pool_name}] '#{vm_name}' is running on #{vm_hash['host_name']}")
@@ -993,15 +1001,23 @@ module Vmpooler
end
def migrate_vm_to_new_host(pool_name, vm_name, vm_hash, connection)
$redis.sadd('vmpooler__migration', vm_name)
@redis.with_metrics do |redis|
redis.sadd('vmpooler__migration', vm_name)
end
target_host_name = select_next_host(pool_name, @provider_hosts, vm_hash['architecture'])
target_host_object = find_host_by_dnsname(connection, target_host_name)
finish = migrate_vm_and_record_timing(pool_name, vm_name, vm_hash, target_host_object, target_host_name)
$redis.hset("vmpooler__vm__#{vm_name}", 'host', target_host_name)
$redis.hset("vmpooler__vm__#{vm_name}", 'migrated', true)
@redis.with_metrics do |redis|
redis.multi
redis.hset("vmpooler__vm__#{vm_name}", 'host', target_host_name)
redis.hset("vmpooler__vm__#{vm_name}", 'migrated', true)
redis.exec
end
logger.log('s', "[>] [#{pool_name}] '#{vm_name}' migrated from #{vm_hash['host_name']} to #{target_host_name} in #{finish} seconds")
ensure
$redis.srem('vmpooler__migration', vm_name)
@redis.with_metrics do |redis|
redis.srem('vmpooler__migration', vm_name)
end
end
def migrate_vm_and_record_timing(pool_name, vm_name, vm_hash, target_host_object, dest_host_name)
@@ -1011,9 +1027,13 @@ module Vmpooler
metrics.timing("migrate.#{pool_name}", finish)
metrics.increment("migrate_from.#{vm_hash['host_name']}")
metrics.increment("migrate_to.#{dest_host_name}")
checkout_to_migration = format('%<time>.2f', time: Time.now - Time.parse($redis.hget("vmpooler__vm__#{vm_name}", 'checkout')))
$redis.hset("vmpooler__vm__#{vm_name}", 'migration_time', finish)
$redis.hset("vmpooler__vm__#{vm_name}", 'checkout_to_migration', checkout_to_migration)
@redis.with_metrics do |redis|
checkout_to_migration = format('%<time>.2f', time: Time.now - Time.parse(redis.hget("vmpooler__vm__#{vm_name}", 'checkout')))
redis.multi
redis.hset("vmpooler__vm__#{vm_name}", 'migration_time', finish)
redis.hset("vmpooler__vm__#{vm_name}", 'checkout_to_migration', checkout_to_migration)
redis.exec
end
finish
end