Merge branch 'master' into test_newruby

commit 28de66829a
Authored by mattkirby on 2020-09-03 12:09:10 -07:00; committed by GitHub
6 changed files with 76 additions and 49 deletions


@@ -9,6 +9,7 @@ redis_port = config[:redis]['port']
redis_password = config[:redis]['password']
redis_connection_pool_size = config[:redis]['connection_pool_size']
redis_connection_pool_timeout = config[:redis]['connection_pool_timeout']
redis_reconnect_attempts = config[:redis]['reconnect_attempts']
logger_file = config[:config]['logfile']
logger = Vmpooler::Logger.new logger_file
@@ -43,7 +44,7 @@ if torun.include? :manager
Vmpooler::PoolManager.new(
config,
logger,
Vmpooler.redis_connection_pool(redis_host, redis_port, redis_password, redis_connection_pool_size, redis_connection_pool_timeout, metrics),
Vmpooler.redis_connection_pool(redis_host, redis_port, redis_password, redis_connection_pool_size, redis_connection_pool_timeout, metrics, redis_reconnect_attempts),
metrics
).execute!
end
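For orientation, the new argument simply threads the configured value through to the pool constructor. A minimal sketch of that flow with hypothetical values (the real hash comes from the config parsing shown in the next file):

config = { redis: { 'server' => 'redis01', 'port' => 6379, 'password' => nil,
                    'connection_pool_size' => 10, 'connection_pool_timeout' => 5,
                    'reconnect_attempts' => 10 } }   # hypothetical values

redis_reconnect_attempts = config[:redis]['reconnect_attempts']   #=> 10
# ...and that value becomes the new trailing argument to Vmpooler.redis_connection_pool.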


@@ -58,59 +58,58 @@ module Vmpooler
end
# Set some configuration defaults
parsed_config[:config]['task_limit'] = string_to_int(ENV['TASK_LIMIT']) || parsed_config[:config]['task_limit'] || 10
parsed_config[:config]['ondemand_clone_limit'] = string_to_int(ENV['ONDEMAND_CLONE_LIMIT']) || parsed_config[:config]['ondemand_clone_limit'] || 10
parsed_config[:config]['task_limit'] = string_to_int(ENV['TASK_LIMIT']) || parsed_config[:config]['task_limit'] || 10
parsed_config[:config]['ondemand_clone_limit'] = string_to_int(ENV['ONDEMAND_CLONE_LIMIT']) || parsed_config[:config]['ondemand_clone_limit'] || 10
parsed_config[:config]['max_ondemand_instances_per_request'] = string_to_int(ENV['MAX_ONDEMAND_INSTANCES_PER_REQUEST']) || parsed_config[:config]['max_ondemand_instances_per_request'] || 10
parsed_config[:config]['migration_limit'] = string_to_int(ENV['MIGRATION_LIMIT']) if ENV['MIGRATION_LIMIT']
parsed_config[:config]['vm_checktime'] = string_to_int(ENV['VM_CHECKTIME']) || parsed_config[:config]['vm_checktime'] || 1
parsed_config[:config]['vm_lifetime'] = string_to_int(ENV['VM_LIFETIME']) || parsed_config[:config]['vm_lifetime'] || 24
parsed_config[:config]['max_lifetime_upper_limit'] = string_to_int(ENV['MAX_LIFETIME_UPPER_LIMIT']) || parsed_config[:config]['max_lifetime_upper_limit']
parsed_config[:config]['ready_ttl'] = string_to_int(ENV['READY_TTL']) || parsed_config[:config]['ready_ttl'] || 60
parsed_config[:config]['ondemand_request_ttl'] = string_to_int(ENV['ONDEMAND_REQUEST_TTL']) || parsed_config[:config]['ondemand_request_ttl'] || 5
parsed_config[:config]['prefix'] = ENV['PREFIX'] || parsed_config[:config]['prefix'] || ''
parsed_config[:config]['logfile'] = ENV['LOGFILE'] if ENV['LOGFILE']
parsed_config[:config]['site_name'] = ENV['SITE_NAME'] if ENV['SITE_NAME']
parsed_config[:config]['domain'] = ENV['DOMAIN'] if ENV['DOMAIN']
parsed_config[:config]['clone_target'] = ENV['CLONE_TARGET'] if ENV['CLONE_TARGET']
parsed_config[:config]['timeout'] = string_to_int(ENV['TIMEOUT']) if ENV['TIMEOUT']
parsed_config[:config]['vm_lifetime_auth'] = string_to_int(ENV['VM_LIFETIME_AUTH']) if ENV['VM_LIFETIME_AUTH']
parsed_config[:config]['max_tries'] = string_to_int(ENV['MAX_TRIES']) if ENV['MAX_TRIES']
parsed_config[:config]['retry_factor'] = string_to_int(ENV['RETRY_FACTOR']) if ENV['RETRY_FACTOR']
parsed_config[:config]['create_folders'] = true?(ENV['CREATE_FOLDERS']) if ENV['CREATE_FOLDERS']
parsed_config[:config]['create_template_delta_disks'] = ENV['CREATE_TEMPLATE_DELTA_DISKS'] if ENV['CREATE_TEMPLATE_DELTA_DISKS']
parsed_config[:config]['migration_limit'] = string_to_int(ENV['MIGRATION_LIMIT']) if ENV['MIGRATION_LIMIT']
parsed_config[:config]['vm_checktime'] = string_to_int(ENV['VM_CHECKTIME']) || parsed_config[:config]['vm_checktime'] || 1
parsed_config[:config]['vm_lifetime'] = string_to_int(ENV['VM_LIFETIME']) || parsed_config[:config]['vm_lifetime'] || 24
parsed_config[:config]['max_lifetime_upper_limit'] = string_to_int(ENV['MAX_LIFETIME_UPPER_LIMIT']) || parsed_config[:config]['max_lifetime_upper_limit']
parsed_config[:config]['ready_ttl'] = string_to_int(ENV['READY_TTL']) || parsed_config[:config]['ready_ttl'] || 60
parsed_config[:config]['ondemand_request_ttl'] = string_to_int(ENV['ONDEMAND_REQUEST_TTL']) || parsed_config[:config]['ondemand_request_ttl'] || 5
parsed_config[:config]['prefix'] = ENV['PREFIX'] || parsed_config[:config]['prefix'] || ''
parsed_config[:config]['logfile'] = ENV['LOGFILE'] if ENV['LOGFILE']
parsed_config[:config]['site_name'] = ENV['SITE_NAME'] if ENV['SITE_NAME']
parsed_config[:config]['domain'] = ENV['DOMAIN'] if ENV['DOMAIN']
parsed_config[:config]['clone_target'] = ENV['CLONE_TARGET'] if ENV['CLONE_TARGET']
parsed_config[:config]['timeout'] = string_to_int(ENV['TIMEOUT']) if ENV['TIMEOUT']
parsed_config[:config]['vm_lifetime_auth'] = string_to_int(ENV['VM_LIFETIME_AUTH']) if ENV['VM_LIFETIME_AUTH']
parsed_config[:config]['max_tries'] = string_to_int(ENV['MAX_TRIES']) if ENV['MAX_TRIES']
parsed_config[:config]['retry_factor'] = string_to_int(ENV['RETRY_FACTOR']) if ENV['RETRY_FACTOR']
parsed_config[:config]['create_folders'] = true?(ENV['CREATE_FOLDERS']) if ENV['CREATE_FOLDERS']
parsed_config[:config]['experimental_features'] = ENV['EXPERIMENTAL_FEATURES'] if ENV['EXPERIMENTAL_FEATURES']
parsed_config[:config]['purge_unconfigured_folders'] = ENV['PURGE_UNCONFIGURED_FOLDERS'] if ENV['PURGE_UNCONFIGURED_FOLDERS']
parsed_config[:config]['usage_stats'] = ENV['USAGE_STATS'] if ENV['USAGE_STATS']
parsed_config[:config]['request_logger'] = ENV['REQUEST_LOGGER'] if ENV['REQUEST_LOGGER']
parsed_config[:config]['create_template_delta_disks'] = ENV['CREATE_TEMPLATE_DELTA_DISKS'] if ENV['CREATE_TEMPLATE_DELTA_DISKS']
set_linked_clone(parsed_config)
parsed_config[:config]['experimental_features'] = ENV['EXPERIMENTAL_FEATURES'] if ENV['EXPERIMENTAL_FEATURES']
parsed_config[:config]['purge_unconfigured_folders'] = ENV['PURGE_UNCONFIGURED_FOLDERS'] if ENV['PURGE_UNCONFIGURED_FOLDERS']
parsed_config[:config]['usage_stats'] = ENV['USAGE_STATS'] if ENV['USAGE_STATS']
parsed_config[:config]['request_logger'] = ENV['REQUEST_LOGGER'] if ENV['REQUEST_LOGGER']
parsed_config[:redis] = parsed_config[:redis] || {}
parsed_config[:redis]['server'] = ENV['REDIS_SERVER'] || parsed_config[:redis]['server'] || 'localhost'
parsed_config[:redis]['port'] = string_to_int(ENV['REDIS_PORT']) if ENV['REDIS_PORT']
parsed_config[:redis]['password'] = ENV['REDIS_PASSWORD'] if ENV['REDIS_PASSWORD']
parsed_config[:redis]['data_ttl'] = string_to_int(ENV['REDIS_DATA_TTL']) || parsed_config[:redis]['data_ttl'] || 168
parsed_config[:redis]['connection_pool_size'] = string_to_int(ENV['REDIS_CONNECTION_POOL_SIZE']) || parsed_config[:redis]['connection_pool_size'] || 10
parsed_config[:redis] = parsed_config[:redis] || {}
parsed_config[:redis]['server'] = ENV['REDIS_SERVER'] || parsed_config[:redis]['server'] || 'localhost'
parsed_config[:redis]['port'] = string_to_int(ENV['REDIS_PORT']) if ENV['REDIS_PORT']
parsed_config[:redis]['password'] = ENV['REDIS_PASSWORD'] if ENV['REDIS_PASSWORD']
parsed_config[:redis]['data_ttl'] = string_to_int(ENV['REDIS_DATA_TTL']) || parsed_config[:redis]['data_ttl'] || 168
parsed_config[:redis]['connection_pool_size'] = string_to_int(ENV['REDIS_CONNECTION_POOL_SIZE']) || parsed_config[:redis]['connection_pool_size'] || 10
parsed_config[:redis]['connection_pool_timeout'] = string_to_int(ENV['REDIS_CONNECTION_POOL_TIMEOUT']) || parsed_config[:redis]['connection_pool_timeout'] || 5
parsed_config[:redis]['reconnect_attempts'] = string_to_int(ENV['REDIS_RECONNECT_ATTEMPTS']) || parsed_config[:redis]['reconnect_attempts'] || 10
parsed_config[:statsd] = parsed_config[:statsd] || {} if ENV['STATSD_SERVER']
parsed_config[:statsd] = parsed_config[:statsd] || {} if ENV['STATSD_SERVER']
parsed_config[:statsd]['server'] = ENV['STATSD_SERVER'] if ENV['STATSD_SERVER']
parsed_config[:statsd]['prefix'] = ENV['STATSD_PREFIX'] if ENV['STATSD_PREFIX']
parsed_config[:statsd]['port'] = string_to_int(ENV['STATSD_PORT']) if ENV['STATSD_PORT']
parsed_config[:statsd]['port'] = string_to_int(ENV['STATSD_PORT']) if ENV['STATSD_PORT']
parsed_config[:graphite] = parsed_config[:graphite] || {} if ENV['GRAPHITE_SERVER']
parsed_config[:graphite] = parsed_config[:graphite] || {} if ENV['GRAPHITE_SERVER']
parsed_config[:graphite]['server'] = ENV['GRAPHITE_SERVER'] if ENV['GRAPHITE_SERVER']
parsed_config[:graphite]['prefix'] = ENV['GRAPHITE_PREFIX'] if ENV['GRAPHITE_PREFIX']
parsed_config[:graphite]['port'] = string_to_int(ENV['GRAPHITE_PORT']) if ENV['GRAPHITE_PORT']
parsed_config[:graphite]['port'] = string_to_int(ENV['GRAPHITE_PORT']) if ENV['GRAPHITE_PORT']
parsed_config[:auth] = parsed_config[:auth] || {} if ENV['AUTH_PROVIDER']
if parsed_config.key? :auth
parsed_config[:auth]['provider'] = ENV['AUTH_PROVIDER'] if ENV['AUTH_PROVIDER']
parsed_config[:auth][:ldap] = parsed_config[:auth][:ldap] || {} if parsed_config[:auth]['provider'] == 'ldap'
parsed_config[:auth][:ldap]['host'] = ENV['LDAP_HOST'] if ENV['LDAP_HOST']
parsed_config[:auth][:ldap]['port'] = string_to_int(ENV['LDAP_PORT']) if ENV['LDAP_PORT']
parsed_config[:auth][:ldap]['base'] = ENV['LDAP_BASE'] if ENV['LDAP_BASE']
parsed_config[:auth]['provider'] = ENV['AUTH_PROVIDER'] if ENV['AUTH_PROVIDER']
parsed_config[:auth][:ldap] = parsed_config[:auth][:ldap] || {} if parsed_config[:auth]['provider'] == 'ldap'
parsed_config[:auth][:ldap]['host'] = ENV['LDAP_HOST'] if ENV['LDAP_HOST']
parsed_config[:auth][:ldap]['port'] = string_to_int(ENV['LDAP_PORT']) if ENV['LDAP_PORT']
parsed_config[:auth][:ldap]['base'] = ENV['LDAP_BASE'] if ENV['LDAP_BASE']
parsed_config[:auth][:ldap]['user_object'] = ENV['LDAP_USER_OBJECT'] if ENV['LDAP_USER_OBJECT']
end
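Every assignment in this block follows the same precedence: environment variable, then the value from the parsed YAML, then a hard-coded default. A standalone sketch of that pattern for the new reconnect_attempts setting, using a simplified stand-in for the string_to_int helper this file already defines:

# Simplified stand-in for the real string_to_int helper (nil for missing input).
def string_to_int(s)
  s.to_i unless s.nil? || s.empty?
end

parsed_config = { redis: {} }            # hypothetical: nothing set in the YAML
ENV.delete('REDIS_RECONNECT_ATTEMPTS')   # hypothetical: no env override either

string_to_int(ENV['REDIS_RECONNECT_ATTEMPTS']) ||
  parsed_config[:redis]['reconnect_attempts'] ||
  10
#=> 10, the documented default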
@@ -164,7 +163,7 @@ module Vmpooler
pools
end
def self.redis_connection_pool(host, port, password, size, timeout, metrics)
def self.redis_connection_pool(host, port, password, size, timeout, metrics, redis_reconnect_attempts = 0)
Vmpooler::PoolManager::GenericConnectionPool.new(
metrics: metrics,
connpool_type: 'redis_connection_pool',
@@ -173,13 +172,14 @@ module Vmpooler
timeout: timeout
) do
connection = Concurrent::Hash.new
redis = new_redis(host, port, password)
redis = new_redis(host, port, password, redis_reconnect_attempts)
connection['connection'] = redis
end
end
def self.new_redis(host = 'localhost', port = nil, password = nil)
Redis.new(host: host, port: port, password: password)
def self.new_redis(host = 'localhost', port = nil, password = nil, redis_reconnect_attempts = 10)
Redis.new(host: host, port: port, password: password, reconnect_attempts: redis_reconnect_attempts, reconnect_delay: 1.5,
reconnect_delay_max: 10.0)
end
def self.pools(conf)
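The three reconnect options now passed to Redis.new are standard redis-rb client options. A minimal sketch of building an equivalent client directly, with hypothetical connection details:

require 'redis'

# Retries a dropped connection up to 10 times, backing off from 1.5s
# toward a 10s ceiling between attempts (host/port are placeholders).
redis = Redis.new(
  host: 'localhost',
  port: 6379,
  reconnect_attempts: 10,
  reconnect_delay: 1.5,
  reconnect_delay_max: 10.0
)
redis.ping  # Redis::CannotConnectError is raised only after the retries are exhausted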


@@ -21,6 +21,7 @@ module Vmpooler
REDIS_CONNECT_BUCKETS = [1.0, 2.0, 3.0, 5.0, 8.0, 13.0, 18.0, 23.0].freeze
@p_metrics = {}
@torun = []
def initialize(logger, params = {})
@prefix = params['prefix'] || 'vmpooler'
@@ -368,6 +369,7 @@
# Top level method to register all the prometheus metrics.
def setup_prometheus_metrics(torun)
@torun = torun
@p_metrics = vmpooler_metrics_table
@p_metrics.each do |name, metric_spec|
# Only register metrics appropriate to api or manager
@@ -399,7 +401,10 @@
metric_key = sublabels.shift.to_sym
raise("Invalid Metric #{metric_key} for #{label}") unless @p_metrics.key? metric_key
metric = @p_metrics[metric_key].clone
metric_spec = @p_metrics[metric_key]
raise("Invalid Component #{component} for #{metric_key}") if (metric_spec[:torun] & @torun).nil?
metric = metric_spec.clone
if metric.key? :metric_suffixes
metric_subkey = sublabels.shift.to_sym
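The component check added above relies on Ruby's array intersection operator; for reference, a minimal illustration of how it behaves:

%i[api manager] & [:api]   #=> [:api]
%i[manager] & [:api]       #=> [] (an empty array; Array#& never returns nil)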


@@ -25,6 +25,13 @@ describe 'GenericConnectionPool' do
connection: 'connection'
}}
it 'should error out when the connection pool cannot establish a connection' do
newsub = Vmpooler.redis_connection_pool("foo,bar", "1234", "fuba", 1, 1, metrics, 0)
expect { newsub.with_metrics do |conn_pool_object|
conn_pool_object.srem('foo', "bar")
end }.to raise_error Redis::CannotConnectError
end
it 'should return a connection object when grabbing one from the pool' do
subject.with_metrics do |conn_pool_object|
expect(conn_pool_object).to be(connection_object)


@@ -21,7 +21,10 @@ describe 'prometheus' do
param_labels: %i[first second last] }
end
let!(:labels_hash) { { labels: { :first => nil, :second => nil, :last => nil } } }
before { subject.instance_variable_set(:@p_metrics, { foo: foo_metrics }) }
before {
subject.instance_variable_set(:@p_metrics, { foo: foo_metrics, torun: %i[api] })
subject.instance_variable_set(:@torun, [ :api ])
}
it 'returns the metric for a given label including parsed labels' do
expect(subject.find_metric('foo.bar')).to include(metric_name: 'mtest_foo_bar')
@@ -41,10 +44,15 @@ describe 'prometheus' do
context "Node Name Handling" do
let!(:node_metrics) do
{ metric_name: 'connection_to',
param_labels: %i[node] }
param_labels: %i[node],
torun: %i[api]
}
end
let!(:nodename_hash) { { labels: { :node => 'test.bar.net'}}}
before { subject.instance_variable_set(:@p_metrics, { connection_to: node_metrics }) }
before {
subject.instance_variable_set(:@p_metrics, { connection_to: node_metrics })
subject.instance_variable_set(:@torun, [ :api ])
}
it 'Return final remaining fields (e.g. fqdn) in last label' do
expect(subject.find_metric('connection_to.test.bar.net')).to include(nodename_hash)


@@ -204,6 +204,12 @@
# - redis_connection_pool_timeout
# How long a task should wait (in seconds) for a redis connection when all connections are in use.
# (default: 5)
#
# - reconnect_attempts
# How many times to retry a Redis connection, for example when the host:port is unavailable.
# The delay between attempts starts at 1.5s and grows to at most 10s, so that 10 attempts take
# about 80s, at which point an error is returned.
# (default: 10)
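As a rough check on the "~80s" figure above, assuming the delay doubles after each failed attempt and is capped at the 10s maximum (the reconnect_delay and reconnect_delay_max values set in the library code earlier in this diff):

delay = 1.5
total = 0.0
10.times do
  total += delay
  delay = [delay * 2, 10.0].min
end
total  #=> 80.5 (1.5 + 3 + 6 + 7 * 10)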
# Example: