Merge pull request #372 from jcoconnor/POOLER-160-Prometheus-Endpoints

(POOLER-160) Add Prometheus Metrics to vmpooler
This commit is contained in:
Brandon High 2020-07-01 11:14:08 -07:00 committed by GitHub
commit 8c18880e29
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
42 changed files with 1501 additions and 313 deletions

View file

@ -51,6 +51,10 @@ Metrics/ModuleLength:
Style/WordArray:
Enabled: false
# RedundantBegin is causing lib/pool_manager & vsphere.rb to fail in Ruby 2.5+
Style/RedundantBegin:
Enabled: false
# Either syntax for regex is ok
Style/RegexpLiteral:
Enabled: false
@ -93,4 +97,4 @@ Naming/MethodParameterName:
# Standard comparisons seem more readable
Style/NumericPredicate:
Enabled: true
EnforcedStyle: comparison

View file

@ -8,6 +8,7 @@ gem 'rake', '~> 13.0'
gem 'redis', '~> 4.1'
gem 'rbvmomi', '~> 2.1'
gem 'sinatra', '~> 2.0'
gem 'prometheus-client', '~> 2.0'
gem 'net-ldap', '~> 0.16'
gem 'statsd-ruby', '~> 1.4.0', :require => 'statsd'
gem 'connection_pool', '~> 2.2'

View file

@ -11,33 +11,38 @@ redis_connection_pool_size = config[:redis]['connection_pool_size']
redis_connection_pool_timeout = config[:redis]['connection_pool_timeout']
logger_file = config[:config]['logfile']
metrics = Vmpooler.new_metrics(config)
logger = Vmpooler::Logger.new logger_file
metrics = Vmpooler::Metrics.init(logger, config)
torun_threads = []
if ARGV.count == 0
torun = ['api', 'manager']
torun = %i[api manager]
else
torun = []
torun << 'api' if ARGV.include? 'api'
torun << 'manager' if ARGV.include? 'manager'
torun << :api if ARGV.include? 'api'
torun << :manager if ARGV.include? 'manager'
exit(2) if torun.empty?
end
if torun.include? 'api'
if torun.include? :api
api = Thread.new do
thr = Vmpooler::API.new
redis = Vmpooler.new_redis(redis_host, redis_port, redis_password)
thr.helpers.configure(config, redis, metrics)
thr.helpers.execute!
Vmpooler::API.execute(torun, config, redis, metrics, logger)
end
torun_threads << api
elsif metrics.respond_to?(:setup_prometheus_metrics)
# Run the cut-down API - Prometheus metrics only.
prometheus_only_api = Thread.new do
Vmpooler::API.execute(torun, config, nil, metrics, logger)
end
torun_threads << prometheus_only_api
end
if torun.include? 'manager'
if torun.include? :manager
manager = Thread.new do
Vmpooler::PoolManager.new(
config,
Vmpooler.new_logger(logger_file),
logger,
Vmpooler.redis_connection_pool(redis_host, redis_port, redis_password, redis_connection_pool_size, redis_connection_pool_timeout, metrics),
metrics
).execute!

View file

@ -191,6 +191,13 @@ https://jenkins.example.com/job/platform_puppet-agent-extra_puppet-agent-integ
Expects a boolean value
(optional; default: false)
### REQUEST_LOGGER
Enable API request logging
When enabled, all requests to the API are written to the standard logger.
Expects a boolean value
(optional; default: false)
### EXTRA_CONFIG
Specify additional application configuration files

View file

@ -15,7 +15,7 @@ module Vmpooler
require 'timeout'
require 'yaml'
%w[api graphite logger pool_manager statsd dummy_statsd generic_connection_pool].each do |lib|
%w[api metrics logger pool_manager generic_connection_pool].each do |lib|
require "vmpooler/#{lib}"
end
@ -84,6 +84,7 @@ module Vmpooler
parsed_config[:config]['experimental_features'] = ENV['EXPERIMENTAL_FEATURES'] if ENV['EXPERIMENTAL_FEATURES']
parsed_config[:config]['purge_unconfigured_folders'] = ENV['PURGE_UNCONFIGURED_FOLDERS'] if ENV['PURGE_UNCONFIGURED_FOLDERS']
parsed_config[:config]['usage_stats'] = ENV['USAGE_STATS'] if ENV['USAGE_STATS']
parsed_config[:config]['request_logger'] = ENV['REQUEST_LOGGER'] if ENV['REQUEST_LOGGER']
parsed_config[:redis] = parsed_config[:redis] || {}
parsed_config[:redis]['server'] = ENV['REDIS_SERVER'] || parsed_config[:redis]['server'] || 'localhost'
@ -166,7 +167,8 @@ module Vmpooler
def self.redis_connection_pool(host, port, password, size, timeout, metrics)
Vmpooler::PoolManager::GenericConnectionPool.new(
metrics: metrics,
metric_prefix: 'redis_connection_pool',
connpool_type: 'redis_connection_pool',
connpool_provider: 'manager',
size: size,
timeout: timeout
) do
@ -180,20 +182,6 @@ module Vmpooler
Redis.new(host: host, port: port, password: password)
end
def self.new_logger(logfile)
Vmpooler::Logger.new logfile
end
def self.new_metrics(params)
if params[:statsd]
Vmpooler::Statsd.new(params[:statsd])
elsif params[:graphite]
Vmpooler::Graphite.new(params[:graphite])
else
Vmpooler::DummyStatsd.new
end
end
def self.pools(conf)
conf[:pools]
end

View file

@ -2,51 +2,58 @@
module Vmpooler
class API < Sinatra::Base
def initialize
super
end
not_found do
content_type :json
result = {
ok: false
}
JSON.pretty_generate(result)
end
# Load dashboard components
begin
require 'dashboard'
rescue LoadError
require File.expand_path(File.join(File.dirname(__FILE__), 'dashboard'))
end
use Vmpooler::Dashboard
# Load API components
%w[helpers dashboard reroute v1].each do |lib|
begin
require "api/#{lib}"
rescue LoadError
require File.expand_path(File.join(File.dirname(__FILE__), 'api', lib))
end
%w[helpers dashboard reroute v1 request_logger].each do |lib|
require "vmpooler/api/#{lib}"
end
# Load dashboard components
require 'vmpooler/dashboard'
use Vmpooler::API::Dashboard
use Vmpooler::API::Reroute
use Vmpooler::API::V1
def configure(config, redis, metrics)
def self.execute(torun, config, redis, metrics, logger)
self.settings.set :config, config
self.settings.set :redis, redis
self.settings.set :redis, redis unless redis.nil?
self.settings.set :metrics, metrics
self.settings.set :checkoutlock, Mutex.new
end
def execute!
self.settings.run!
# Deflating in all situations
# https://www.schneems.com/2017/11/08/80-smaller-rails-footprint-with-rack-deflate/
use Rack::Deflater
# not_found clause placed here to fix rspec test issue.
not_found do
content_type :json
result = {
ok: false
}
JSON.pretty_generate(result)
end
if metrics.respond_to?(:setup_prometheus_metrics)
# Prometheus metrics are only setup if actually specified
# in the config file.
metrics.setup_prometheus_metrics(torun)
# Using customised collector that filters out hostnames on API paths
require 'vmpooler/metrics/promstats/collector_middleware'
require 'prometheus/middleware/exporter'
use Vmpooler::Metrics::Promstats::CollectorMiddleware, metrics_prefix: "#{metrics.metrics_prefix}_http"
use Prometheus::Middleware::Exporter, path: metrics.endpoint
end
if torun.include? :api
# Enable API request logging only if required
use Vmpooler::API::RequestLogger, logger: logger if config[:config]['request_logger']
use Vmpooler::Dashboard
use Vmpooler::API::Dashboard
use Vmpooler::API::Reroute
use Vmpooler::API::V1
end
# Get thee started O WebServer
self.run!
end
end
end
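
As a usage sketch (not part of the diff): bin/vmpooler above drives this entry point in two shapes, depending on whether the full API or only the Prometheus exporter should be mounted. `config`, `redis`, `metrics` and `logger` are assumed to be already built, and note that `execute` ends in `run!`, so it blocks serving HTTP.

```ruby
# Full API: dashboard, reroute and v1 routes are mounted.
Vmpooler::API.execute(%i[api manager], config, redis, metrics, logger)

# Metrics-only mode (taken in bin/vmpooler when :api is not requested but the
# metrics object responds to setup_prometheus_metrics): redis may be nil, and
# only the Prometheus collector/exporter middleware is mounted.
Vmpooler::API.execute(%i[manager], config, nil, metrics, logger)
```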

View file

@ -13,7 +13,7 @@ module Vmpooler
def valid_token?(backend)
return false unless has_token?
backend.exists('vmpooler__token__' + request.env['HTTP_X_AUTH_TOKEN']) ? true : false
backend.exists?('vmpooler__token__' + request.env['HTTP_X_AUTH_TOKEN']) ? true : false
end
def validate_token(backend)

View file

@ -0,0 +1,20 @@
# frozen_string_literal: true
module Vmpooler
class API
class RequestLogger
attr_reader :app
def initialize(app, options = {})
@app = app
@logger = options[:logger]
end
def call(env)
status, headers, body = @app.call(env)
@logger.log('s', "[ ] API: Method: #{env['REQUEST_METHOD']}, Status: #{status}, Path: #{env['PATH_INFO']}, Body: #{body}")
[status, headers, body]
end
end
end
end
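
For illustration: the middleware is mounted conditionally in api.rb above, and each request produces one line through `@logger.log`. The path and status below are hypothetical.

```ruby
# In the Rack stack (see lib/vmpooler/api.rb in this diff):
use Vmpooler::API::RequestLogger, logger: logger if config[:config]['request_logger']

# A GET against /api/v1/vm/testhost would then log roughly:
#   [ ] API: Method: GET, Status: 200, Path: /api/v1/vm/testhost, Body: #<Rack body>
```

Note that `body` is the raw Rack body object, so the logged Body field is its string interpolation rather than the rendered response.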

View file

@ -339,7 +339,7 @@ module Vmpooler
payload&.each do |poolname, count|
next unless count.to_i > config['max_ondemand_instances_per_request']
metrics.increment('ondemandrequest.toomanyrequests.' + poolname)
metrics.increment('ondemandrequest_fail.toomanyrequests.' + poolname)
return true
end
false
@ -360,10 +360,10 @@ module Vmpooler
request_id ||= generate_request_id
result['request_id'] = request_id
if backend.exists("vmpooler__odrequest__#{request_id}")
if backend.exists?("vmpooler__odrequest__#{request_id}")
result['message'] = "request_id '#{request_id}' has already been created"
status 409
metrics.increment('ondemandrequest.generate.duplicaterequests')
metrics.increment('ondemandrequest_generate.duplicaterequests')
return result
end
@ -387,7 +387,7 @@ module Vmpooler
result['domain'] = config['domain'] if config['domain']
result[:ok] = true
metrics.increment('ondemandrequest.generate.success')
metrics.increment('ondemandrequest_generate.success')
result
end
@ -809,6 +809,7 @@ module Vmpooler
post "#{api_prefix}/ondemandvm/?" do
content_type :json
metrics.increment('api_vm.post.ondemand.requestid')
need_token! if Vmpooler::API.settings.config[:auth]
@ -824,12 +825,12 @@ module Vmpooler
else
result[:bad_templates] = invalid
invalid.each do |bad_template|
metrics.increment('ondemandrequest.invalid.' + bad_template)
metrics.increment('ondemandrequest_fail.invalid.' + bad_template)
end
status 404
end
else
metrics.increment('ondemandrequest.invalid.unknown')
metrics.increment('ondemandrequest_fail.invalid.unknown')
status 404
end
rescue JSON::ParserError
@ -846,6 +847,7 @@ module Vmpooler
post "#{api_prefix}/ondemandvm/:template/?" do
content_type :json
result = { 'ok' => false }
metrics.increment('api_vm.delete.ondemand.template')
need_token! if Vmpooler::API.settings.config[:auth]
@ -858,12 +860,12 @@ module Vmpooler
else
result[:bad_templates] = invalid
invalid.each do |bad_template|
metrics.increment('ondemandrequest.invalid.' + bad_template)
metrics.increment('ondemandrequest_fail.invalid.' + bad_template)
end
status 404
end
else
metrics.increment('ondemandrequest.invalid.unknown')
metrics.increment('ondemandrequest_fail.invalid.unknown')
status 404
end
@ -872,6 +874,7 @@ module Vmpooler
get "#{api_prefix}/ondemandvm/:requestid/?" do
content_type :json
metrics.increment('api_vm.get.ondemand.request')
status 404
result = check_ondemand_request(params[:requestid])
@ -882,6 +885,7 @@ module Vmpooler
delete "#{api_prefix}/ondemandvm/:requestid/?" do
content_type :json
need_token! if Vmpooler::API.settings.config[:auth]
metrics.increment('api_vm.delete.ondemand.request')
status 404
result = delete_ondemand_request(params[:requestid])
@ -892,6 +896,7 @@ module Vmpooler
post "#{api_prefix}/vm/?" do
content_type :json
result = { 'ok' => false }
metrics.increment('api_vm.post.vm.checkout')
payload = JSON.parse(request.body.read)
@ -1034,6 +1039,7 @@ module Vmpooler
post "#{api_prefix}/vm/:template/?" do
content_type :json
result = { 'ok' => false }
metrics.increment('api_vm.get.vm.template')
payload = extract_templates_from_query_params(params[:template])
@ -1057,6 +1063,7 @@ module Vmpooler
get "#{api_prefix}/vm/:hostname/?" do
content_type :json
metrics.increment('api_vm.get.vm.hostname')
result = {}
@ -1129,6 +1136,7 @@ module Vmpooler
delete "#{api_prefix}/vm/:hostname/?" do
content_type :json
metrics.increment('api_vm.delete.vm.hostname')
result = {}
@ -1146,8 +1154,9 @@ module Vmpooler
status 200
result['ok'] = true
metrics.increment('delete.success')
else
metrics.increment('delete.srem.failed')
metrics.increment('delete.failed')
end
end
@ -1156,6 +1165,7 @@ module Vmpooler
put "#{api_prefix}/vm/:hostname/?" do
content_type :json
metrics.increment('api_vm.put.vm.modify')
status 404
result = { 'ok' => false }
@ -1164,7 +1174,7 @@ module Vmpooler
params[:hostname] = hostname_shorten(params[:hostname], config['domain'])
if backend.exists('vmpooler__vm__' + params[:hostname])
if backend.exists?('vmpooler__vm__' + params[:hostname])
begin
jdata = JSON.parse(request.body.read)
rescue StandardError
@ -1232,6 +1242,7 @@ module Vmpooler
post "#{api_prefix}/vm/:hostname/disk/:size/?" do
content_type :json
metrics.increment('api_vm.post.vm.disksize')
need_token! if Vmpooler::API.settings.config[:auth]
@ -1240,7 +1251,7 @@ module Vmpooler
params[:hostname] = hostname_shorten(params[:hostname], config['domain'])
if ((params[:size].to_i > 0 )and (backend.exists('vmpooler__vm__' + params[:hostname])))
if ((params[:size].to_i > 0 )and (backend.exists?('vmpooler__vm__' + params[:hostname])))
result[params[:hostname]] = {}
result[params[:hostname]]['disk'] = "+#{params[:size]}gb"
@ -1255,6 +1266,7 @@ module Vmpooler
post "#{api_prefix}/vm/:hostname/snapshot/?" do
content_type :json
metrics.increment('api_vm.post.vm.snapshot')
need_token! if Vmpooler::API.settings.config[:auth]
@ -1263,7 +1275,7 @@ module Vmpooler
params[:hostname] = hostname_shorten(params[:hostname], config['domain'])
if backend.exists('vmpooler__vm__' + params[:hostname])
if backend.exists?('vmpooler__vm__' + params[:hostname])
result[params[:hostname]] = {}
o = [('a'..'z'), ('0'..'9')].map(&:to_a).flatten
@ -1280,6 +1292,7 @@ module Vmpooler
post "#{api_prefix}/vm/:hostname/snapshot/:snapshot/?" do
content_type :json
metrics.increment('api_vm.post.vm.disksize')
need_token! if Vmpooler::API.settings.config[:auth]

View file

@ -1,22 +0,0 @@
# frozen_string_literal: true
module Vmpooler
class DummyStatsd
attr_reader :server, :port, :prefix
def initialize(*)
end
def increment(*)
true
end
def gauge(*)
true
end
def timing(*)
true
end
end
end

View file

@ -11,8 +11,10 @@ module Vmpooler
def initialize(options = {}, &block)
super(options, &block)
@metrics = options[:metrics]
@metric_prefix = options[:metric_prefix]
@metric_prefix = 'connectionpool' if @metric_prefix.nil? || @metric_prefix == ''
@connpool_type = options[:connpool_type]
@connpool_type = 'connectionpool' if @connpool_type.nil? || @connpool_type == ''
@connpool_provider = options[:connpool_provider]
@connpool_provider = 'unknown' if @connpool_provider.nil? || @connpool_provider == ''
end
def with_metrics(options = {})
@ -20,15 +22,15 @@ module Vmpooler
start = Time.now
conn = checkout(options)
timespan_ms = ((Time.now - start) * 1000).to_i
@metrics&.gauge(@metric_prefix + '.available', @available.length)
@metrics&.timing(@metric_prefix + '.waited', timespan_ms)
@metrics&.gauge("connection_available.#{@connpool_type}.#{@connpool_provider}", @available.length)
@metrics&.timing("connection_waited.#{@connpool_type}.#{@connpool_provider}", timespan_ms)
begin
Thread.handle_interrupt(Exception => :immediate) do
yield conn
end
ensure
checkin
@metrics&.gauge(@metric_prefix + '.available', @available.length)
@metrics&.gauge("connection_available.#{@connpool_type}.#{@connpool_provider}", @available.length)
end
end
end
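
A sketch of the effect of this rename (values illustrative; the constructor arguments mirror the redis pool created in lib/vmpooler.rb above):

```ruby
pool = Vmpooler::PoolManager::GenericConnectionPool.new(
  metrics: metrics,                        # any metrics backend, e.g. Metrics::DummyStatsd
  connpool_type: 'redis_connection_pool',
  connpool_provider: 'manager',
  size: 5,
  timeout: 5
) { Redis.new }

pool.with_metrics do |conn|
  # Around this block the pool now emits fixed-name stats:
  #   connection_available.redis_connection_pool.manager  (gauge, before and after checkin)
  #   connection_waited.redis_connection_pool.manager     (timing, in ms)
end
```

The fixed leading names let Promstats#find_metric map these onto the `connection_available` and `connection_waited` metrics with `type`/`provider` labels, instead of minting a new stat line per prefix.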

View file

@ -1,44 +0,0 @@
# frozen_string_literal: true
require 'rubygems' unless defined?(Gem)
module Vmpooler
class Graphite
attr_reader :server, :port, :prefix
def initialize(params = {})
raise ArgumentError, "Graphite server is required. Config: #{params.inspect}" if params['server'].nil? || params['server'].empty?
@server = params['server']
@port = params['port'] || 2003
@prefix = params['prefix'] || 'vmpooler'
end
def increment(label)
log label, 1
end
def gauge(label, value)
log label, value
end
def timing(label, duration)
log label, duration
end
def log(path, value)
Thread.new do
socket = TCPSocket.new(server, port)
begin
socket.puts "#{prefix}.#{path} #{value} #{Time.now.to_i}"
ensure
socket.close
end
end
rescue Errno::EADDRNOTAVAIL => e
warn "Could not assign address to graphite server #{server}: #{e}"
rescue StandardError => e
warn "Failure logging #{path} to graphite server [#{server}:#{port}]: #{e}"
end
end
end

lib/vmpooler/metrics.rb Normal file
View file

@ -0,0 +1,24 @@
# frozen_string_literal: true
require 'vmpooler/metrics/statsd'
require 'vmpooler/metrics/graphite'
require 'vmpooler/metrics/promstats'
require 'vmpooler/metrics/dummy_statsd'
module Vmpooler
class Metrics
# static class instantiate appropriate metrics object.
def self.init(logger, params)
if params[:statsd]
metrics = Vmpooler::Metrics::Statsd.new(logger, params[:statsd])
elsif params[:graphite]
metrics = Vmpooler::Metrics::Graphite.new(logger, params[:graphite])
elsif params[:prometheus]
metrics = Vmpooler::Metrics::Promstats.new(logger, params[:prometheus])
else
metrics = Vmpooler::Metrics::DummyStatsd.new
end
metrics
end
end
end
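
As a usage sketch (not part of this diff), the dispatch above means a config containing a :prometheus section yields a Promstats backend; the keys shown are the ones Promstats#initialize reads, with illustrative values matching its defaults:

```ruby
require 'vmpooler/metrics'

config = {
  prometheus: {
    'prefix'         => 'vmpooler',    # becomes the :vmpooler_instance preset label
    'metrics_prefix' => 'vmpooler',    # prepended to every registered metric name
    'endpoint'       => '/prometheus'  # path served by Prometheus::Middleware::Exporter
  }
}

metrics = Vmpooler::Metrics.init(logger, config)  # logger: a Vmpooler::Logger
metrics.respond_to?(:setup_prometheus_metrics)    # => true; gates the metrics-only API
```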

View file

@ -0,0 +1,24 @@
# frozen_string_literal: true
module Vmpooler
class Metrics
class DummyStatsd < Metrics
attr_reader :server, :port, :prefix
def initialize(*)
end
def increment(*)
true
end
def gauge(*)
true
end
def timing(*)
true
end
end
end
end

View file

@ -0,0 +1,47 @@
# frozen_string_literal: true
require 'rubygems' unless defined?(Gem)
module Vmpooler
class Metrics
class Graphite < Metrics
attr_reader :server, :port, :prefix
def initialize(logger, params = {})
raise ArgumentError, "Graphite server is required. Config: #{params.inspect}" if params['server'].nil? || params['server'].empty?
@server = params['server']
@port = params['port'] || 2003
@prefix = params['prefix'] || 'vmpooler'
@logger = logger
end
def increment(label)
log label, 1
end
def gauge(label, value)
log label, value
end
def timing(label, duration)
log label, duration
end
def log(path, value)
Thread.new do
socket = TCPSocket.new(server, port)
begin
socket.puts "#{prefix}.#{path} #{value} #{Time.now.to_i}"
ensure
socket.close
end
end
rescue Errno::EADDRNOTAVAIL => e
warn "Could not assign address to graphite server #{server}: #{e}"
rescue StandardError => e
@logger.log('s', "[!] Failure logging #{path} to graphite server [#{server}:#{port}]: #{e}")
end
end
end
end

View file

@ -0,0 +1,380 @@
# frozen_string_literal: true
require 'prometheus/client'
module Vmpooler
class Metrics
class Promstats < Metrics
attr_reader :prefix, :endpoint, :metrics_prefix
# Constants for Metric Types
M_COUNTER = 1
M_GAUGE = 2
M_SUMMARY = 3
M_HISTOGRAM = 4
# Customised bucket set used for the pooler clone-time histograms, with more appropriate intervals.
POOLER_CLONE_TIME_BUCKETS = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 120.0, 180.0, 240.0, 300.0, 600.0].freeze
POOLER_READY_TIME_BUCKETS = [30.0, 60.0, 120.0, 180.0, 240.0, 300.0, 500.0, 800.0, 1200.0, 1600.0].freeze
# Same for redis connection times - this is the same as the current Prometheus Default.
# https://github.com/prometheus/client_ruby/blob/master/lib/prometheus/client/histogram.rb#L14
REDIS_CONNECT_BUCKETS = [1.0, 2.0, 3.0, 5.0, 8.0, 13.0, 18.0, 23.0].freeze
@p_metrics = {}
def initialize(logger, params = {})
@prefix = params['prefix'] || 'vmpooler'
@metrics_prefix = params['metrics_prefix'] || 'vmpooler'
@endpoint = params['endpoint'] || '/prometheus'
@logger = logger
# Setup up prometheus registry and data structures
@prometheus = Prometheus::Client.registry
end
# Metrics structure used to register the metrics and also translate/interpret the incoming metrics.
def vmpooler_metrics_table
{
errors: {
mtype: M_COUNTER,
torun: %i[manager],
docstring: 'Count of errors for pool',
prom_metric_prefix: "#{@metrics_prefix}_errors",
metric_suffixes: {
markedasfailed: 'timeout waiting for instance to initialise',
duplicatehostname: 'unable to create instance due to duplicate hostname',
staledns: 'unable to create instance due to duplicate DNS record'
},
param_labels: %i[template_name]
},
user: {
mtype: M_COUNTER,
torun: %i[manager],
docstring: 'Number of pool instances this user created',
prom_metric_prefix: "#{@metrics_prefix}_user",
param_labels: %i[user poolname]
},
usage_litmus: {
mtype: M_COUNTER,
torun: %i[manager],
docstring: 'Pools by Litmus job usage',
prom_metric_prefix: "#{@metrics_prefix}_usage_litmus",
param_labels: %i[user poolname]
},
usage_jenkins_instance: {
mtype: M_COUNTER,
torun: %i[manager],
docstring: 'Pools by Jenkins instance usage',
prom_metric_prefix: "#{@metrics_prefix}_usage_jenkins_instance",
param_labels: %i[jenkins_instance value_stream poolname]
},
usage_branch_project: {
mtype: M_COUNTER,
torun: %i[manager],
docstring: 'Pools by branch/project usage',
prom_metric_prefix: "#{@metrics_prefix}_usage_branch_project",
param_labels: %i[branch project poolname]
},
usage_job_component: {
mtype: M_COUNTER,
torun: %i[manager],
docstring: 'Pools by job/component usage',
prom_metric_prefix: "#{@metrics_prefix}_usage_job_component",
param_labels: %i[job_name component_to_test poolname]
},
checkout: {
mtype: M_COUNTER,
torun: %i[api],
docstring: 'Pool checkout counts',
prom_metric_prefix: "#{@metrics_prefix}_checkout",
metric_suffixes: {
nonresponsive: 'checkout failed - non responsive machine',
empty: 'checkout failed - no machine',
success: 'successful checkout',
invalid: 'checkout failed - invalid template'
},
param_labels: %i[poolname]
},
delete: {
mtype: M_COUNTER,
torun: %i[api],
docstring: 'Delete machine',
prom_metric_prefix: "#{@metrics_prefix}_delete",
metric_suffixes: {
success: 'succeeded',
failed: 'failed'
},
param_labels: []
},
ondemandrequest_generate: {
mtype: M_COUNTER,
torun: %i[api],
docstring: 'Ondemand request',
prom_metric_prefix: "#{@metrics_prefix}_ondemandrequest_generate",
metric_suffixes: {
duplicaterequests: 'failed duplicate request',
success: 'succeeded'
},
param_labels: []
},
ondemandrequest_fail: {
mtype: M_COUNTER,
torun: %i[api],
docstring: 'Ondemand request failure',
prom_metric_prefix: "#{@metrics_prefix}_ondemandrequest_fail",
metric_suffixes: {
toomanyrequests: 'too many requests',
invalid: 'invalid poolname'
},
param_labels: %i[poolname]
},
config: {
mtype: M_COUNTER,
torun: %i[api],
docstring: 'vmpooler pool configuration request',
prom_metric_prefix: "#{@metrics_prefix}_config",
metric_suffixes: { invalid: 'Invalid' },
param_labels: %i[poolname]
},
poolreset: {
mtype: M_COUNTER,
torun: %i[api],
docstring: 'Pool reset counter',
prom_metric_prefix: "#{@metrics_prefix}_poolreset",
metric_suffixes: { invalid: 'Invalid Pool' },
param_labels: %i[poolname]
},
connect: {
mtype: M_COUNTER,
torun: %i[manager],
docstring: 'vmpooler connect (to vSphere)',
prom_metric_prefix: "#{@metrics_prefix}_connect",
metric_suffixes: {
open: 'Connect Succeeded',
fail: 'Connect Failed'
},
param_labels: []
},
migrate_from: {
mtype: M_COUNTER,
torun: %i[manager],
docstring: 'vmpooler machine migrated from',
prom_metric_prefix: "#{@metrics_prefix}_migrate_from",
param_labels: %i[host_name]
},
migrate_to: {
mtype: M_COUNTER,
torun: %i[manager],
docstring: 'vmpooler machine migrated to',
prom_metric_prefix: "#{@metrics_prefix}_migrate_to",
param_labels: %i[host_name]
},
api_vm: {
mtype: M_COUNTER,
torun: %i[api],
docstring: 'Total number of HTTP request/sub-operations handled by the Rack application under the /vm endpoint',
prom_metric_prefix: "#{@metrics_prefix}_http_requests_vm_total",
param_labels: %i[method subpath operation]
},
ready: {
mtype: M_GAUGE,
torun: %i[manager],
docstring: 'vmpooler number of machines in ready State',
prom_metric_prefix: "#{@metrics_prefix}_ready",
param_labels: %i[poolname]
},
running: {
mtype: M_GAUGE,
torun: %i[manager],
docstring: 'vmpooler number of machines running',
prom_metric_prefix: "#{@metrics_prefix}_running",
param_labels: %i[poolname]
},
connection_available: {
mtype: M_GAUGE,
torun: %i[manager],
docstring: 'vmpooler redis connections available',
prom_metric_prefix: "#{@metrics_prefix}_connection_available",
param_labels: %i[type provider]
},
time_to_ready_state: {
mtype: M_HISTOGRAM,
torun: %i[manager],
buckets: POOLER_READY_TIME_BUCKETS,
docstring: 'Time taken for machine to reach ready state for pool',
prom_metric_prefix: "#{@metrics_prefix}_time_to_ready_state",
param_labels: %i[poolname]
},
migrate: {
mtype: M_HISTOGRAM,
torun: %i[manager],
buckets: POOLER_CLONE_TIME_BUCKETS,
docstring: 'vmpooler time taken to migrate machine for pool',
prom_metric_prefix: "#{@metrics_prefix}_migrate",
param_labels: %i[poolname]
},
clone: {
mtype: M_HISTOGRAM,
torun: %i[manager],
buckets: POOLER_CLONE_TIME_BUCKETS,
docstring: 'vmpooler time taken to clone machine',
prom_metric_prefix: "#{@metrics_prefix}_clone",
param_labels: %i[poolname]
},
destroy: {
mtype: M_HISTOGRAM,
torun: %i[manager],
buckets: POOLER_CLONE_TIME_BUCKETS,
docstring: 'vmpooler time taken to destroy machine',
prom_metric_prefix: "#{@metrics_prefix}_destroy",
param_labels: %i[poolname]
},
connection_waited: {
mtype: M_HISTOGRAM,
torun: %i[manager],
buckets: REDIS_CONNECT_BUCKETS,
docstring: 'vmpooler redis connection wait time',
prom_metric_prefix: "#{@metrics_prefix}_connection_waited",
param_labels: %i[type provider]
}
}
end
# Helper to add individual prom metric.
# Allow Histograms to specify the bucket size.
def add_prometheus_metric(metric_spec, name, docstring)
case metric_spec[:mtype]
when M_COUNTER
metric_class = Prometheus::Client::Counter
when M_GAUGE
metric_class = Prometheus::Client::Gauge
when M_SUMMARY
metric_class = Prometheus::Client::Summary
when M_HISTOGRAM
metric_class = Prometheus::Client::Histogram
else
raise("Unable to register metric #{name} with metric type #{metric_spec[:mtype]}")
end
if (metric_spec[:mtype] == M_HISTOGRAM) && (metric_spec.key? :buckets)
prom_metric = metric_class.new(
name.to_sym,
docstring: docstring,
labels: metric_spec[:param_labels] + [:vmpooler_instance],
buckets: metric_spec[:buckets],
preset_labels: { vmpooler_instance: @prefix }
)
else
prom_metric = metric_class.new(
name.to_sym,
docstring: docstring,
labels: metric_spec[:param_labels] + [:vmpooler_instance],
preset_labels: { vmpooler_instance: @prefix }
)
end
@prometheus.register(prom_metric)
end
# Top level method to register all the prometheus metrics.
def setup_prometheus_metrics(torun)
@p_metrics = vmpooler_metrics_table
@p_metrics.each do |_name, metric_spec|
# Only register metrics appropriate to api or manager
next if (torun & metric_spec[:torun]).empty?
if metric_spec.key? :metric_suffixes
# Iterate thru the suffixes if provided to register multiple counters here.
metric_spec[:metric_suffixes].each do |metric_suffix|
add_prometheus_metric(
metric_spec,
"#{metric_spec[:prom_metric_prefix]}_#{metric_suffix[0]}",
"#{metric_spec[:docstring]} #{metric_suffix[1]}"
)
end
else
# No Additional counter suffixes so register this as metric.
add_prometheus_metric(
metric_spec,
metric_spec[:prom_metric_prefix],
metric_spec[:docstring]
)
end
end
end
# locate a metric and check/interpret the sub-fields.
def find_metric(label)
sublabels = label.split('.')
metric_key = sublabels.shift.to_sym
raise("Invalid Metric #{metric_key} for #{label}") unless @p_metrics.key? metric_key
metric = @p_metrics[metric_key].clone
if metric.key? :metric_suffixes
metric_subkey = sublabels.shift.to_sym
raise("Invalid Metric #{metric_key}_#{metric_subkey} for #{label}") unless metric[:metric_suffixes].key? metric_subkey.to_sym
metric[:metric_name] = "#{metric[:prom_metric_prefix]}_#{metric_subkey}"
else
metric[:metric_name] = metric[:prom_metric_prefix]
end
# Check if we are looking for a parameter value at last element.
if metric.key? :param_labels
metric[:labels] = {}
# Special case processing here - if there is only one parameter label then make sure
# we append all of the remaining contents of the metric with "." separators to ensure
# we get full nodenames (e.g. for Migration to node operations)
if metric[:param_labels].length == 1
metric[:labels][metric[:param_labels].first] = sublabels.join('.')
else
metric[:param_labels].reverse_each do |param_label|
metric[:labels][param_label] = sublabels.pop(1).first
end
end
end
metric
end
# Helper to get the metric for a label.
def get(label)
metric = find_metric(label)
[metric, @prometheus.get(metric[:metric_name])]
end
# Note - Catch and log metrics failures so they can be noted, but don't interrupt vmpooler operation.
def increment(label)
begin
counter_metric, c = get(label)
c.increment(labels: counter_metric[:labels])
rescue StandardError => e
@logger.log('s', "[!] prometheus error logging metric #{label} increment : #{e}")
end
end
def gauge(label, value)
begin
unless value.nil?
gauge_metric, g = get(label)
g.set(value.to_i, labels: gauge_metric[:labels])
end
rescue StandardError => e
@logger.log('s', "[!] prometheus error logging gauge #{label}, value #{value}: #{e}")
end
end
def timing(label, duration)
begin
# https://prometheus.io/docs/practices/histograms/
unless duration.nil?
histogram_metric, hm = get(label)
hm.observe(duration.to_f, labels: histogram_metric[:labels])
end
rescue StandardError => e
@logger.log('s', "[!] prometheus error logging timing event label #{label}, duration #{duration}: #{e}")
end
end
end
end
end
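
To make the label translation concrete, here is a hedged sketch of how find_metric resolves the dotted stat labels emitted elsewhere in this PR (names assume the default metrics_prefix of 'vmpooler'; find_metric is an internal helper, reached via increment/gauge/timing in normal use):

```ruby
promstats = Vmpooler::Metrics::Promstats.new(logger, {})
promstats.setup_prometheus_metrics(%i[api manager])  # registers metrics, caches the table

# 'ondemandrequest_fail.toomanyrequests.pool1' (see v1.rb above) resolves to the
# counter vmpooler_ondemandrequest_fail_toomanyrequests with a poolname label:
m = promstats.find_metric('ondemandrequest_fail.toomanyrequests.pool1')
m[:metric_name]  # => "vmpooler_ondemandrequest_fail_toomanyrequests"
m[:labels]       # => { poolname: 'pool1' }

# With a single param_label the whole remainder is kept, dots included, so full
# node names survive (the special case noted in the code):
m = promstats.find_metric('migrate_from.host1.example.com')
m[:labels]       # => { host_name: 'host1.example.com' }
```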

View file

@ -0,0 +1,121 @@
# frozen_string_literal: true
# This is an adapted Collector module for vmpooler based on the sample implementation
# available in the prometheus client_ruby library
# https://github.com/prometheus/client_ruby/blob/master/lib/prometheus/middleware/collector.rb
#
# The code was also failing Rubocop on PR check, so have addressed all the offenses.
#
# The method strip_hostnames_from_path (originally strip_ids_from_path) has been adapted
# to add a match for hostnames in paths to replace with a single ":hostname" string to
# avoid proliferation of stat lines for each new vm hostname deleted, modified or
# otherwise queried.
require 'benchmark'
require 'prometheus/client'
require 'vmpooler/logger'
module Vmpooler
class Metrics
class Promstats
# CollectorMiddleware is an implementation of Rack Middleware customised
# for vmpooler use.
#
# By default metrics are registered on the global registry. Set the
# `:registry` option to use a custom registry.
#
# By default metrics all have the prefix "http_server". Set to something
# else if you like.
#
# The request counter metric is broken down by code, method and path by
# default. Set the `:counter_label_builder` option to use a custom label
# builder.
#
# The request duration metric is broken down by method and path by default.
# Set the `:duration_label_builder` option to use a custom label builder.
#
# Label Builder functions will receive a Rack env and a status code, and must
# return a hash with the labels for that request. They must also accept an empty
# env, and return a hash with the correct keys. This is necessary to initialize
# the metrics with the correct set of labels.
class CollectorMiddleware
attr_reader :app, :registry
def initialize(app, options = {})
@app = app
@registry = options[:registry] || Prometheus::Client.registry
@metrics_prefix = options[:metrics_prefix] || 'http_server'
init_request_metrics
init_exception_metrics
end
def call(env) # :nodoc:
trace(env) { @app.call(env) }
end
protected
def init_request_metrics
@requests = @registry.counter(
:"#{@metrics_prefix}_requests_total",
docstring:
'The total number of HTTP requests handled by the Rack application.',
labels: %i[code method path]
)
@durations = @registry.histogram(
:"#{@metrics_prefix}_request_duration_seconds",
docstring: 'The HTTP response duration of the Rack application.',
labels: %i[method path]
)
end
def init_exception_metrics
@exceptions = @registry.counter(
:"#{@metrics_prefix}_exceptions_total",
docstring: 'The total number of exceptions raised by the Rack application.',
labels: [:exception]
)
end
def trace(env)
response = nil
duration = Benchmark.realtime { response = yield }
record(env, response.first.to_s, duration)
response
rescue StandardError => e
@exceptions.increment(labels: { exception: e.class.name })
raise
end
def record(env, code, duration)
counter_labels = {
code: code,
method: env['REQUEST_METHOD'].downcase,
path: strip_hostnames_from_path(env['PATH_INFO'])
}
duration_labels = {
method: env['REQUEST_METHOD'].downcase,
path: strip_hostnames_from_path(env['PATH_INFO'])
}
@requests.increment(labels: counter_labels)
@durations.observe(duration, labels: duration_labels)
rescue # rubocop:disable Style/RescueStandardError
nil
end
def strip_hostnames_from_path(path)
# Custom for /vm path - so we just collect aggregate stats for all usage along this one
# path. Custom counters are then added for more specific endpoints in v1.rb
# Since we aren't parsing UID/GIDs as in the original example, these are removed.
# Similarly, request IDs are also stripped from the /ondemand path.
path
.gsub(%r{/vm/.+$}, '/vm')
.gsub(%r{/ondemand/.+$}, '/ondemand')
end
end
end
end
end
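
A small sketch of the normalization above (strip_hostnames_from_path is protected, hence send; the request paths are hypothetical):

```ruby
mw = Vmpooler::Metrics::Promstats::CollectorMiddleware.new(->(_env) { [200, {}, ['OK']] })

mw.send(:strip_hostnames_from_path, '/api/v1/vm/abcdefghijkl')    # => "/api/v1/vm"
mw.send(:strip_hostnames_from_path, '/api/v1/ondemand/12345678')  # => "/api/v1/ondemand"
mw.send(:strip_hostnames_from_path, '/api/v1/status')             # => "/api/v1/status"
```

Aggregating every /vm/<hostname> request under one path keeps the requests_total and request_duration_seconds label sets bounded.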

View file

@ -0,0 +1,40 @@
# frozen_string_literal: true
require 'rubygems' unless defined?(Gem)
require 'statsd'
module Vmpooler
class Metrics
class Statsd < Metrics
attr_reader :server, :port, :prefix
def initialize(logger, params = {})
raise ArgumentError, "Statsd server is required. Config: #{params.inspect}" if params['server'].nil? || params['server'].empty?
host = params['server']
@port = params['port'] || 8125
@prefix = params['prefix'] || 'vmpooler'
@server = ::Statsd.new(host, @port)
@logger = logger
end
def increment(label)
server.increment(prefix + '.' + label)
rescue StandardError => e
@logger.log('s', "[!] Failure incrementing #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{e}")
end
def gauge(label, value)
server.gauge(prefix + '.' + label, value)
rescue StandardError => e
@logger.log('s', "[!] Failure updating gauge #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{e}")
end
def timing(label, duration)
server.timing(prefix + '.' + label, duration)
rescue StandardError => e
@logger.log('s', "[!] Failure updating timing #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{e}")
end
end
end
end

View file

@ -491,15 +491,21 @@ module Vmpooler
return if checkout.nil?
user ||= 'unauthenticated'
unless jenkins_build_url
user = user.gsub('.', '_')
$metrics.increment("usage.#{user}.#{poolname}")
user = user.gsub('.', '_')
$metrics.increment("user.#{user}.#{poolname}")
return unless jenkins_build_url
if jenkins_build_url.include? 'litmus'
# Very simple filter for Litmus jobs - just count them coming through for the moment.
$metrics.increment("usage_litmus.#{user}.#{poolname}")
return
end
url_parts = jenkins_build_url.split('/')[2..-1]
instance = url_parts[0]
jenkins_instance = url_parts[0].gsub('.', '_')
value_stream_parts = url_parts[2].split('_')
value_stream_parts = value_stream_parts.map { |s| s.gsub('.', '_') }
value_stream = value_stream_parts.shift
branch = value_stream_parts.pop
project = value_stream_parts.shift
@ -507,22 +513,9 @@ module Vmpooler
build_metadata_parts = url_parts[3]
component_to_test = component_to_test('RMM_COMPONENT_TO_TEST_NAME', build_metadata_parts)
metric_parts = [
'usage',
user,
instance,
value_stream,
branch,
project,
job_name,
component_to_test,
poolname
]
metric_parts = metric_parts.reject(&:nil?)
metric_parts = metric_parts.map { |s| s.gsub('.', '_') }
$metrics.increment(metric_parts.join('.'))
$metrics.increment("usage_jenkins_instance.#{jenkins_instance}.#{value_stream}.#{poolname}")
$metrics.increment("usage_branch_project.#{branch}.#{project}.#{poolname}")
$metrics.increment("usage_job_component.#{job_name}.#{component_to_test}.#{poolname}")
rescue StandardError => e
$logger.log('d', "[!] [#{poolname}] failed while evaluating usage labels on '#{vm}' with an error: #{e}")
raise
@ -537,7 +530,7 @@ module Vmpooler
next if value.nil?
return value if key == match
end
nil
'none'
end
def purge_unused_vms_and_folders

View file

@ -29,7 +29,8 @@ module Vmpooler
logger.log('d', "[#{name}] ConnPool - Creating a connection pool of size #{connpool_size} with timeout #{connpool_timeout}")
@connection_pool = Vmpooler::PoolManager::GenericConnectionPool.new(
metrics: metrics,
metric_prefix: "#{name}_provider_connection_pool",
connpool_type: 'provider_connection_pool',
connpool_provider: name,
size: connpool_size,
timeout: connpool_timeout
) do

View file

@ -25,7 +25,8 @@ module Vmpooler
logger.log('d', "[#{name}] ConnPool - Creating a connection pool of size #{connpool_size} with timeout #{connpool_timeout}")
@connection_pool = Vmpooler::PoolManager::GenericConnectionPool.new(
metrics: metrics,
metric_prefix: "#{name}_provider_connection_pool",
connpool_type: 'provider_connection_pool',
connpool_provider: name,
size: connpool_size,
timeout: connpool_timeout
) do

View file

@ -1,37 +0,0 @@
# frozen_string_literal: true
require 'rubygems' unless defined?(Gem)
require 'statsd'
module Vmpooler
class Statsd
attr_reader :server, :port, :prefix
def initialize(params = {})
raise ArgumentError, "Statsd server is required. Config: #{params.inspect}" if params['server'].nil? || params['server'].empty?
host = params['server']
@port = params['port'] || 8125
@prefix = params['prefix'] || 'vmpooler'
@server = ::Statsd.new(host, @port)
end
def increment(label)
server.increment(prefix + '.' + label)
rescue StandardError => e
warn "Failure incrementing #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{e}"
end
def gauge(label, value)
server.gauge(prefix + '.' + label, value)
rescue StandardError => e
warn "Failure updating gauge #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{e}"
end
def timing(label, duration)
server.timing(prefix + '.' + label, duration)
rescue StandardError => e
warn "Failure updating timing #{prefix}.#{label} on statsd server [#{server}:#{port}]: #{e}"
end
end
end

View file

@ -8,6 +8,13 @@ describe Vmpooler::API::V1 do
Vmpooler::API
end
# Added to ensure no leakage in rack state from previous tests.
# Removes all routes, filters, middleware and extension hooks from the current class
# https://rubydoc.info/gems/sinatra/Sinatra/Base#reset!-class_method
before(:each) do
app.reset!
end
let(:config) {
{
config: {
@ -27,14 +34,13 @@ describe Vmpooler::API::V1 do
describe '/config/pooltemplate' do
let(:prefix) { '/api/v1' }
let(:metrics) { Vmpooler::DummyStatsd.new }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:current_time) { Time.now }
before(:each) do
app.settings.set :config, config
app.settings.set :redis, redis
app.settings.set :metrics, metrics
expect(app).to receive(:run!).once
app.execute([:api], config, redis, metrics, nil)
app.settings.set :config, auth: false
create_token('abcdefghijklmnopqrstuvwxyz012345', 'jdoe', current_time)
end

View file

@ -5,11 +5,18 @@ describe Vmpooler::API::V1 do
include Rack::Test::Methods
def app()
Vmpooler::API end
Vmpooler::API
end
# Added to ensure no leakage in rack state from previous tests.
# Removes all routes, filters, middleware and extension hooks from the current class
# https://rubydoc.info/gems/sinatra/Sinatra/Base#reset!-class_method
before(:each) do
app.reset!
end
describe '/ondemandvm' do
let(:prefix) { '/api/v1' }
let(:metrics) { Vmpooler::DummyStatsd.new }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:config) {
{
config: {
@ -32,13 +39,11 @@ describe Vmpooler::API::V1 do
let(:current_time) { Time.now }
let(:vmname) { 'abcdefghijkl' }
let(:checkoutlock) { Mutex.new }
let(:redis) { MockRedis.new }
let(:uuid) { SecureRandom.uuid }
before(:each) do
app.settings.set :config, config
app.settings.set :redis, redis
app.settings.set :metrics, metrics
expect(app).to receive(:run!).once
app.execute([:api], config, redis, metrics, nil)
app.settings.set :config, auth: false
app.settings.set :checkoutlock, checkoutlock
create_token('abcdefghijklmnopqrstuvwxyz012345', 'jdoe', current_time)

View file

@ -8,6 +8,10 @@ describe Vmpooler::API::V1 do
Vmpooler::API
end
after(:each) do
Vmpooler::API.reset!
end
let(:config) {
{
config: {
@ -27,14 +31,13 @@ describe Vmpooler::API::V1 do
describe '/poolreset' do
let(:prefix) { '/api/v1' }
let(:metrics) { Vmpooler::DummyStatsd.new }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:current_time) { Time.now }
before(:each) do
app.settings.set :config, config
app.settings.set :redis, redis
app.settings.set :metrics, metrics
expect(app).to receive(:run!).once
app.execute([:api], config, redis, metrics, nil)
app.settings.set :config, auth: false
create_token('abcdefghijklmnopqrstuvwxyz012345', 'jdoe', current_time)
end

View file

@ -12,6 +12,13 @@ describe Vmpooler::API::V1 do
Vmpooler::API
end
# Added to ensure no leakage in rack state from previous tests.
# Removes all routes, filters, middleware and extension hooks from the current class
# https://rubydoc.info/gems/sinatra/Sinatra/Base#reset!-class_method
before(:each) do
app.reset!
end
describe 'status and metrics endpoints' do
let(:prefix) { '/api/v1' }
@ -32,8 +39,8 @@ describe Vmpooler::API::V1 do
let(:current_time) { Time.now }
before(:each) do
app.settings.set :config, config
app.settings.set :redis, redis
expect(app).to receive(:run!).once
app.execute([:api], config, redis, nil, nil)
app.settings.set :config, auth: false
create_token('abcdefghijklmnopqrstuvwxyz012345', 'jdoe', current_time)
end

View file

@ -8,19 +8,31 @@ describe Vmpooler::API::V1 do
Vmpooler::API
end
# Added to ensure no leakage in rack state from previous tests.
# Removes all routes, filters, middleware and extension hooks from the current class
# https://rubydoc.info/gems/sinatra/Sinatra/Base#reset!-class_method
before(:each) do
app.reset!
end
describe '/token' do
let(:prefix) { '/api/v1' }
let(:current_time) { Time.now }
let(:config) { { } }
let(:config) { {
config: {}
} }
before do
app.settings.set :config, config
app.settings.set :redis, redis
before(:each) do
expect(app).to receive(:run!).once
app.execute([:api], config, redis, nil, nil)
end
describe 'GET /token' do
context '(auth not configured)' do
let(:config) { { auth: false } }
let(:config) { {
config: {},
auth: false
} }
it 'returns a 404' do
get "#{prefix}/token"
@ -31,6 +43,7 @@ describe Vmpooler::API::V1 do
context '(auth configured)' do
let(:config) {
{
config: {},
auth: {
'provider' => 'dummy'
}
@ -58,7 +71,10 @@ describe Vmpooler::API::V1 do
describe 'POST /token' do
context '(auth not configured)' do
let(:config) { { auth: false } }
let(:config) { {
config: {},
auth: false
} }
it 'returns a 404' do
post "#{prefix}/token"
@ -69,6 +85,7 @@ describe Vmpooler::API::V1 do
context '(auth configured)' do
let(:config) {
{
config: {},
auth: {
'provider' => 'dummy'
}
@ -97,7 +114,9 @@ describe Vmpooler::API::V1 do
let(:prefix) { '/api/v1' }
let(:current_time) { Time.now }
before do
before(:each) do
expect(app).to receive(:run!).once
app.execute([:api], config, redis, nil, nil)
app.settings.set :config, config
app.settings.set :redis, redis
end
@ -109,7 +128,10 @@ describe Vmpooler::API::V1 do
describe 'GET /token/:token' do
context '(auth not configured)' do
let(:config) { { auth: false } }
let(:config) { {
config: {},
auth: false
} }
it 'returns a 404' do
get "#{prefix}/token/this"
@ -119,6 +141,7 @@ describe Vmpooler::API::V1 do
context '(auth configured)' do
let(:config) { {
config: {},
auth: true,
pools: [
{'name' => 'pool1', 'size' => 5}
@ -141,7 +164,10 @@ describe Vmpooler::API::V1 do
describe 'DELETE /token/:token' do
context '(auth not configured)' do
let(:config) { { auth: false } }
let(:config) { {
config: {},
auth: false
} }
it 'returns a 404' do
delete "#{prefix}/token/this"
@ -152,6 +178,7 @@ describe Vmpooler::API::V1 do
context '(auth configured)' do
let(:config) {
{
config: {},
auth: {
'provider' => 'dummy'
}

View file

@ -12,8 +12,16 @@ describe Vmpooler::API::V1 do
Vmpooler::API
end
# Added to ensure no leakage in rack state from previous tests.
# Removes all routes, filters, middleware and extension hooks from the current class
# https://rubydoc.info/gems/sinatra/Sinatra/Base#reset!-class_method
before(:each) do
app.reset!
end
describe '/vm/:hostname' do
let(:prefix) { '/api/v1' }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:config) {
{
@ -33,11 +41,9 @@ describe Vmpooler::API::V1 do
let(:current_time) { Time.now }
let(:redis) { MockRedis.new }
before(:each) do
app.settings.set :config, config
app.settings.set :redis, redis
expect(app).to receive(:run!).once
app.execute([:api], config, redis, metrics, nil)
create_token('abcdefghijklmnopqrstuvwxyz012345', 'jdoe', current_time)
end
@ -81,9 +87,9 @@ describe Vmpooler::API::V1 do
end
context '(tagfilter configured)' do
let(:config) { {
tagfilter: { 'url' => '(.*)\/' },
} }
before(:each) do
app.settings.set :config, tagfilter: { 'url' => '(.*)\/' }
end
it 'correctly filters tags' do
create_vm('testhost', redis)
@ -104,7 +110,9 @@ describe Vmpooler::API::V1 do
end
context '(auth not configured)' do
let(:config) { { auth: false } }
before(:each) do
app.settings.set :config, auth: false
end
it 'allows VM lifetime to be modified without a token' do
create_vm('testhost', redis)

View file

@ -8,9 +8,16 @@ describe Vmpooler::API::V1 do
Vmpooler::API
end
# Added to ensure no leakage in rack state from previous tests.
# Removes all routes, filters, middleware and extension hooks from the current class
# https://rubydoc.info/gems/sinatra/Sinatra/Base#reset!-class_method
before(:each) do
app.reset!
end
describe '/vm' do
let(:prefix) { '/api/v1' }
let(:metrics) { Vmpooler::DummyStatsd.new }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:config) {
{
config: {
@ -32,9 +39,8 @@ describe Vmpooler::API::V1 do
let(:checkoutlock) { Mutex.new }
before(:each) do
app.settings.set :config, config
app.settings.set :redis, redis
app.settings.set :metrics, metrics
expect(app).to receive(:run!).once
app.execute([:api], config, redis, metrics, nil)
app.settings.set :config, auth: false
app.settings.set :checkoutlock, checkoutlock
create_token('abcdefghijklmnopqrstuvwxyz012345', 'jdoe', current_time)
@ -104,8 +110,8 @@ describe Vmpooler::API::V1 do
end
it 'returns 503 for empty pool when aliases are not defined' do
Vmpooler::API.settings.config.delete(:alias)
Vmpooler::API.settings.config[:pool_names] = ['pool1', 'pool2']
app.settings.config.delete(:alias)
app.settings.config[:pool_names] = ['pool1', 'pool2']
create_ready_vm 'pool1', vmname, redis
post "#{prefix}/vm/pool1"

View file

@ -8,9 +8,16 @@ describe Vmpooler::API::V1 do
Vmpooler::API
end
# Added to ensure no leakage in rack state from previous tests.
# Removes all routes, filters, middleware and extension hooks from the current class
# https://rubydoc.info/gems/sinatra/Sinatra/Base#reset!-class_method
before(:each) do
app.reset!
end
describe '/vm/:template' do
let(:prefix) { '/api/v1' }
let(:metrics) { Vmpooler::DummyStatsd.new }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:config) {
{
config: {
@ -33,9 +40,8 @@ describe Vmpooler::API::V1 do
let(:checkoutlock) { Mutex.new }
before(:each) do
app.settings.set :config, config
app.settings.set :redis, redis
app.settings.set :metrics, metrics
expect(app).to receive(:run!).once
app.execute([:api], config, redis, metrics, nil)
app.settings.set :config, auth: false
app.settings.set :checkoutlock, checkoutlock
create_token('abcdefghijklmnopqrstuvwxyz012345', 'jdoe', current_time)
@ -84,8 +90,8 @@ describe Vmpooler::API::V1 do
end
it 'returns 503 for empty pool when aliases are not defined' do
Vmpooler::API.settings.config.delete(:alias)
Vmpooler::API.settings.config[:pool_names] = ['pool1', 'pool2']
app.settings.config.delete(:alias)
app.settings.config[:pool_names] = ['pool1', 'pool2']
create_ready_vm 'pool1', 'abcdefghijklmnop', redis
post "#{prefix}/vm/pool1"

View file

@ -5,13 +5,36 @@ describe Vmpooler::API do
include Rack::Test::Methods
def app()
described_class
Vmpooler::API
end
# Added to ensure no leakage in rack state from previous tests.
# Removes all routes, filters, middleware and extension hooks from the current class
# https://rubydoc.info/gems/sinatra/Sinatra/Base#reset!-class_method
before(:each) do
app.reset!
end
describe 'Dashboard' do
let(:config) { {
config: {},
pools: [
{'name' => 'pool1', 'size' => 5}
],
graphite: {}
} }
before(:each) do
expect(app).to receive(:run!)
app.execute([:api], config, redis, nil, nil)
app.settings.set :config, auth: false
end
context '/' do
before { get '/' }
before(:each) do
get '/'
end
it { expect(last_response.status).to eq(302) }
it { expect(last_response.location).to eq('http://example.org/dashboard/') }
@ -21,7 +44,7 @@ describe Vmpooler::API do
ENV['SITE_NAME'] = 'test pooler'
ENV['VMPOOLER_CONFIG'] = 'thing'
before do
before(:each) do
get '/dashboard/'
end
@ -31,15 +54,18 @@ describe Vmpooler::API do
end
context 'unknown route' do
before { get '/doesnotexist' }
before(:each) do
get '/doesnotexist'
end
it { expect(last_response.status).to eq(404) }
it { expect(last_response.header['Content-Type']).to eq('application/json') }
it { expect(last_response.status).to eq(404) }
it { expect(last_response.body).to eq(JSON.pretty_generate({ok: false})) }
end
describe '/dashboard/stats/vmpooler/pool' do
let(:config) { {
config: {},
pools: [
{'name' => 'pool1', 'size' => 5},
{'name' => 'pool2', 'size' => 1}
@ -47,10 +73,8 @@ describe Vmpooler::API do
graphite: {}
} }
before do
$config = config
before(:each) do
app.settings.set :config, config
app.settings.set :redis, redis
app.settings.set :environment, :test
end
@ -112,6 +136,7 @@ describe Vmpooler::API do
describe '/dashboard/stats/vmpooler/running' do
let(:config) { {
config: {},
pools: [
{'name' => 'pool-1', 'size' => 5},
{'name' => 'pool-2', 'size' => 1},
@ -120,10 +145,8 @@ describe Vmpooler::API do
graphite: {}
} }
before do
$config = config
before(:each) do
app.settings.set :config, config
app.settings.set :redis, redis
app.settings.set :environment, :test
end

View file

@ -8,7 +8,7 @@ require 'rbvmomi'
require 'rspec'
require 'vmpooler'
require 'redis'
require 'vmpooler/statsd'
require 'vmpooler/metrics'
def project_root_dir
File.dirname(File.dirname(__FILE__))

View file

@ -0,0 +1,135 @@
require 'rack/test'
require 'vmpooler/metrics/promstats/collector_middleware'
describe Vmpooler::Metrics::Promstats::CollectorMiddleware do
include Rack::Test::Methods
# Reset the data store
before do
Prometheus::Client.config.data_store = Prometheus::Client::DataStores::Synchronized.new
end
let(:registry) do
Prometheus::Client::Registry.new
end
let(:original_app) do
->(_) { [200, { 'Content-Type' => 'text/html' }, ['OK']] }
end
let!(:app) do
described_class.new(original_app, registry: registry)
end
let(:dummy_error) { RuntimeError.new("Dummy error from tests") }
it 'returns the app response' do
get '/foo'
expect(last_response).to be_ok
expect(last_response.body).to eql('OK')
end
it 'handles errors in the registry gracefully' do
counter = registry.get(:http_server_requests_total)
expect(counter).to receive(:increment).and_raise(dummy_error)
get '/foo'
expect(last_response).to be_ok
end
it 'traces request information' do
expect(Benchmark).to receive(:realtime).and_yield.and_return(0.2)
get '/foo'
metric = :http_server_requests_total
labels = { method: 'get', path: '/foo', code: '200' }
expect(registry.get(metric).get(labels: labels)).to eql(1.0)
metric = :http_server_request_duration_seconds
labels = { method: 'get', path: '/foo' }
expect(registry.get(metric).get(labels: labels)).to include("0.1" => 0, "0.25" => 1)
end
it 'normalizes paths containing /vm by default' do
expect(Benchmark).to receive(:realtime).and_yield.and_return(0.3)
get '/foo/vm/bar-mumble-flame'
metric = :http_server_requests_total
labels = { method: 'get', path: '/foo/vm', code: '200' }
expect(registry.get(metric).get(labels: labels)).to eql(1.0)
metric = :http_server_request_duration_seconds
labels = { method: 'get', path: '/foo/vm' }
expect(registry.get(metric).get(labels: labels)).to include("0.1" => 0, "0.5" => 1)
end
it 'normalizes paths containing /ondemandvm by default' do
expect(Benchmark).to receive(:realtime).and_yield.and_return(0.3)
get '/foo/ondemand/bar/fatman'
metric = :http_server_requests_total
labels = { method: 'get', path: '/foo/ondemand', code: '200' }
expect(registry.get(metric).get(labels: labels)).to eql(1.0)
metric = :http_server_request_duration_seconds
labels = { method: 'get', path: '/foo/ondemand' }
expect(registry.get(metric).get(labels: labels)).to include("0.1" => 0, "0.5" => 1)
end
context 'when the app raises an exception' do
let(:original_app) do
lambda do |env|
raise dummy_error if env['PATH_INFO'] == '/broken'
[200, { 'Content-Type' => 'text/html' }, ['OK']]
end
end
before do
get '/foo'
end
it 'traces exceptions' do
expect { get '/broken' }.to raise_error RuntimeError
metric = :http_server_exceptions_total
labels = { exception: 'RuntimeError' }
expect(registry.get(metric).get(labels: labels)).to eql(1.0)
end
end
context 'when provided a custom metrics_prefix' do
let!(:app) do
described_class.new(
original_app,
registry: registry,
metrics_prefix: 'lolrus',
)
end
it 'provides alternate metric names' do
expect(
registry.get(:lolrus_requests_total),
).to be_a(Prometheus::Client::Counter)
expect(
registry.get(:lolrus_request_duration_seconds),
).to be_a(Prometheus::Client::Histogram)
expect(
registry.get(:lolrus_exceptions_total),
).to be_a(Prometheus::Client::Counter)
end
it "doesn't register the default metrics" do
expect(registry.get(:http_server_requests_total)).to be(nil)
expect(registry.get(:http_server_request_duration_seconds)).to be(nil)
expect(registry.get(:http_server_exceptions_total)).to be(nil)
end
end
end

View file

@ -30,6 +30,7 @@ describe 'Vmpooler' do
['experimental_features', test_bool, nil],
['purge_unconfigured_folders', test_bool, nil],
['usage_stats', test_bool, nil],
['request_logger', test_bool, nil],
['extra_config', test_string, nil],
]

View file

@ -1,16 +1,18 @@
require 'spec_helper'
describe 'GenericConnectionPool' do
let(:metrics) { Vmpooler::DummyStatsd.new }
let(:metric_prefix) { 'prefix' }
let(:default_metric_prefix) { 'connectionpool' }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:connpool_type) { 'test_connection_pool' }
let(:connpool_provider) { 'testprovider' }
let(:default_connpool_type) { 'connectionpool' }
let(:connection_object) { double('connection') }
let(:pool_size) { 1 }
let(:pool_timeout) { 1 }
subject { Vmpooler::PoolManager::GenericConnectionPool.new(
metrics: metrics,
metric_prefix: metric_prefix,
connpool_type: connpool_type,
connpool_provider: connpool_provider,
size: pool_size,
timeout: pool_timeout
) { connection_object }
@ -65,7 +67,7 @@ describe 'GenericConnectionPool' do
context 'When metrics are configured' do
it 'should emit a gauge metric when the connection is grabbed and released' do
expect(metrics).to receive(:gauge).with(/\.available/,Integer).exactly(2).times
expect(metrics).to receive(:gauge).with(/connection_available/,Integer).exactly(2).times
subject.with_metrics do |conn1|
# do nothing
@ -73,7 +75,7 @@ describe 'GenericConnectionPool' do
end
it 'should emit a timing metric when the connection is grabbed' do
expect(metrics).to receive(:timing).with(/\.waited/,Integer).exactly(1).times
expect(metrics).to receive(:timing).with(/connection_waited/,Integer).exactly(1).times
subject.with_metrics do |conn1|
# do nothing
@ -81,8 +83,8 @@ describe 'GenericConnectionPool' do
end
it 'should emit metrics with the specified prefix' do
expect(metrics).to receive(:gauge).with(/#{metric_prefix}\./,Integer).at_least(1).times
expect(metrics).to receive(:timing).with(/#{metric_prefix}\./,Integer).at_least(1).times
expect(metrics).to receive(:gauge).with(/#{connpool_type}\./,Integer).at_least(1).times
expect(metrics).to receive(:timing).with(/#{connpool_type}\./,Integer).at_least(1).times
subject.with_metrics do |conn1|
# do nothing
@ -90,11 +92,11 @@ describe 'GenericConnectionPool' do
end
context 'Metric prefix is missing' do
let(:metric_prefix) { nil }
let(:connpool_type) { nil }
it 'should emit metrics with default prefix' do
expect(metrics).to receive(:gauge).with(/#{default_metric_prefix}\./,Integer).at_least(1).times
expect(metrics).to receive(:timing).with(/#{default_metric_prefix}\./,Integer).at_least(1).times
expect(metrics).to receive(:gauge).with(/#{default_connpool_type}\./,Integer).at_least(1).times
expect(metrics).to receive(:timing).with(/#{default_connpool_type}\./,Integer).at_least(1).times
subject.with_metrics do |conn1|
# do nothing
@ -103,11 +105,11 @@ describe 'GenericConnectionPool' do
end
context 'Metric prefix is empty' do
let(:metric_prefix) { '' }
let(:connpool_type) { '' }
it 'should emit metrics with default prefix' do
expect(metrics).to receive(:gauge).with(/#{default_metric_prefix}\./,Integer).at_least(1).times
expect(metrics).to receive(:timing).with(/#{default_metric_prefix}\./,Integer).at_least(1).times
expect(metrics).to receive(:gauge).with(/#{default_connpool_type}\./,Integer).at_least(1).times
expect(metrics).to receive(:timing).with(/#{default_connpool_type}\./,Integer).at_least(1).times
subject.with_metrics do |conn1|
# do nothing

View file

@ -10,7 +10,7 @@ RSpec::Matchers.define :a_pool_with_name_of do |value|
end
describe 'Pool Manager' do
let(:logger) { MockLogger.new }
let(:metrics) { Vmpooler::DummyStatsd.new }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:pool) { 'pool1' }
let(:vm) { 'vm1' }
let(:timeout) { 5 }
@ -22,12 +22,12 @@ describe 'Pool Manager' do
let(:provider_options) { {} }
let(:redis_connection_pool) { Vmpooler::PoolManager::GenericConnectionPool.new(
metrics: metrics,
metric_prefix: 'redis_connection_pool',
connpool_type: 'redis_connection_pool',
connpool_provider: 'testprovider',
size: 1,
timeout: 5
) { MockRedis.new }
}
let(:redis) { MockRedis.new }
let(:provider) { Vmpooler::PoolManager::Provider::Base.new(config, logger, metrics, redis_connection_pool, 'mock_provider', provider_options) }
@ -1110,7 +1110,7 @@ EOT
it 'should emit a metric' do
redis_connection_pool.with do |redis|
expect(metrics).to receive(:increment).with("usage.unauthenticated.#{template}")
expect(metrics).to receive(:increment).with("user.unauthenticated.#{template}")
subject.get_vm_usage_labels(vm, redis)
end
@ -1126,7 +1126,8 @@ EOT
end
it 'should emit a metric' do
expect(metrics).to receive(:increment).with("usage.#{user}.#{template}")
expect(metrics).to receive(:increment).with("user.#{user}.#{template}")
expect(metrics).not_to receive(:increment)
redis_connection_pool.with do |redis|
subject.get_vm_usage_labels(vm, redis)
@ -1135,7 +1136,7 @@ EOT
context 'with a user with period in name' do
let(:user) { 'test.user'.gsub('.', '_') }
let(:metric_string) { "usage.#{user}.#{template}" }
let(:metric_string) { "user.#{user}.#{template}" }
let(:metric_nodes) { metric_string.split('.') }
before(:each) do
@ -1146,6 +1147,7 @@ EOT
it 'should emit a metric with the character replaced' do
expect(metrics).to receive(:increment).with(metric_string)
expect(metrics).not_to receive(:increment)
redis_connection_pool.with do |redis|
subject.get_vm_usage_labels(vm, redis)
@ -1155,7 +1157,6 @@ EOT
it 'should include three nodes' do
expect(metric_nodes.count).to eq(3)
end
end
context 'with a jenkins_build_url label' do
@ -1167,16 +1168,11 @@ EOT
let(:branch) { value_stream_parts.pop }
let(:project) { value_stream_parts.shift }
let(:job_name) { value_stream_parts.join('_') }
let(:metric_string_nodes) {
[
'usage', user, instance, value_stream, branch, project, job_name, template
]
}
let(:metric_string_sub) {
metric_string_nodes.map { |s| s.gsub('.', '_')
}
}
let(:metric_string) { metric_string_sub.join('.') }
let(:metric_string_1) { "user.#{user}.#{template}" }
let(:metric_string_2) { "usage_jenkins_instance.#{instance.gsub('.', '_')}.#{value_stream.gsub('.', '_')}.#{template}" }
let(:metric_string_3) { "usage_branch_project.#{branch.gsub('.', '_')}.#{project.gsub('.', '_')}.#{template}" }
let(:metric_string_4) { "usage_job_component.#{job_name.gsub('.', '_')}.none.#{template}" }
before(:each) do
redis_connection_pool.with do |redis|
@ -1184,8 +1180,12 @@ EOT
end
end
it 'should emit a metric with information from the URL' do
expect(metrics).to receive(:increment).with(metric_string)
it 'should emit 4 metrics with information from the URL' do
expect(metrics).to receive(:increment).with(metric_string_1)
expect(metrics).to receive(:increment).with(metric_string_2)
expect(metrics).to receive(:increment).with(metric_string_3)
expect(metrics).to receive(:increment).with(metric_string_4)
expect(metrics).not_to receive(:increment)
redis_connection_pool.with do |redis|
subject.get_vm_usage_labels(vm, redis)
@ -1207,14 +1207,23 @@ EOT
let(:expected_string) { "usage.#{user}.#{instance}.#{value_stream}.#{branch}.#{project}.#{job_name}.#{build_component}.#{template}" }
let(:metric_nodes) { expected_string.split('.') }
let(:metric_string_1) { "user.#{user}.#{template}" }
let(:metric_string_2) { "usage_jenkins_instance.#{instance.gsub('.', '_')}.#{value_stream.gsub('.', '_')}.#{template}" }
let(:metric_string_3) { "usage_branch_project.#{branch.gsub('.', '_')}.#{project.gsub('.', '_')}.#{template}" }
let(:metric_string_4) { "usage_job_component.#{job_name.gsub('.', '_')}.#{build_component}.#{template}" }
before(:each) do
redis_connection_pool.with do |redis|
create_tag(vm, 'jenkins_build_url', jenkins_build_url, redis)
end
end
it 'should emit a metric with information from the URL' do
expect(metrics).to receive(:increment).with(expected_string)
it 'should emit 4 metrics with information from the URL' do
expect(metrics).to receive(:increment).with(metric_string_1)
expect(metrics).to receive(:increment).with(metric_string_2)
expect(metrics).to receive(:increment).with(metric_string_3)
expect(metrics).to receive(:increment).with(metric_string_4)
expect(metrics).not_to receive(:increment)
redis_connection_pool.with do |redis|
subject.get_vm_usage_labels(vm, redis)
@ -1236,20 +1245,54 @@ EOT
let(:project) { value_stream_parts.shift }
let(:job_name) { value_stream_parts.join('_') }
let(:metric_string_1) { "user.#{user}.#{template}" }
let(:metric_string_2) { "usage_jenkins_instance.#{instance.gsub('.', '_')}.#{value_stream.gsub('.', '_')}.#{template}" }
let(:metric_string_3) { "usage_branch_project.#{branch.gsub('.', '_')}.#{project.gsub('.', '_')}.#{template}" }
let(:metric_string_4) { "usage_job_component.#{job_name.gsub('.', '_')}.none.#{template}" }
before(:each) do
redis_connection_pool.with do |redis|
create_tag(vm, 'jenkins_build_url', jenkins_build_url, redis)
end
end
it 'should emit a metric with information from the URL without a build_component' do
expect(metrics).to receive(:increment).with("usage.#{user}.#{instance}.#{value_stream}.#{branch}.#{project}.#{job_name}.#{template}")
it 'should emit 4 metrics with information from the URL without a build_component' do
expect(metrics).to receive(:increment).with(metric_string_1)
expect(metrics).to receive(:increment).with(metric_string_2)
expect(metrics).to receive(:increment).with(metric_string_3)
expect(metrics).to receive(:increment).with(metric_string_4)
expect(metrics).not_to receive(:increment)
redis_connection_pool.with do |redis|
subject.get_vm_usage_labels(vm, redis)
end
end
end
end
context 'with a litmus job' do
let(:jenkins_build_url) { 'https://litmus_manual' }
let(:metric_string_1) { "user.#{user}.#{template}" }
let(:metric_string_2) { "usage_litmus.#{user}.#{template}" }
before(:each) do
redis_connection_pool.with do |redis|
create_tag(vm, 'jenkins_build_url', jenkins_build_url, redis)
end
end
it 'should emit 2 metrics with the second indicating a litmus job' do
expect(metrics).to receive(:increment).with(metric_string_1)
expect(metrics).to receive(:increment).with(metric_string_2)
expect(metrics).not_to receive(:increment)
redis_connection_pool.with do |redis|
subject.get_vm_usage_labels(vm, redis)
end
end
end
end
@ -1269,21 +1312,21 @@ EOT
end
context 'when match contains no value' do
it 'should return nil' do
expect(subject.component_to_test(matching_key, matching_key)).to be nil
it 'should return none' do
expect(subject.component_to_test(matching_key, matching_key)).to eq('none')
end
end
end
context 'when string contains no key value pairs' do
it 'should return none' do
expect(subject.component_to_test(matching_key, nonmatrix_string)).to be nil
expect(subject.component_to_test(matching_key, nonmatrix_string)).to eq('none')
end
end
context 'when labels_string is a job number' do
it 'should return none' do
expect(subject.component_to_test(matching_key, '25')).to be nil
expect(subject.component_to_test(matching_key, '25')).to eq('none')
end
end
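These hunks change component_to_test to return the string 'none' rather than nil whenever no component can be extracted, so the result can always be used as a Prometheus label value. A minimal sketch consistent with these expectations (not the shipped implementation; the example arguments are invented):

def component_to_test(match, labels_string)
  return 'none' if labels_string.nil?
  labels_string.split(',').each do |label_pair|
    key, value = label_pair.split('=')
    return value if key == match && value
  end
  'none'
end

component_to_test('RMM', 'RMM=puppet-agent,PLATFORM=debian')  # => "puppet-agent"
component_to_test('RMM', '25')                                # => "none"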

333
spec/unit/promstats_spec.rb Normal file
View file

@ -0,0 +1,333 @@
# frozen_string_literal: true
require 'spec_helper'
describe 'prometheus' do
logger = MockLogger.new
params = { 'prefix': 'test', 'metrics_prefix': 'mtest', 'endpoint': 'eptest' }
subject = Vmpooler::Metrics::Promstats.new(logger, params)
let(:logger) { MockLogger.new }
describe '#initialize' do
it 'returns a Metrics object' do
expect(Vmpooler::Metrics::Promstats.new(logger)).to be_a(Vmpooler::Metrics)
end
end
describe '#find_metric' do
context "Single Value Parameters" do
let!(:foo_metrics) do
{ metric_suffixes: { bar: 'baz' },
param_labels: %i[first second last] }
end
let!(:labels_hash) { { labels: { :first => nil, :second => nil, :last => nil } } }
before { subject.instance_variable_set(:@p_metrics, { foo: foo_metrics }) }
it 'returns the metric for a given label including parsed labels' do
expect(subject.find_metric('foo.bar')).to include(metric_name: '_bar')
expect(subject.find_metric('foo.bar')).to include(foo_metrics)
expect(subject.find_metric('foo.bar')).to include(labels_hash)
end
it 'raises an error when the given label is not present in metrics' do
expect { subject.find_metric('bogus') }.to raise_error(RuntimeError, 'Invalid Metric bogus for bogus')
end
it 'raises an error when the given label specifies metric_suffixes but the following suffix not present in metrics' do
expect { subject.find_metric('foo.metric_suffixes.bogus') }.to raise_error(RuntimeError, 'Invalid Metric foo_metric_suffixes for foo.metric_suffixes.bogus')
end
end
context "Node Name Handling" do
let!(:node_metrics) do
{ metric_name: 'connection_to',
param_labels: %i[node] }
end
let!(:nodename_hash) { { labels: { node: 'test.bar.net' } } }
before { subject.instance_variable_set(:@p_metrics, { connection_to: node_metrics }) }
it 'returns the final remaining fields (e.g. fqdn) in the last label' do
expect(subject.find_metric('connection_to.test.bar.net')).to include(nodename_hash)
end
end
end
context 'setup_prometheus_metrics' do
before(:all) do
Prometheus::Client.config.data_store = Prometheus::Client::DataStores::Synchronized.new
subject.setup_prometheus_metrics(%i[api manager])
end
describe '#setup_prometheus_metrics' do
it 'calls add_prometheus_metric for each item in list' do
Prometheus::Client.config.data_store = Prometheus::Client::DataStores::Synchronized.new
expect(subject).to receive(:add_prometheus_metric).at_least(subject.vmpooler_metrics_table.size).times
subject.setup_prometheus_metrics(%i[api manager])
end
end
describe '#increment' do
it 'Increments checkout.nonresponsive.#{template_backend}' do
template_backend = 'test'
expect { subject.increment("checkout.nonresponsive.#{template_backend}") }.to change {
metric, po = subject.get("checkout.nonresponsive.#{template_backend}")
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments checkout.empty. + requested' do
requested = 'test'
expect { subject.increment('checkout.empty.' + requested) }.to change {
metric, po = subject.get('checkout.empty.' + requested)
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments checkout.success. + vmtemplate' do
vmtemplate = 'test-template'
expect { subject.increment('checkout.success.' + vmtemplate) }.to change {
metric, po = subject.get('checkout.success.' + vmtemplate)
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments checkout.invalid. + bad_template' do
bad_template = 'test-template'
expect { subject.increment('checkout.invalid.' + bad_template) }.to change {
metric, po = subject.get('checkout.invalid.' + bad_template)
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments checkout.invalid.unknown' do
expect { subject.increment('checkout.invalid.unknown') }.to change {
metric, po = subject.get('checkout.invalid.unknown')
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments delete.failed' do
expect { subject.increment('delete.failed') }.to change {
metric, po = subject.get('delete.failed')
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments delete.success' do
expect { subject.increment('delete.success') }.to change {
metric, po = subject.get('delete.success')
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments ondemandrequest_generate.duplicaterequests' do
expect { subject.increment('ondemandrequest_generate.duplicaterequests') }.to change {
metric, po = subject.get('ondemandrequest_generate.duplicaterequests')
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments ondemandrequest_generate.success' do
expect { subject.increment('ondemandrequest_generate.success') }.to change {
metric, po = subject.get('ondemandrequest_generate.success')
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments ondemandrequest_fail.toomanyrequests.#{bad_template}' do
bad_template = 'test-template'
expect { subject.increment("ondemandrequest_fail.toomanyrequests.#{bad_template}") }.to change {
metric, po = subject.get("ondemandrequest_fail.toomanyrequests.#{bad_template}")
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments ondemandrequest_fail.invalid.#{bad_template}' do
bad_template = 'test-template'
expect { subject.increment("ondemandrequest_fail.invalid.#{bad_template}") }.to change {
metric, po = subject.get("ondemandrequest_fail.invalid.#{bad_template}")
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments config.invalid.#{bad_template}' do
bad_template = 'test-template'
expect { subject.increment("config.invalid.#{bad_template}") }.to change {
metric, po = subject.get("config.invalid.#{bad_template}")
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments config.invalid.unknown' do
expect { subject.increment('config.invalid.unknown') }.to change {
metric, po = subject.get('config.invalid.unknown')
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments poolreset.invalid.#{bad_pool}' do
bad_pool = 'test-pool'
expect { subject.increment("poolreset.invalid.#{bad_pool}") }.to change {
metric, po = subject.get("poolreset.invalid.#{bad_pool}")
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments poolreset.invalid.unknown' do
expect { subject.increment('poolreset.invalid.unknown') }.to change {
metric, po = subject.get('poolreset.invalid.unknown')
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments errors.markedasfailed.#{pool}' do
pool = 'test-pool'
expect { subject.increment("errors.markedasfailed.#{pool}") }.to change {
metric, po = subject.get("errors.markedasfailed.#{pool}")
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments errors.duplicatehostname.#{pool_name}' do
pool_name = 'test-pool'
expect { subject.increment("errors.duplicatehostname.#{pool_name}") }.to change {
metric, po = subject.get("errors.duplicatehostname.#{pool_name}")
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments errors.staledns.#{pool_name}' do
pool_name = 'test-pool'
expect { subject.increment("errors.staledns.#{pool_name}") }.to change {
metric, po = subject.get("errors.staledns.#{pool_name}")
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments user.#{user}.#{poolname}' do
user = 'myuser'
poolname = 'test-pool'
expect { subject.increment("user.#{user}.#{poolname}") }.to change {
metric, po = subject.get("user.#{user}.#{poolname}")
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments usage_litmus.#{user}.#{poolname}' do
user = 'myuser'
poolname = 'test-pool'
expect { subject.increment("usage_litmus.#{user}.#{poolname}") }.to change {
metric, po = subject.get("usage_litmus.#{user}.#{poolname}")
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments label usage_jenkins_instance.#{jenkins_instance}.#{value_stream}.#{poolname}' do
jenkins_instance = 'jenkins_test_instance'
value_stream = 'notional_value'
poolname = 'test-pool'
expect { subject.increment("usage_jenkins_instance.#{jenkins_instance}.#{value_stream}.#{poolname}") }.to change {
metric, po = subject.get("usage_jenkins_instance.#{jenkins_instance}.#{value_stream}.#{poolname}")
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments label usage_branch_project.#{branch}.#{project}.#{poolname}' do
branch = 'treetop'
project = 'test-project'
poolname = 'test-pool'
expect { subject.increment("usage_branch_project.#{branch}.#{project}.#{poolname}") }.to change {
metric, po = subject.get("usage_branch_project.#{branch}.#{project}.#{poolname}")
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments label usage_job_component.#{job_name}.#{component_to_test}.#{poolname}' do
job_name = 'a-job'
component_to_test = 'component-name'
poolname = 'test-pool'
expect { subject.increment("usage_job_component.#{job_name}.#{component_to_test}.#{poolname}") }.to change {
metric, po = subject.get("usage_job_component.#{job_name}.#{component_to_test}.#{poolname}")
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments connect.open' do
expect { subject.increment('connect.open') }.to change {
metric, po = subject.get('connect.open')
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments connect.fail' do
expect { subject.increment('connect.fail') }.to change {
metric, po = subject.get('connect.fail')
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments migrate_from.#{vm_hash[\'host_name\']}' do
vm_hash = { 'host_name': 'testhost.testdomain' }
expect { subject.increment("migrate_from.#{vm_hash['host_name']}") }.to change {
metric, po = subject.get("migrate_from.#{vm_hash['host_name']}")
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments "migrate_to.#{dest_host_name}"' do
dest_host_name = 'testhost.testdomain'
expect { subject.increment("migrate_to.#{dest_host_name}") }.to change {
metric, po = subject.get("migrate_to.#{dest_host_name}")
po.get(labels: metric[:labels])
}.by(1)
end
it 'Increments label api_vm.#{method}.#{subpath}.#{operation}' do
method = 'get'
subpath = 'template'
operation = 'something'
expect { subject.increment("api_vm.#{method}.#{subpath}.#{operation}") }.to change {
metric, po = subject.get("api_vm.#{method}.#{subpath}.#{operation}")
po.get(labels: metric[:labels])
}.by(1)
end
end
describe '#gauge' do
# metrics.gauge("ready.#{pool_name}", $redis.scard("vmpooler__ready__#{pool_name}"))
it 'sets value of ready.#{pool_name} to $redis.scard("vmpooler__ready__#{pool_name}")' do
# is there a specific redis value that should be tested?
pool_name = 'test-pool'
test_value = 42
expect { subject.gauge("ready.#{pool_name}", test_value) }.to change {
metric, po = subject.get("ready.#{pool_name}")
po.get(labels: metric[:labels])
}.from(0).to(42)
end
# metrics.gauge("running.#{pool_name}", $redis.scard("vmpooler__running__#{pool_name}"))
it 'sets value of running.#{pool_name} to $redis.scard("vmpooler__running__#{pool_name}")' do
# is there a specific redis value that should be tested?
pool_name = 'test-pool'
test_value = 42
expect { subject.gauge("running.#{pool_name}", test_value) }.to change {
metric, po = subject.get("running.#{pool_name}")
po.get(labels: metric[:labels])
}.from(0).to(42)
end
end
describe '#timing' do
it 'sets histogram value of time_to_ready_state.#{pool} to finish' do
pool = 'test-pool'
finish = 42
expect { subject.timing("time_to_ready_state.#{pool}", finish) }.to change {
metric, po = subject.get("time_to_ready_state.#{pool}")
po.get(labels: metric[:labels])
}
end
it 'sets histogram value of clone.#{pool} to finish' do
pool = 'test-pool'
finish = 42
expect { subject.timing("clone.#{pool}", finish) }.to change {
metric, po = subject.get("clone.#{pool}")
po.get(labels: metric[:labels])
}
end
it 'sets histogram value of migrate.#{pool} to finish' do
pool = 'test-pool'
finish = 42
expect { subject.timing("migrate.#{pool}", finish) }.to change {
metric, po = subject.get("migrate.#{pool}")
po.get(labels: metric[:labels])
}
end
it 'sets histogram value of destroy.#{pool} to finish' do
pool = 'test-pool'
finish = 42
expect { subject.timing("destroy.#{pool}", finish) }.to change {
metric, po = subject.get("destroy.#{pool}")
po.get(labels: metric[:labels])
}
end
end
end
end
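The new spec file above exercises the statsd-compatible facade of the Prometheus adapter. A hypothetical end-to-end use (pool and template names invented; constructor and call signatures taken from the spec above and from lib/vmpooler.rb):

require 'vmpooler'

logger  = Vmpooler::Logger.new('/dev/null')  # log target invented for the sketch
metrics = Vmpooler::Metrics::Promstats.new(logger, { 'prefix': 'vmpooler' })
metrics.setup_prometheus_metrics(%i[api manager])

metrics.increment('checkout.success.debian-10-x86_64')  # counter, template as label
metrics.gauge('ready.debian-10-x86_64', 7)              # pool-level gauge
metrics.timing('clone.debian-10-x86_64', 42)            # histogram observation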

View file

@ -6,7 +6,7 @@ require 'vmpooler/providers/base'
describe 'Vmpooler::PoolManager::Provider::Base' do
let(:logger) { MockLogger.new }
let(:metrics) { Vmpooler::DummyStatsd.new }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:config) { {} }
let(:provider_name) { 'base' }
let(:provider_options) { { 'param' => 'value' } }

View file

@ -3,7 +3,7 @@ require 'vmpooler/providers/dummy'
describe 'Vmpooler::PoolManager::Provider::Dummy' do
let(:logger) { MockLogger.new }
let(:metrics) { Vmpooler::DummyStatsd.new }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:pool_name) { 'pool1' }
let(:other_pool_name) { 'pool2' }
let(:vm_name) { 'vm1' }

View file

@ -41,7 +41,7 @@ end
describe 'Vmpooler::PoolManager::Provider::VSphere' do
let(:logger) { MockLogger.new }
let(:metrics) { Vmpooler::DummyStatsd.new }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:poolname) { 'pool1'}
let(:provider_options) { { 'param' => 'value' } }
let(:datacenter_name) { 'MockDC' }
@ -79,7 +79,8 @@ EOT
let(:vmname) { 'vm1' }
let(:redis_connection_pool) { Vmpooler::PoolManager::GenericConnectionPool.new(
metrics: metrics,
metric_prefix: 'redis_connection_pool',
connpool_type: 'redis_connection_pool',
connpool_provider: 'testprovider',
size: 1,
timeout: 5
) { MockRedis.new }
@ -167,7 +168,7 @@ EOT
end
it 'should record metrics' do
expect(metrics).to receive(:timing).with('redis_connection_pool.waited', 0)
expect(metrics).to receive(:timing).with('connection_waited.redis_connection_pool.testprovider', 0)
expect(metrics).to receive(:timing).with("destroy.#{pool}", finish)
subject.destroy_vm_and_log(vmname, vm_object, pool, data_ttl)

View file

@ -24,6 +24,7 @@ Gem::Specification.new do |s|
s.add_dependency 'redis', '~> 4.1'
s.add_dependency 'rbvmomi', '~> 2.1'
s.add_dependency 'sinatra', '~> 2.0'
s.add_dependency 'prometheus-client', '~> 2.0'
s.add_dependency 'net-ldap', '~> 0.16'
s.add_dependency 'statsd-ruby', '~> 1.4'
s.add_dependency 'connection_pool', '~> 2.2'

View file

@ -509,6 +509,12 @@
# Expects a boolean value
# (optional; default: false)
#
# - request_logger
# Enable API request logging to the logger
# When enabled, all requests to the API are written to the standard logger.
# Expects a boolean value
# (optional; default: false)
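#
# A hypothetical snippet (the key name comes from this doc; its placement
# under the :config: section is assumed from the other settings in this file):
#
# :config:
#   request_logger: true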
#
# - max_lifetime_upper_limit
# Sets a lifetime upper limit (in hours) for how long the vm lifetime can be set via the API. Lifetime can be set and extended
# so this configuration is used to enforce an upper limit to both the initial lifetime request and/or the extended