diff --git a/bin/vmpooler b/bin/vmpooler index 3730c1f..a54c99b 100755 --- a/bin/vmpooler +++ b/bin/vmpooler @@ -11,7 +11,7 @@ redis_connection_pool_size = config[:redis]['connection_pool_size'] redis_connection_pool_timeout = config[:redis]['connection_pool_timeout'] logger_file = config[:config]['logfile'] -metrics = Vmpooler.new_metrics(config) +metrics = Vmpooler::Metrics.init(config) torun_threads = [] if ARGV.count == 0 diff --git a/lib/vmpooler.rb b/lib/vmpooler.rb index aa65ff3..b7dff93 100644 --- a/lib/vmpooler.rb +++ b/lib/vmpooler.rb @@ -15,7 +15,10 @@ module Vmpooler require 'timeout' require 'yaml' - %w[api graphite logger pool_manager statsd dummy_statsd generic_connection_pool].each do |lib| + require 'prometheus/middleware/collector' + require 'prometheus/middleware/exporter' + + %w[api metrics logger pool_manager generic_connection_pool].each do |lib| require "vmpooler/#{lib}" end @@ -103,6 +106,12 @@ module Vmpooler parsed_config[:graphite]['prefix'] = ENV['GRAPHITE_PREFIX'] if ENV['GRAPHITE_PREFIX'] parsed_config[:graphite]['port'] = string_to_int(ENV['GRAPHITE_PORT']) if ENV['GRAPHITE_PORT'] + if parsed_config.key? :prometheus +# parsed_config[:prometheus]['endpoint'] = ENV['PROMETHEUS_ENDPOINT'] if ENV['PROMETHEUS_ENDPOINT'] +# parsed_config[:prometheus]['prefix'] = ENV['PROMETHEUS_PREFIX'] if ENV['PROMETHEUS_PREFIX'] +# parsed_config[:prometheus]['metrics_prefix'] = ENV['PROMETHEUS_METRICS_PREFIX'] if ENV['PROMETHEUS_METRICS_PREFIX'] + end + parsed_config[:auth] = parsed_config[:auth] || {} if ENV['AUTH_PROVIDER'] if parsed_config.key? 
# frozen_string_literal: true

module Vmpooler
  # Base class for the metrics implementations. Also acts as the factory
  # that selects the concrete backend from the parsed configuration.
  class Metrics
    # Build the metrics object appropriate for +params+.
    #
    # Precedence: statsd > graphite > prometheus > dummy (no-op).
    def self.init(params)
      if params[:statsd]
        Vmpooler::Statsd.new(params[:statsd])
      elsif params[:graphite]
        Vmpooler::Graphite.new(params[:graphite])
      elsif params[:prometheus]
        Vmpooler::Promstats.new(params[:prometheus])
      else
        Vmpooler::DummyStatsd.new
      end
    end
  end
end

# Concrete implementations are required after the base class definition
# so they can subclass Vmpooler::Metrics.
require 'vmpooler/metrics/statsd'
require 'vmpooler/metrics/dummy_statsd'
require 'vmpooler/metrics/graphite'
require 'vmpooler/metrics/promstats'
# frozen_string_literal: true

module Vmpooler
  # Prometheus metrics backend.
  #
  # Translates vmpooler's dotted metric labels (e.g. "checkout.success.<pool>")
  # into counters, gauges and histograms registered against the default
  # Prometheus::Client registry.
  class Promstats < Metrics
    attr_reader :prefix, :endpoint, :metrics_prefix

    # Constants identifying the Prometheus metric type of each entry
    # in vmpooler_metrics_table.
    M_COUNTER = 1
    M_GAUGE = 2
    M_SUMMARY = 3
    M_HISTOGRAM = 4

    # Customised Bucket set to use for the Pooler clone times set to more appropriate intervals.
    POOLER_TIME_BUCKETS = [1.0, 2.5, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0, 2000.0].freeze
    # Same for Redis connection times - this is the same as the current Prometheus Default.
    # https://github.com/prometheus/client_ruby/blob/master/lib/prometheus/client/histogram.rb#L14
    REDIS_CONNECT_BUCKETS = [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10].freeze

    # @param params [Hash] string-keyed configuration: 'prefix',
    #   'metrics_prefix', 'endpoint' and (optionally) 'logger'. The logger
    #   entry is deleted from the hash so it is not mistaken for metric config.
    def initialize(params = {})
      @prefix = params['prefix'] || 'vmpooler'
      @metrics_prefix = params['metrics_prefix'] || 'vmpooler'
      @endpoint = params['endpoint'] || '/prometheus'

      @logger = params.delete('logger') || Logger.new(STDOUT)

      # Populated by setup_prometheus_metrics. Initialised here (a class-level
      # assignment would never be visible to instances) so that find_metric
      # raises its clear "Invalid Metric" error rather than NoMethodError on
      # nil if called before setup.
      @p_metrics = {}

      # Set up prometheus registry and data structures
      @prometheus = Prometheus::Client.registry
    end

    # Metrics structure used to register the metrics and also translate/interpret the incoming metrics.
    def vmpooler_metrics_table
      {
        errors: {
          mtype: M_COUNTER,
          docstring: 'Count of Errors for pool',
          prom_metric_prefix: "#{@metrics_prefix}_errors",
          metric_suffixes: {
            markedasfailed: 'Timeout waiting for instance to initialise',
            duplicatehostname: 'Unable to create instance due to duplicate hostname'
          },
          param_labels: %i[template_name]
        },
        usage: {
          mtype: M_COUNTER,
          docstring: 'Number of Pool Instances of this created',
          prom_metric_prefix: "#{@metrics_prefix}_usage",
          param_labels: %i[user poolname]
        },
        user: {
          # This metrics is leads to a lot of label values which is likely to challenge data storage
          # on prometheus - see Best Practices: https://prometheus.io/docs/practices/naming/#labels
          # So it is likely that this metric may need to be simplified or broken into a number
          # of smaller metrics to capture the detail without challenging prometheus
          mtype: M_COUNTER,
          docstring: 'vmpooler user counters',
          prom_metric_prefix: "#{@metrics_prefix}_user",
          param_labels: %i[user instancex value_stream branch project job_name component_to_test poolname]
        },
        checkout: {
          mtype: M_COUNTER,
          docstring: 'Pool Checkout counts',
          prom_metric_prefix: "#{@metrics_prefix}_checkout",
          metric_suffixes: {
            nonresponsive: 'Checkout Failed - Non Responsive Machine',
            empty: 'Checkout Failed - no machine',
            success: 'Successful Checkout',
            invalid: 'Checkout Failed - Invalid Template'
          },
          param_labels: %i[poolname]
        },
        config: {
          mtype: M_COUNTER,
          docstring: 'vmpooler Pool Configuration Request',
          prom_metric_prefix: "#{@metrics_prefix}_config",
          metric_suffixes: { invalid: 'Invalid' },
          param_labels: %i[poolname]
        },
        poolreset: {
          mtype: M_COUNTER,
          docstring: 'Pool Reset Counter',
          prom_metric_prefix: "#{@metrics_prefix}_poolreset",
          metric_suffixes: { invalid: 'Invalid Pool' },
          param_labels: %i[poolname]
        },
        connect: {
          mtype: M_COUNTER,
          docstring: 'vmpooler Connect (to vSphere)',
          prom_metric_prefix: "#{@metrics_prefix}_connect",
          metric_suffixes: {
            open: 'Connect Succeeded',
            fail: 'Connect Failed'
          },
          param_labels: []
        },
        migrate_from: {
          mtype: M_COUNTER,
          docstring: 'vmpooler Machine Migrated from',
          prom_metric_prefix: "#{@metrics_prefix}_migrate_from",
          param_labels: %i[host_name]
        },
        migrate_to: {
          mtype: M_COUNTER,
          docstring: 'vmpooler Machine Migrated to',
          prom_metric_prefix: "#{@metrics_prefix}_migrate_to",
          param_labels: %i[host_name]
        },
        ready: {
          mtype: M_GAUGE,
          docstring: 'vmpooler Number of Machines in Ready State',
          prom_metric_prefix: "#{@metrics_prefix}_ready",
          param_labels: %i[poolname]
        },
        running: {
          mtype: M_GAUGE,
          docstring: 'vmpooler Number of Machines Running',
          prom_metric_prefix: "#{@metrics_prefix}_running",
          param_labels: %i[poolname]
        },
        connection_available: {
          mtype: M_GAUGE,
          docstring: 'vmpooler Redis Connections Available',
          prom_metric_prefix: "#{@metrics_prefix}_connection_available",
          param_labels: %i[type provider]
        },
        time_to_ready_state: {
          mtype: M_HISTOGRAM,
          buckets: POOLER_TIME_BUCKETS,
          docstring: 'Time taken for machine to reach ready state for pool',
          prom_metric_prefix: "#{@metrics_prefix}_time_to_ready_state",
          param_labels: %i[poolname]
        },
        migrate: {
          mtype: M_HISTOGRAM,
          buckets: POOLER_TIME_BUCKETS,
          docstring: 'vmpooler Time taken to migrate machine for pool',
          prom_metric_prefix: "#{@metrics_prefix}_migrate",
          param_labels: %i[poolname]
        },
        clone: {
          mtype: M_HISTOGRAM,
          buckets: POOLER_TIME_BUCKETS,
          docstring: 'vmpooler Time taken to Clone Machine',
          prom_metric_prefix: "#{@metrics_prefix}_clone",
          param_labels: %i[poolname]
        },
        destroy: {
          mtype: M_HISTOGRAM,
          buckets: POOLER_TIME_BUCKETS,
          docstring: 'vmpooler Time taken to Destroy Machine',
          prom_metric_prefix: "#{@metrics_prefix}_destroy",
          param_labels: %i[poolname]
        },
        connection_waited: {
          mtype: M_HISTOGRAM,
          buckets: REDIS_CONNECT_BUCKETS,
          docstring: 'vmpooler Redis Connection Wait Time',
          prom_metric_prefix: "#{@metrics_prefix}_connection_waited",
          param_labels: %i[type provider]
        }
      }
    end

    # Helper to add individual prom metric.
    # Allow Histograms to specify the bucket size.
    def add_prometheus_metric(metric_spec, name, docstring)
      metric_class =
        case metric_spec[:mtype]
        when M_COUNTER then Prometheus::Client::Counter
        when M_GAUGE then Prometheus::Client::Gauge
        when M_SUMMARY then Prometheus::Client::Summary
        when M_HISTOGRAM then Prometheus::Client::Histogram
        else
          raise("Unable to register metric #{name} with metric type #{metric_spec[:mtype]}")
        end

      # Common constructor arguments; histograms may add a custom bucket set.
      args = {
        docstring: docstring,
        labels: metric_spec[:param_labels] + [:vmpooler_instance],
        preset_labels: { vmpooler_instance: @prefix }
      }
      args[:buckets] = metric_spec[:buckets] if (metric_spec[:mtype] == M_HISTOGRAM) && (metric_spec.key? :buckets)

      @prometheus.register(metric_class.new(name.to_sym, **args))
    end

    # Top level method to register all the prometheus metrics.
    def setup_prometheus_metrics
      @p_metrics = vmpooler_metrics_table
      @p_metrics.each do |_name, metric_spec|
        if metric_spec.key? :metric_suffixes
          # Iterate thru the suffixes if provided to register multiple counters here.
          metric_spec[:metric_suffixes].each do |suffix, description|
            add_prometheus_metric(
              metric_spec,
              "#{metric_spec[:prom_metric_prefix]}_#{suffix}",
              "#{metric_spec[:docstring]} #{description}"
            )
          end
        else
          # No Additional counter suffixes so register this as metric.
          add_prometheus_metric(
            metric_spec,
            metric_spec[:prom_metric_prefix],
            metric_spec[:docstring]
          )
        end
      end
    end

    # Locate a metric and check/interpret the sub-fields.
    # Returns a copy of the metric spec with :metric_name and :labels filled in.
    def find_metric(label)
      sublabels = label.split('.')
      metric_key = sublabels.shift.to_sym
      raise("Invalid Metric #{metric_key} for #{label}") unless @p_metrics.key? metric_key

      metric = @p_metrics[metric_key].clone

      if metric.key? :metric_suffixes
        metric_subkey = sublabels.shift.to_sym
        raise("Invalid Metric #{metric_key}_#{metric_subkey} for #{label}") unless metric[:metric_suffixes].key? metric_subkey

        metric[:metric_name] = "#{metric[:prom_metric_prefix]}_#{metric_subkey}"
      else
        metric[:metric_name] = metric[:prom_metric_prefix]
      end

      # Check if we are looking for a parameter value at last element.
      if metric.key? :param_labels
        metric[:labels] = {}
        # Special case processing here - if there is only one parameter label then make sure
        # we append all of the remaining contents of the metric with "." separators to ensure
        # we get full nodenames (e.g. for Migration to node operations)
        if metric[:param_labels].length == 1
          metric[:labels][metric[:param_labels].first] = sublabels.join('.')
        else
          metric[:param_labels].reverse_each do |param_label|
            metric[:labels][param_label] = sublabels.pop
          end
        end
      end
      metric
    end

    # Helper to get lab metrics.
    # Returns [resolved metric spec, registered prometheus metric object].
    def get(label)
      metric = find_metric(label)
      [metric, @prometheus.get(metric[:metric_name])]
    end

    # Note - Catch and log metrics failures so they can be noted, but don't interrupt vmpooler operation.
    def increment(label)
      begin
        counter_metric, c = get(label)
        c.increment(labels: counter_metric[:labels])
      rescue StandardError => e
        @logger.log('s', "[!] prometheus error logging metric #{label} increment : #{e}")
      end
    end

    # Set a gauge metric to +value+ (coerced to integer); nil values are ignored.
    def gauge(label, value)
      begin
        unless value.nil?
          gauge_metric, g = get(label)
          g.set(value.to_i, labels: gauge_metric[:labels])
        end
      rescue StandardError => e
        @logger.log('s', "[!] prometheus error logging gauge #{label}, value #{value}: #{e}")
      end
    end

    # Record a duration observation in a histogram; nil durations are ignored.
    def timing(label, duration)
      begin
        # https://prometheus.io/docs/practices/histograms/
        unless duration.nil?
          histogram_metric, hm = get(label)
          hm.observe(duration.to_f, labels: histogram_metric[:labels])
        end
      rescue StandardError => e
        @logger.log('s', "[!] prometheus error logging timing event label #{label}, duration #{duration}: #{e}")
      end
    end
  end
end
# frozen_string_literal: true

require 'spec_helper'

describe 'prometheus' do
  logger = MockLogger.new
  # Promstats#initialize takes a single string-keyed params hash; the logger
  # travels inside the hash (initialize deletes the 'logger' entry), not as a
  # second positional argument.
  params = { 'prefix' => 'test', 'metrics_prefix' => 'mtest', 'endpoint' => 'eptest' }
  subject = Vmpooler::Promstats.new(params.merge('logger' => logger))
  let(:logger) { MockLogger.new }

  describe '#initialise' do
    it 'returns a Metrics object' do
      expect(Vmpooler::Promstats.new('logger' => logger)).to be_a(Vmpooler::Metrics)
    end
  end

  describe '#find_metric' do
    context "Single Value Parameters" do
      let!(:foo_metrics) do
        { metric_suffixes: { bar: 'baz' },
          param_labels: %i[first second last] }
      end
      let!(:labels_hash) { { labels: { :first => nil, :second => nil, :last => nil } } }
      before { subject.instance_variable_set(:@p_metrics, { foo: foo_metrics }) }

      it 'returns the metric for a given label including parsed labels' do
        expect(subject.find_metric('foo.bar')).to include(metric_name: '_bar')
        expect(subject.find_metric('foo.bar')).to include(foo_metrics)
        expect(subject.find_metric('foo.bar')).to include(labels_hash)
      end

      it 'raises an error when the given label is not present in metrics' do
        expect { subject.find_metric('bogus') }.to raise_error(RuntimeError, 'Invalid Metric bogus for bogus')
      end

      it 'raises an error when the given label specifies metric_suffixes but the following suffix not present in metrics' do
        expect { subject.find_metric('foo.metric_suffixes.bogus') }.to raise_error(RuntimeError, 'Invalid Metric foo_metric_suffixes for foo.metric_suffixes.bogus')
      end
    end

    context "Node Name Handling" do
      let!(:node_metrics) do
        { metric_name: 'connection_to',
          param_labels: %i[node] }
      end
      let!(:nodename_hash) { { labels: { :node => 'test.bar.net' } } }
      before { subject.instance_variable_set(:@p_metrics, { connection_to: node_metrics }) }

      it 'Return final remaining fields (e.g. fqdn) in last label' do
        expect(subject.find_metric('connection_to.test.bar.net')).to include(nodename_hash)
      end
    end
  end

  context 'setup_prometheus_metrics' do
    before(:all) do
      Prometheus::Client.config.data_store = Prometheus::Client::DataStores::Synchronized.new
      subject.setup_prometheus_metrics
    end

    describe '#setup_prometheus_metrics' do
      it 'calls add_prometheus_metric for each item in list' do
        Prometheus::Client.config.data_store = Prometheus::Client::DataStores::Synchronized.new
        expect(subject).to receive(:add_prometheus_metric).at_least(subject.vmpooler_metrics_table.size).times
        subject.setup_prometheus_metrics
      end
    end

    describe '#increment' do
      it 'Increments checkout.nonresponsive.#{template_backend}' do
        template_backend = 'test'
        expect { subject.increment("checkout.nonresponsive.#{template_backend}") }.to change {
          metric, po = subject.get("checkout.nonresponsive.#{template_backend}")
          po.get(labels: metric[:labels])
        }.by(1)
      end
      it 'Increments checkout.empty. + requested' do
        requested = 'test'
        expect { subject.increment('checkout.empty.' + requested) }.to change {
          metric, po = subject.get('checkout.empty.' + requested)
          po.get(labels: metric[:labels])
        }.by(1)
      end
      it 'Increments checkout.success. + vmtemplate' do
        vmtemplate = 'test-template'
        expect { subject.increment('checkout.success.' + vmtemplate) }.to change {
          metric, po = subject.get('checkout.success.' + vmtemplate)
          po.get(labels: metric[:labels])
        }.by(1)
      end
      it 'Increments checkout.invalid. + bad_template' do
        bad_template = 'test-template'
        expect { subject.increment('checkout.invalid.' + bad_template) }.to change {
          metric, po = subject.get('checkout.invalid.' + bad_template)
          po.get(labels: metric[:labels])
        }.by(1)
      end
      it 'Increments checkout.invalid.unknown' do
        expect { subject.increment('checkout.invalid.unknown') }.to change {
          metric, po = subject.get('checkout.invalid.unknown')
          po.get(labels: metric[:labels])
        }.by(1)
      end
      it 'Increments config.invalid.#{bad_template}' do
        bad_template = 'test-template'
        expect { subject.increment("config.invalid.#{bad_template}") }.to change {
          metric, po = subject.get("config.invalid.#{bad_template}")
          po.get(labels: metric[:labels])
        }.by(1)
      end
      it 'Increments config.invalid.unknown' do
        expect { subject.increment('config.invalid.unknown') }.to change {
          metric, po = subject.get('config.invalid.unknown')
          po.get(labels: metric[:labels])
        }.by(1)
      end
      it 'Increments poolreset.invalid.#{bad_pool}' do
        bad_pool = 'test-pool'
        expect { subject.increment("poolreset.invalid.#{bad_pool}") }.to change {
          metric, po = subject.get("poolreset.invalid.#{bad_pool}")
          po.get(labels: metric[:labels])
        }.by(1)
      end
      it 'Increments poolreset.invalid.unknown' do
        expect { subject.increment('poolreset.invalid.unknown') }.to change {
          metric, po = subject.get('poolreset.invalid.unknown')
          po.get(labels: metric[:labels])
        }.by(1)
      end
      it 'Increments errors.markedasfailed.#{pool}' do
        pool = 'test-pool'
        expect { subject.increment("errors.markedasfailed.#{pool}") }.to change {
          metric, po = subject.get("errors.markedasfailed.#{pool}")
          po.get(labels: metric[:labels])
        }.by(1)
      end
      it 'Increments errors.duplicatehostname.#{pool_name}' do
        pool_name = 'test-pool'
        expect { subject.increment("errors.duplicatehostname.#{pool_name}") }.to change {
          metric, po = subject.get("errors.duplicatehostname.#{pool_name}")
          po.get(labels: metric[:labels])
        }.by(1)
      end
      it 'Increments usage.#{user}.#{poolname}' do
        user = 'myuser'
        poolname = 'test-pool'
        expect { subject.increment("usage.#{user}.#{poolname}") }.to change {
          metric, po = subject.get("usage.#{user}.#{poolname}")
          po.get(labels: metric[:labels])
        }.by(1)
      end
      it 'Increments label :user' do
        # subject.increment(:user, :instance, :value_stream, :branch, :project, :job_name, :component_to_test, :poolname) - showing labels here
        pending 'increment only supports a string containing a dot separator'
        expect { subject.increment(:user) }.to change {
          metric, po = subject.get(:user)
          po.get(labels: metric[:labels])
        }.by(1)
      end
      it 'Increments connect.open' do
        expect { subject.increment('connect.open') }.to change {
          metric, po = subject.get('connect.open')
          po.get(labels: metric[:labels])
        }.by(1)
      end
      it 'Increments connect.fail' do
        expect { subject.increment('connect.fail') }.to change {
          metric, po = subject.get('connect.fail')
          po.get(labels: metric[:labels])
        }.by(1)
      end
      it 'Increments migrate_from.#{vm_hash[\'host_name\']}' do
        vm_hash = { 'host_name': 'testhost.testdomain' }
        expect { subject.increment("migrate_from.#{vm_hash['host_name']}") }.to change {
          metric, po = subject.get("migrate_from.#{vm_hash['host_name']}")
          po.get(labels: metric[:labels])
        }.by(1)
      end
      it 'Increments "migrate_to.#{dest_host_name}"' do
        dest_host_name = 'testhost.testdomain'
        expect { subject.increment("migrate_to.#{dest_host_name}") }.to change {
          metric, po = subject.get("migrate_to.#{dest_host_name}")
          po.get(labels: metric[:labels])
        }.by(1)
      end
    end

    describe '#gauge' do
      # metrics.gauge("ready.#{pool_name}", $redis.scard("vmpooler__ready__#{pool_name}"))
      it 'sets value of ready.#{pool_name} to $redis.scard("vmpooler__ready__#{pool_name}"))' do
        # is there a specific redis value that should be tested?
        pool_name = 'test-pool'
        test_value = 42
        expect { subject.gauge("ready.#{pool_name}", test_value) }.to change {
          metric, po = subject.get("ready.#{pool_name}")
          po.get(labels: metric[:labels])
        }.from(0).to(42)
      end
      # metrics.gauge("running.#{pool_name}", $redis.scard("vmpooler__running__#{pool_name}"))
      it 'sets value of running.#{pool_name} to $redis.scard("vmpooler__running__#{pool_name}"))' do
        # is there a specific redis value that should be tested?
        pool_name = 'test-pool'
        test_value = 42
        expect { subject.gauge("running.#{pool_name}", test_value) }.to change {
          metric, po = subject.get("running.#{pool_name}")
          po.get(labels: metric[:labels])
        }.from(0).to(42)
      end
    end

    describe '#timing' do
      it 'sets histogram value of time_to_ready_state.#{pool} to finish' do
        pool = 'test-pool'
        finish = 42
        expect { subject.timing("time_to_ready_state.#{pool}", finish) }.to change {
          metric, po = subject.get("time_to_ready_state.#{pool}")
          po.get(labels: metric[:labels])
        }
      end
      it 'sets histogram value of clone.#{pool} to finish' do
        pool = 'test-pool'
        finish = 42
        expect { subject.timing("clone.#{pool}", finish) }.to change {
          metric, po = subject.get("clone.#{pool}")
          po.get(labels: metric[:labels])
        }
      end
      it 'sets histogram value of migrate.#{pool} to finish' do
        pool = 'test-pool'
        finish = 42
        expect { subject.timing("migrate.#{pool}", finish) }.to change {
          metric, po = subject.get("migrate.#{pool}")
          po.get(labels: metric[:labels])
        }
      end
      it 'sets histogram value of destroy.#{pool} to finish' do
        pool = 'test-pool'
        finish = 42
        expect { subject.timing("destroy.#{pool}", finish) }.to change {
          metric, po = subject.get("destroy.#{pool}")
          po.get(labels: metric[:labels])
        }
      end
    end
  end
end