(maint) Optimize the status api using redis pipeline

Before this change, looping over many pools would query the Redis backend
once per pool, leading to slow responses from the backend for configurations
with many pools (60+).
Changed the requests to use Redis pipelines: https://redis.io/topics/pipelining
Pipelining has been supported by Redis from the start, so this will not force
a Redis upgrade for users. The pipeline method sends the queries in a single
batch; we then loop over the results. This reduces the number of requests to
Redis by N = the number of pools in the configuration.
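As a rough sketch of the pattern (illustration only, not part of the diff below; it assumes the redis-rb client vmpooler already uses, with the pool list and key prefix mirroring the code in the diff):

# Before: one network round trip per pool (N requests).
ready_counts = pools.map { |pool| redis.scard('vmpooler__ready__' + pool['name']) }

# After: the SCARD commands are queued and flushed in one batch; the replies
# come back as an array in the same order the commands were queued.
ready_counts = redis.pipelined do
  pools.each { |pool| redis.scard('vmpooler__ready__' + pool['name']) }
end

(Newer redis-rb releases yield an explicit pipeline object to the block; the implicit style above matches the client usage in this diff.)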
Samuel Beaulieu 2019-04-18 12:05:42 -05:00
parent 6141cd7ecf
commit 8eb15f8d10
4 changed files with 94 additions and 52 deletions


@@ -1,11 +1,20 @@
module Vmpooler
class API
class Dashboard < Sinatra::Base
helpers do
include Vmpooler::API::Helpers
end
# handle to the App's configuration information
def config
@config ||= Vmpooler::API.settings.config
end
def backend
Vmpooler::API.settings.redis
end
# configuration setting for server hosting graph URLs to view
def graph_server
return @graph_server if @graph_server
@@ -53,10 +62,13 @@ module Vmpooler
content_type :json
result = {}
Vmpooler::API.settings.config[:pools].each do |pool|
pools = Vmpooler::API.settings.config[:pools]
ready_hash = get_list_across_pools_redis_scard(pools, 'vmpooler__ready__', backend)
pools.each do |pool|
result[pool['name']] ||= {}
result[pool['name']]['size'] = pool['size']
result[pool['name']]['ready'] = Vmpooler::API.settings.redis.scard('vmpooler__ready__' + pool['name'])
result[pool['name']]['ready'] = ready_hash[pool['name']]
end
if params[:history]
@@ -91,9 +103,9 @@ module Vmpooler
rescue
end
else
Vmpooler::API.settings.config[:pools].each do |pool|
pools.each do |pool|
result[pool['name']] ||= {}
result[pool['name']]['history'] = [Vmpooler::API.settings.redis.scard('vmpooler__ready__' + pool['name'])]
result[pool['name']]['history'] = [ready_hash[pool['name']]]
end
end
end
@@ -104,8 +116,11 @@ module Vmpooler
content_type :json
result = {}
Vmpooler::API.settings.config[:pools].each do |pool|
running = Vmpooler::API.settings.redis.scard('vmpooler__running__' + pool['name'])
pools = Vmpooler::API.settings.config[:pools]
running_hash = get_list_across_pools_redis_scard(pools, 'vmpooler__running__', backend)
pools.each do |pool|
running = running_hash[pool['name']]
pool['major'] = Regexp.last_match[1] if pool['name'] =~ /^(\w+)\-/
result[pool['major']] ||= {}
result[pool['major']]['running'] = result[pool['major']]['running'].to_i + running.to_i
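
For reference, the 'major' grouping above keys results on everything before the first hyphen in the pool name (the pool name here is a made-up example, not from the diff):

'redhat-7-x86_64' =~ /^(\w+)\-/   # same pattern as in the endpoint above
Regexp.last_match[1]              # => "redhat"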


@@ -151,6 +151,53 @@ module Vmpooler
backend.hvals("vmpooler__#{task}__" + date_str).map(&:to_f)
end
# Takes the pools and a key to run scard on
# returns an integer for the total count
def get_total_across_pools_redis_scard(pools, key, backend)
# using pipelined is much faster than querying each of the pools and adding them
# as we get the result.
res = backend.pipelined do
pools.each do |pool|
backend.scard(key + pool['name'])
end
end
res.inject(0){ |m, x| m+x }.to_i
end
# Takes the pools and a key to run scard on
# returns a hash with each pool name as key and the value being the count as integer
def get_list_across_pools_redis_scard(pools, key, backend)
# using pipelined is much faster than querying each of the pools and adding them
# as we get the result.
temp_hash = {}
res = backend.pipelined do
pools.each do |pool|
backend.scard(key + pool['name'])
end
end
pools.each_with_index do |pool, i|
temp_hash[pool['name']] = res[i].to_i
end
temp_hash
end
# Takes the pools and a key to run hget on
# returns a hash with each pool name as key and the value as string
def get_list_across_pools_redis_hget(pools, key, backend)
# using pipelined is much faster than querying each of the pools and adding them
# as we get the result.
temp_hash = {}
res = backend.pipelined do
pools.each do |pool|
backend.hget(key, pool['name'])
end
end
pools.each_with_index do |pool, i|
temp_hash[pool['name']] = res[i].to_s
end
temp_hash
end
def get_capacity_metrics(pools, backend)
capacity = {
current: 0,
@@ -159,12 +206,11 @@ module Vmpooler
}
pools.each do |pool|
pool['capacity'] = backend.scard('vmpooler__ready__' + pool['name']).to_i
capacity[:current] += pool['capacity']
capacity[:total] += pool['size'].to_i
end
capacity[:current] = get_total_across_pools_redis_scard(pools, 'vmpooler__ready__', backend)
if capacity[:total] > 0
capacity[:percent] = ((capacity[:current].to_f / capacity[:total].to_f) * 100.0).round(1)
end
@@ -183,12 +229,10 @@ module Vmpooler
total: 0
}
pools.each do |pool|
queue[:pending] += backend.scard('vmpooler__pending__' + pool['name']).to_i
queue[:ready] += backend.scard('vmpooler__ready__' + pool['name']).to_i
queue[:running] += backend.scard('vmpooler__running__' + pool['name']).to_i
queue[:completed] += backend.scard('vmpooler__completed__' + pool['name']).to_i
end
queue[:pending] = get_total_across_pools_redis_scard(pools,'vmpooler__pending__', backend)
queue[:ready] = get_total_across_pools_redis_scard(pools, 'vmpooler__ready__', backend)
queue[:running] = get_total_across_pools_redis_scard(pools, 'vmpooler__running__', backend)
queue[:completed] = get_total_across_pools_redis_scard(pools, 'vmpooler__completed__', backend)
queue[:cloning] = backend.get('vmpooler__tasks__clone').to_i
queue[:booting] = queue[:pending].to_i - queue[:cloning].to_i
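
A quick illustration of what the two new scard helpers return (pool names and counts here are hypothetical):

pools = [{ 'name' => 'pool-a' }, { 'name' => 'pool-b' }]

# One pipelined round trip; result keyed by pool name.
get_list_across_pools_redis_scard(pools, 'vmpooler__ready__', backend)
# => { 'pool-a' => 3, 'pool-b' => 0 }

# Same single round trip, summed into one integer.
get_total_across_pools_redis_scard(pools, 'vmpooler__ready__', backend)
# => 3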


@@ -305,13 +305,18 @@ module Vmpooler
# Check for empty pools
result[:pools] = {} unless views and not views.include?("pools")
ready_hash = get_list_across_pools_redis_scard(pools, 'vmpooler__ready__', backend)
running_hash = get_list_across_pools_redis_scard(pools, 'vmpooler__running__', backend)
pending_hash = get_list_across_pools_redis_scard(pools, 'vmpooler__pending__', backend)
lastBoot_hash = get_list_across_pools_redis_hget(pools, 'vmpooler__lastboot', backend)
pools.each do |pool|
# REMIND: move this out of the API and into the back-end
ready = backend.scard('vmpooler__ready__' + pool['name']).to_i
running = backend.scard('vmpooler__running__' + pool['name']).to_i
pending = backend.scard('vmpooler__pending__' + pool['name']).to_i
ready = ready_hash[pool['name']]
running = running_hash[pool['name']]
pending = pending_hash[pool['name']]
max = pool['size']
lastBoot = backend.hget('vmpooler__lastboot',pool['name']).to_s
lastBoot = lastBoot_hash[pool['name']]
aka = pool['alias']
result[:pools][pool['name']] = {
@@ -382,14 +387,9 @@ module Vmpooler
end
# using pipelined is much faster than querying each of the pools
res = backend.pipelined do
poolscopy.each do |pool|
backend.scard('vmpooler__ready__' + pool['name'])
end
end
ready_hash = get_list_across_pools_redis_scard(poolscopy, 'vmpooler__ready__', backend)
res.each_with_index { |ready, i| result[:pools][poolscopy[i]['name']][:ready] = ready }
ready_hash.each { |k, v| result[:pools][k][:ready] = v }
JSON.pretty_generate(Hash[result.sort_by { |k, _v| k }])
end
@@ -401,15 +401,7 @@ module Vmpooler
running: 0,
}
# using pipelined is much faster than querying each of the pools and adding them
# as we get the result.
res = backend.pipelined do
pools.each do |pool|
backend.scard('vmpooler__running__' + pool['name'])
end
end
queue[:running] = res.inject(0){ |m, x| m+x }
queue[:running] = get_total_across_pools_redis_scard(pools, 'vmpooler__running__', backend)
JSON.pretty_generate(queue)
end


@@ -68,6 +68,7 @@ describe Vmpooler::API::Helpers do
describe '#get_capacity_metrics' do
let(:redis) { double('redis') }
let(:backend) { double('backend') }
it 'adds up pools correctly' do
pools = [
@@ -75,8 +76,7 @@ describe Vmpooler::API::Helpers do
{'name' => 'p2', 'size' => 5}
]
allow(redis).to receive(:scard).with('vmpooler__ready__p1').and_return 1
allow(redis).to receive(:scard).with('vmpooler__ready__p2').and_return 1
allow(redis).to receive(:pipelined).with(no_args).and_return [1,1]
expect(subject.get_capacity_metrics(pools, redis)).to eq({current: 2, total: 10, percent: 20.0})
end
@@ -87,8 +87,7 @@ describe Vmpooler::API::Helpers do
{'name' => 'p2', 'size' => 5}
]
allow(redis).to receive(:scard).with('vmpooler__ready__p1').and_return 1
allow(redis).to receive(:scard).with('vmpooler__ready__p2').and_return 0
allow(redis).to receive(:pipelined).with(no_args).and_return [1,0]
expect(subject.get_capacity_metrics(pools, redis)).to eq({current: 1, total: 10, percent: 10.0})
end
@@ -99,13 +98,13 @@ describe Vmpooler::API::Helpers do
{'name' => 'p2', 'size' => 0}
]
allow(redis).to receive(:scard).with('vmpooler__ready__p1').and_return 1
allow(redis).to receive(:scard).with('vmpooler__ready__p2').and_return 0
allow(redis).to receive(:pipelined).with(no_args).and_return [1,0]
expect(subject.get_capacity_metrics(pools, redis)).to eq({current: 1, total: 5, percent: 20.0})
end
it 'handles empty pool array' do
allow(redis).to receive(:pipelined).with(no_args).and_return []
expect(subject.get_capacity_metrics([], redis)).to eq({current: 0, total: 0, percent: 0})
end
end
@@ -114,7 +113,7 @@ describe Vmpooler::API::Helpers do
let(:redis) { double('redis') }
it 'handles empty pool array' do
allow(redis).to receive(:scard).and_return 0
allow(redis).to receive(:pipelined).with(no_args).and_return [0]
allow(redis).to receive(:get).and_return 0
expect(subject.get_queue_metrics([], redis)).to eq({pending: 0, cloning: 0, booting: 0, ready: 0, running: 0, completed: 0, total: 0})
@@ -126,11 +125,7 @@ describe Vmpooler::API::Helpers do
{'name' => 'p2'}
]
pools.each do |p|
%w(pending ready running completed).each do |action|
allow(redis).to receive(:scard).with('vmpooler__' + action + '__' + p['name']).and_return 1
end
end
allow(redis).to receive(:pipelined).with(no_args).and_return [1,1]
allow(redis).to receive(:get).and_return 1
expect(subject.get_queue_metrics(pools, redis)).to eq({pending: 2, cloning: 1, booting: 1, ready: 2, running: 2, completed: 2, total: 8})
@@ -142,11 +137,7 @@ describe Vmpooler::API::Helpers do
{'name' => 'p2'}
]
pools.each do |p|
%w(pending ready running completed).each do |action|
allow(redis).to receive(:scard).with('vmpooler__' + action + '__' + p['name']).and_return 1
end
end
allow(redis).to receive(:pipelined).with(no_args).and_return [1,1]
allow(redis).to receive(:get).and_return 5
expect(subject.get_queue_metrics(pools, redis)).to eq({pending: 2, cloning: 5, booting: 0, ready: 2, running: 2, completed: 2, total: 8})
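
One note for anyone extending these specs: with pipelined stubbed to return an array directly, the per-key scard stubs are no longer exercised, so the stubbed array has to be ordered to match the pools passed in. A minimal sketch (doubles and values are illustrative, not from the diff):

pools = [{ 'name' => 'p1' }, { 'name' => 'p2' }]
# Stands in for the replies to the queued SCARD calls, in pool order: p1 => 2, p2 => 5.
allow(redis).to receive(:pipelined).with(no_args).and_return [2, 5]
expect(subject.get_list_across_pools_redis_scard(pools, 'vmpooler__ready__', redis)).to eq({ 'p1' => 2, 'p2' => 5 })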