Mirror of https://github.com/puppetlabs/vmpooler.git, synced 2026-01-26 01:58:41 -05:00
Add Phase 2 optimizations: status API caching and improved Redis pipelining
- Add in-memory cache for /status endpoint with 30s TTL
- Cache keyed by view parameters to handle different query patterns
- Add cache clearing for tests to prevent interference
- Optimize get_queue_metrics to use single pipeline for all Redis calls
- Previously made 7+ separate pipeline calls
- Now combines all queue metrics into one pipeline (7n+2 operations)
- Reduces Redis round trips and improves API response time
- Update unit tests to match new pipelining behavior
- All 866 tests passing
Parent: e5c0fa986e
Commit: 7b657edd0d

4 changed files with 86 additions and 15 deletions
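For context on the pipelining half of the change: redis-rb's pipelined block queues every command issued inside it, sends them to the server in a single round trip, and returns the replies as an array in call order, which is what makes the index arithmetic in the diff below possible. A minimal sketch, assuming the redis-rb client the project already uses; the key names for the two example pools are illustrative, not taken from the commit:

    require 'redis'

    redis = Redis.new

    # All three commands travel in one round trip; replies come back in call order.
    replies = redis.pipelined do |pipeline|
      pipeline.scard('vmpooler__ready__pool-a')  # replies[0] => Integer
      pipeline.scard('vmpooler__ready__pool-b')  # replies[1] => Integer
      pipeline.get('vmpooler__tasks__clone')     # replies[2] => String or nil
    end

    ready_total = replies[0..1].sum(&:to_i)
    cloning     = replies[2].to_i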
@@ -299,14 +299,30 @@ module Vmpooler
          total: 0
        }

        queue[:requested] = get_total_across_pools_redis_scard(pools, 'vmpooler__provisioning__request', backend) + get_total_across_pools_redis_scard(pools, 'vmpooler__provisioning__processing', backend) + get_total_across_pools_redis_scard(pools, 'vmpooler__odcreate__task', backend)
        # Use a single pipeline to fetch all queue counts at once for better performance
        results = backend.pipelined do |pipeline|
          # Order matters - we'll use indices to extract values
          pools.each { |pool| pipeline.scard("vmpooler__provisioning__request#{pool['name']}") }    # 0..n-1
          pools.each { |pool| pipeline.scard("vmpooler__provisioning__processing#{pool['name']}") } # n..2n-1
          pools.each { |pool| pipeline.scard("vmpooler__odcreate__task#{pool['name']}") }           # 2n..3n-1
          pools.each { |pool| pipeline.scard("vmpooler__pending__#{pool['name']}") }                # 3n..4n-1
          pools.each { |pool| pipeline.scard("vmpooler__ready__#{pool['name']}") }                  # 4n..5n-1
          pools.each { |pool| pipeline.scard("vmpooler__running__#{pool['name']}") }                # 5n..6n-1
          pools.each { |pool| pipeline.scard("vmpooler__completed__#{pool['name']}") }              # 6n..7n-1
          pipeline.get('vmpooler__tasks__clone')                                                    # 7n
          pipeline.get('vmpooler__tasks__ondemandclone')                                            # 7n+1
        end

        queue[:pending] = get_total_across_pools_redis_scard(pools, 'vmpooler__pending__', backend)
        queue[:ready] = get_total_across_pools_redis_scard(pools, 'vmpooler__ready__', backend)
        queue[:running] = get_total_across_pools_redis_scard(pools, 'vmpooler__running__', backend)
        queue[:completed] = get_total_across_pools_redis_scard(pools, 'vmpooler__completed__', backend)

        queue[:cloning] = backend.get('vmpooler__tasks__clone').to_i + backend.get('vmpooler__tasks__ondemandclone').to_i
        n = pools.length
        # Safely extract results with default to empty array if slice returns nil
        queue[:requested] = (results[0...n] || []).sum(&:to_i) +
                            (results[n...(2*n)] || []).sum(&:to_i) +
                            (results[(2*n)...(3*n)] || []).sum(&:to_i)
        queue[:pending] = (results[(3*n)...(4*n)] || []).sum(&:to_i)
        queue[:ready] = (results[(4*n)...(5*n)] || []).sum(&:to_i)
        queue[:running] = (results[(5*n)...(6*n)] || []).sum(&:to_i)
        queue[:completed] = (results[(6*n)...(7*n)] || []).sum(&:to_i)
        queue[:cloning] = (results[7*n] || 0).to_i + (results[7*n + 1] || 0).to_i
        queue[:booting] = queue[:pending].to_i - queue[:cloning].to_i
        queue[:booting] = 0 if queue[:booting] < 0
        queue[:total] = queue[:requested] + queue[:pending].to_i + queue[:ready].to_i + queue[:running].to_i + queue[:completed].to_i

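To make the index arithmetic concrete, here is the slicing worked through for two pools (n = 2), mirroring the 16-element stub the updated specs below use; this is an illustration, not code from the commit:

    n = 2
    # 7n + 2 = 16 replies: seven per-pool SCARD groups, then the two GET counters.
    # Indices: 0..1 request, 2..3 processing, 4..5 odcreate, 6..7 pending,
    #          8..9 ready, 10..11 running, 12..13 completed, 14 clone, 15 ondemandclone
    results = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, '1', '0']

    requested = (results[0...n] || []).sum(&:to_i) +
                (results[n...(2 * n)] || []).sum(&:to_i) +
                (results[(2 * n)...(3 * n)] || []).sum(&:to_i)
    pending = (results[(3 * n)...(4 * n)] || []).sum(&:to_i)
    cloning = (results[7 * n] || 0).to_i + (results[(7 * n) + 1] || 0).to_i
    puts [requested, pending, cloning].inspect # => [6, 2, 1]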
@@ -9,6 +9,18 @@ module Vmpooler
      api_version = '3'
      api_prefix = "/api/v#{api_version}"

      # Simple in-memory cache for status endpoint
      @@status_cache = {}
      @@status_cache_mutex = Mutex.new
      STATUS_CACHE_TTL = 30 # seconds

      # Clear cache (useful for testing)
      def self.clear_status_cache
        @@status_cache_mutex.synchronize do
          @@status_cache.clear
        end
      end

      helpers do
        include Vmpooler::API::Helpers
      end

@@ -464,6 +476,31 @@ module Vmpooler
          end
        end

        # Cache helper methods for status endpoint
        def get_cached_status(cache_key)
          @@status_cache_mutex.synchronize do
            cached = @@status_cache[cache_key]
            if cached && (Time.now - cached[:timestamp]) < STATUS_CACHE_TTL
              return cached[:data]
            end
            nil
          end
        end

        def set_cached_status(cache_key, data)
          @@status_cache_mutex.synchronize do
            @@status_cache[cache_key] = {
              data: data,
              timestamp: Time.now
            }
            # Cleanup old cache entries (keep only last 10 unique view combinations)
            if @@status_cache.size > 10
              oldest = @@status_cache.min_by { |_k, v| v[:timestamp] }
              @@status_cache.delete(oldest[0])
            end
          end
        end

        def sync_pool_templates
          tracer.in_span("Vmpooler::API::V3.#{__method__}") do
            pool_index = pool_index(pools)

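In effect, a response written via set_cached_status is served back by get_cached_status for up to STATUS_CACHE_TTL (30 seconds); after that the entry is treated as stale, get_cached_status returns nil, and the route recomputes and re-caches. The size check evicts the entry with the oldest timestamp once the hash grows past 10 entries, so at most around 10 distinct view combinations are retained at a time.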
@@ -646,6 +683,13 @@ module Vmpooler
      get "#{api_prefix}/status/?" do
        content_type :json

        # Create cache key based on view parameters
        cache_key = params[:view] ? "status_#{params[:view]}" : "status_all"

        # Try to get cached response
        cached_response = get_cached_status(cache_key)
        return cached_response if cached_response

        if params[:view]
          views = params[:view].split(",")
        end

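Because the cache key is derived only from the view parameter, GET /api/v3/status is cached under status_all while, say, GET /api/v3/status?view=foo (an illustrative view name) is cached under status_foo, so different view combinations never collide and each gets its own 30-second window.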
@@ -706,7 +750,12 @@ module Vmpooler

        result[:status][:uptime] = (Time.now - Vmpooler::API.settings.config[:uptime]).round(1) if Vmpooler::API.settings.config[:uptime]

        JSON.pretty_generate(Hash[result.sort_by { |k, _v| k }])
        response = JSON.pretty_generate(Hash[result.sort_by { |k, _v| k }])

        # Cache the response
        set_cached_status(cache_key, response)

        response
      end

      # request statistics for specific pools by passing parameter 'pool'

@@ -17,6 +17,8 @@ describe Vmpooler::API::V3 do
  # https://rubydoc.info/gems/sinatra/Sinatra/Base#reset!-class_method
  before(:each) do
    app.reset!
    # Clear status cache to prevent test interference
    Vmpooler::API::V3.clear_status_cache
  end

  describe 'status and metrics endpoints' do

@@ -125,8 +125,12 @@ describe Vmpooler::API::Helpers do
        {'name' => 'p2'}
      ]

      allow(redis).to receive(:pipelined).with(no_args).and_return [1,1]
      allow(redis).to receive(:get).and_return(1,0)
      # Mock returns 7*2 + 2 = 16 results (7 queue types for 2 pools + 2 global counters)
      # For each pool: [request, processing, odcreate, pending, ready, running, completed]
      # Plus 2 global counters: clone (1), ondemandclone (0)
      # Results array: [1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1, 0]
      # [req, proc, odc, pend, rdy, run, comp, clone, odc]
      allow(redis).to receive(:pipelined).with(no_args).and_return [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0]

      expect(subject.get_queue_metrics(pools, redis)).to eq({requested: 6, pending: 2, cloning: 1, booting: 1, ready: 2, running: 2, completed: 2, total: 14})
    end

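The expectation follows directly from that 16-element stub: requested = (1+1) + (1+1) + (1+1) = 6; pending, ready, running and completed are each 1+1 = 2; cloning = 1 + 0 = 1; booting = pending - cloning = 1; and total = 6 + 2 + 2 + 2 + 2 = 14.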
@@ -137,8 +141,8 @@ describe Vmpooler::API::Helpers do
        {'name' => 'p2'}
      ]

      allow(redis).to receive(:pipelined).with(no_args).and_return [1,1]
      allow(redis).to receive(:get).and_return(5,0)
      # Mock returns 7*2 + 2 = 16 results with clone=5 to cause negative booting
      allow(redis).to receive(:pipelined).with(no_args).and_return [1,1,1,1,1,1,1,1,1,1,1,1,1,1,5,0]

      expect(subject.get_queue_metrics(pools, redis)).to eq({requested: 6, pending: 2, cloning: 5, booting: 0, ready: 2, running: 2, completed: 2, total: 14})
    end