From a4abe2652ae9b6e25a3c34ba0b1d1f85a287747d Mon Sep 17 00:00:00 2001
From: Mahima Singh <105724608+smahima27@users.noreply.github.com>
Date: Wed, 24 Dec 2025 15:06:22 +0530
Subject: [PATCH] Fix RuboCop offenses

---
 lib/vmpooler/api/helpers.rb  | 36 +++++++++++++++++++-----------------
 lib/vmpooler/api/v3.rb       | 30 +++++++++++++++++++-----------
 lib/vmpooler/pool_manager.rb |  2 +-
 3 files changed, 39 insertions(+), 29 deletions(-)

diff --git a/lib/vmpooler/api/helpers.rb b/lib/vmpooler/api/helpers.rb
index ba0d0ee..3a32fa7 100644
--- a/lib/vmpooler/api/helpers.rb
+++ b/lib/vmpooler/api/helpers.rb
@@ -302,27 +302,29 @@ module Vmpooler
       # Use a single pipeline to fetch all queue counts at once for better performance
       results = backend.pipelined do |pipeline|
         # Order matters - we'll use indices to extract values
-        pools.each { |pool| pipeline.scard("vmpooler__provisioning__request#{pool['name']}") } # 0..n-1
-        pools.each { |pool| pipeline.scard("vmpooler__provisioning__processing#{pool['name']}") } # n..2n-1
-        pools.each { |pool| pipeline.scard("vmpooler__odcreate__task#{pool['name']}") } # 2n..3n-1
-        pools.each { |pool| pipeline.scard("vmpooler__pending__#{pool['name']}") } # 3n..4n-1
-        pools.each { |pool| pipeline.scard("vmpooler__ready__#{pool['name']}") } # 4n..5n-1
-        pools.each { |pool| pipeline.scard("vmpooler__running__#{pool['name']}") } # 5n..6n-1
-        pools.each { |pool| pipeline.scard("vmpooler__completed__#{pool['name']}") } # 6n..7n-1
-        pipeline.get('vmpooler__tasks__clone') # 7n
-        pipeline.get('vmpooler__tasks__ondemandclone') # 7n+1
+        pools.each do |pool|
+          pipeline.scard("vmpooler__provisioning__request#{pool['name']}") # per-pool offset 0
+          pipeline.scard("vmpooler__provisioning__processing#{pool['name']}") # per-pool offset 1
+          pipeline.scard("vmpooler__odcreate__task#{pool['name']}") # per-pool offset 2
+          pipeline.scard("vmpooler__pending__#{pool['name']}") # per-pool offset 3
+          pipeline.scard("vmpooler__ready__#{pool['name']}") # per-pool offset 4
+          pipeline.scard("vmpooler__running__#{pool['name']}") # per-pool offset 5
+          pipeline.scard("vmpooler__completed__#{pool['name']}") # per-pool offset 6
+        end
+        pipeline.get('vmpooler__tasks__clone') # 7n
+        pipeline.get('vmpooler__tasks__ondemandclone') # 7n + 1
       end
 
       n = pools.length
       # Safely extract results with default to empty array if slice returns nil
-      queue[:requested] = (results[0...n] || []).sum(&:to_i) +
-                          (results[n...(2*n)] || []).sum(&:to_i) +
-                          (results[(2*n)...(3*n)] || []).sum(&:to_i)
-      queue[:pending] = (results[(3*n)...(4*n)] || []).sum(&:to_i)
-      queue[:ready] = (results[(4*n)...(5*n)] || []).sum(&:to_i)
-      queue[:running] = (results[(5*n)...(6*n)] || []).sum(&:to_i)
-      queue[:completed] = (results[(6*n)...(7*n)] || []).sum(&:to_i)
-      queue[:cloning] = (results[7*n] || 0).to_i + (results[7*n + 1] || 0).to_i
+      # Pipelined replies arrive in queue order, so each pool owns a block of seven counts
+      per_pool = (results[0...(7 * n)] || []).each_slice(7).to_a
+      queue[:requested] = per_pool.sum { |c| c[0].to_i + c[1].to_i + c[2].to_i }
+      queue[:pending] = per_pool.sum { |c| c[3].to_i }
+      queue[:ready] = per_pool.sum { |c| c[4].to_i }
+      queue[:running] = per_pool.sum { |c| c[5].to_i }
+      queue[:completed] = per_pool.sum { |c| c[6].to_i }
+      queue[:cloning] = (results[7 * n] || 0).to_i + (results[(7 * n) + 1] || 0).to_i
       queue[:booting] = queue[:pending].to_i - queue[:cloning].to_i
       queue[:booting] = 0 if queue[:booting] < 0
       queue[:total] = queue[:requested] + queue[:pending].to_i + queue[:ready].to_i + queue[:running].to_i + queue[:completed].to_i
diff --git a/lib/vmpooler/api/v3.rb b/lib/vmpooler/api/v3.rb
index 025eceb..4f0ace3 100644
--- a/lib/vmpooler/api/v3.rb
+++ b/lib/vmpooler/api/v3.rb
@@ -10,14 +10,21 @@ module Vmpooler
     api_prefix = "/api/v#{api_version}"
 
     # Simple in-memory cache for status endpoint
-    @@status_cache = {}
-    @@status_cache_mutex = Mutex.new
+    @status_cache = {}
+    @status_cache_mutex = Mutex.new
     STATUS_CACHE_TTL = 30 # seconds
 
+    class << self
+      attr_accessor :status_cache, :status_cache_mutex
+    end
+
+    @status_cache ||= {}
+    @status_cache_mutex ||= Mutex.new
+
     # Clear cache (useful for testing)
     def self.clear_status_cache
-      @@status_cache_mutex.synchronize do
-        @@status_cache.clear
+      @status_cache_mutex.synchronize do
+        @status_cache.clear
       end
     end
 
@@ -478,18 +485,19 @@ module Vmpooler
 
     # Cache helper methods for status endpoint
     def get_cached_status(cache_key)
-      @@status_cache_mutex.synchronize do
-        cached = @@status_cache[cache_key]
+      self.class.status_cache_mutex.synchronize do
+        cached = self.class.status_cache[cache_key]
         if cached && (Time.now - cached[:timestamp]) < STATUS_CACHE_TTL
           return cached[:data]
         end
+        nil
       end
     end
 
     def set_cached_status(cache_key, data)
-      @@status_cache_mutex.synchronize do
-        @@status_cache[cache_key] = {
+      self.class.status_cache_mutex.synchronize do
+        self.class.status_cache[cache_key] = {
           data: data,
           timestamp: Time.now
         }
@@ -685,7 +693,7 @@ module Vmpooler
 
       # Create cache key based on view parameters
       cache_key = params[:view] ? "status_#{params[:view]}" : "status_all"
-      
+
       # Try to get cached response
       cached_response = get_cached_status(cache_key)
       return cached_response if cached_response
@@ -751,10 +759,10 @@ module Vmpooler
       result[:status][:uptime] = (Time.now - Vmpooler::API.settings.config[:uptime]).round(1) if Vmpooler::API.settings.config[:uptime]
 
       response = JSON.pretty_generate(Hash[result.sort_by { |k, _v| k }])
-      
+
       # Cache the response
       set_cached_status(cache_key, response)
-      
+
       response
     end
 
diff --git a/lib/vmpooler/pool_manager.rb b/lib/vmpooler/pool_manager.rb
index b3cdda3..933b30c 100644
--- a/lib/vmpooler/pool_manager.rb
+++ b/lib/vmpooler/pool_manager.rb
@@ -1699,7 +1699,7 @@ module Vmpooler
       start_time = Time.now
       result = _check_pool(pool, provider)
       duration = Time.now - start_time
-      
+
       $metrics.gauge("vmpooler_performance.check_pool.#{pool['name']}", duration)
       $logger.log('d', "[!] check_pool for #{pool['name']} took #{duration.round(2)}s") if duration > 5
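
For reviewers who want to sanity-check the helpers.rb change: redis-rb returns pipelined replies in the exact order the commands were queued, so once the seven SCARDs are issued inside a single pools.each block, each pool owns one contiguous block of replies and the totals must be summed by per-pool offset rather than by the old metric-ordered slices. Below is a minimal standalone sketch of that layout, not part of the patch; the key prefixes, pool names, and the queue_counts helper are made up for illustration, and it assumes the redis gem with a local Redis server.

require 'redis'

# Hypothetical subset of vmpooler-style keys: three counts per pool.
KEY_PREFIXES = %w[vmpooler__pending__ vmpooler__ready__ vmpooler__running__].freeze

def queue_counts(redis, pools)
  # Queue every command for every pool in one round trip.
  replies = redis.pipelined do |pipeline|
    pools.each do |pool|
      KEY_PREFIXES.each { |prefix| pipeline.scard("#{prefix}#{pool['name']}") }
    end
  end

  stride = KEY_PREFIXES.length
  per_pool = replies.each_slice(stride).to_a # pool i => replies[stride * i, stride]
  {
    pending: per_pool.sum { |counts| counts[0].to_i },
    ready: per_pool.sum { |counts| counts[1].to_i },
    running: per_pool.sum { |counts| counts[2].to_i }
  }
end

redis = Redis.new
pools = [{ 'name' => 'debian-10-x86_64' }, { 'name' => 'centos-8-x86_64' }]
puts queue_counts(redis, pools).inspect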
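
The v3.rb hunks replace @@class variables with instance variables on the class object, exposed through class << self accessors, which is the usual remedy for RuboCop's Style/ClassVars offense. The sketch below shows that caching pattern on its own, with a TTL check and a mutex around every access; the StatusCache class and its method names are hypothetical, and only the Ruby standard library is assumed.

# A minimal class-level cache guarded by a Mutex, mirroring the shape of the
# patched code: state lives in instance variables on the class object rather
# than in @@class variables.
class StatusCache
  CACHE_TTL = 30 # seconds

  @store = {}
  @mutex = Mutex.new

  class << self
    attr_reader :store, :mutex

    # Returns the cached value, or nil if the entry is missing or expired.
    def fetch(key)
      mutex.synchronize do
        entry = store[key]
        return entry[:data] if entry && (Time.now - entry[:timestamp]) < CACHE_TTL

        nil
      end
    end

    # Stores a value along with the time it was cached.
    def write(key, data)
      mutex.synchronize do
        store[key] = { data: data, timestamp: Time.now }
      end
    end

    # Clears every entry (handy in tests).
    def clear
      mutex.synchronize { store.clear }
    end
  end
end

StatusCache.write('status_all', '{"ok": true}')
puts StatusCache.fetch('status_all') # prints the cached JSON string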