(POOLER-158) Add capability to provision VMs on demand

This change adds the capability to provision instances on demand. Previously, vmpooler could only hand out machines from pre-provisioned pools.

It also refactors the Redis interactions to reduce round trips: multi and pipelined commands are used wherever possible, and pool manager now obtains its Redis connections from a connection pool. Besides providing multiple connections for pool manager to use, this makes its Redis interactions thread safe.

Ready TTL is now a global parameter that can be set as a default for all pools. The previous default of 0 has been removed, because it would leave a provisioned instance in the pool indefinitely. Pool-empty messages are no longer emitted when a pool's size is set to 0; before this change, the API and pool manager both reported such pools as empty.
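To make the new data layout concrete, here is a minimal Ruby sketch that records an on-demand request using the Redis keys exercised by the new spec helpers below (vmpooler__provisioning__request, vmpooler__odrequest__<request_id>, vmpooler__odcreate__task). It also illustrates the round-trip reduction described above by batching the writes in a single pipelined call. The request id, the platform string format, and the 'waiting' status value are illustrative assumptions, not the exact production formats.

require 'redis'

redis = Redis.new

request_id = '1234567890'               # hypothetical request id
platforms  = 'centos:centos-7-x86_64:2' # assumed "alias:pool:count" request string
score      = Time.now.to_i              # zset score used to order and expire requests

# One round trip for all of the writes instead of one call per command.
redis.pipelined do |pipeline|
  pipeline.zadd('vmpooler__provisioning__request', score, request_id)
  pipeline.hset("vmpooler__odrequest__#{request_id}", 'requested', platforms)
  pipeline.hset("vmpooler__odrequest__#{request_id}", 'status', 'waiting')        # assumed status value
  pipeline.zadd('vmpooler__odcreate__task', score, "#{platforms}:#{request_id}")  # assumed task string format
end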
parent 1f6f08d172
commit 811fd8b60f
34 changed files with 3326 additions and 1098 deletions
@@ -40,97 +40,115 @@ def token_exists?(token)
   result && !result.empty?
 end

-def create_ready_vm(template, name, token = nil)
-  create_vm(name, token)
+def create_ready_vm(template, name, redis, token = nil)
+  create_vm(name, redis, token)
   redis.sadd("vmpooler__ready__#{template}", name)
   redis.hset("vmpooler__vm__#{name}", "template", template)
 end

-def create_running_vm(template, name, token = nil, user = nil)
-  create_vm(name, token, nil, user)
+def create_running_vm(template, name, redis, token = nil, user = nil)
+  create_vm(name, redis, token, user)
   redis.sadd("vmpooler__running__#{template}", name)
   redis.hset("vmpooler__vm__#{name}", 'template', template)
   redis.hset("vmpooler__vm__#{name}", 'checkout', Time.now)
   redis.hset("vmpooler__vm__#{name}", 'host', 'host1')
 end

-def create_pending_vm(template, name, token = nil)
-  create_vm(name, token)
+def create_pending_vm(template, name, redis, token = nil)
+  create_vm(name, redis, token)
   redis.sadd("vmpooler__pending__#{template}", name)
   redis.hset("vmpooler__vm__#{name}", "template", template)
 end

-def create_vm(name, token = nil, redis_handle = nil, user = nil)
-  redis_db = redis_handle ? redis_handle : redis
-  redis_db.hset("vmpooler__vm__#{name}", 'checkout', Time.now)
-  redis_db.hset("vmpooler__vm__#{name}", 'token:token', token) if token
-  redis_db.hset("vmpooler__vm__#{name}", 'token:user', user) if user
+def create_vm(name, redis, token = nil, user = nil)
+  redis.hset("vmpooler__vm__#{name}", 'checkout', Time.now)
+  redis.hset("vmpooler__vm__#{name}", 'clone', Time.now)
+  redis.hset("vmpooler__vm__#{name}", 'token:token', token) if token
+  redis.hset("vmpooler__vm__#{name}", 'token:user', user) if user
 end

-def create_completed_vm(name, pool, active = false, redis_handle = nil)
-  redis_db = redis_handle ? redis_handle : redis
-  redis_db.sadd("vmpooler__completed__#{pool}", name)
-  redis_db.hset("vmpooler__vm__#{name}", 'checkout', Time.now)
-  redis_db.hset("vmpooler__active__#{pool}", name, Time.now) if active
+def create_completed_vm(name, pool, redis, active = false)
+  redis.sadd("vmpooler__completed__#{pool}", name)
+  redis.hset("vmpooler__vm__#{name}", 'checkout', Time.now)
+  redis.hset("vmpooler__active__#{pool}", name, Time.now) if active
 end

-def create_discovered_vm(name, pool, redis_handle = nil)
-  redis_db = redis_handle ? redis_handle : redis
-  redis_db.sadd("vmpooler__discovered__#{pool}", name)
+def create_discovered_vm(name, pool, redis)
+  redis.sadd("vmpooler__discovered__#{pool}", name)
 end

-def create_migrating_vm(name, pool, redis_handle = nil)
-  redis_db = redis_handle ? redis_handle : redis
-  redis_db.hset("vmpooler__vm__#{name}", 'checkout', Time.now)
-  redis_db.sadd("vmpooler__migrating__#{pool}", name)
+def create_migrating_vm(name, pool, redis)
+  redis.hset("vmpooler__vm__#{name}", 'checkout', Time.now)
+  redis.sadd("vmpooler__migrating__#{pool}", name)
 end

-def create_tag(vm, tag_name, tag_value, redis_handle = nil)
-  redis_db = redis_handle ? redis_handle : redis
-  redis_db.hset("vmpooler__vm__#{vm}", "tag:#{tag_name}", tag_value)
+def create_tag(vm, tag_name, tag_value, redis)
+  redis.hset("vmpooler__vm__#{vm}", "tag:#{tag_name}", tag_value)
 end

-def add_vm_to_migration_set(name, redis_handle = nil)
-  redis_db = redis_handle ? redis_handle : redis
-  redis_db.sadd('vmpooler__migration', name)
+def add_vm_to_migration_set(name, redis)
+  redis.sadd('vmpooler__migration', name)
 end

 def fetch_vm(vm)
   redis.hgetall("vmpooler__vm__#{vm}")
 end

-def set_vm_data(vm, key, value)
+def set_vm_data(vm, key, value, redis)
   redis.hset("vmpooler__vm__#{vm}", key, value)
 end

-def snapshot_revert_vm(vm, snapshot = '12345678901234567890123456789012')
+def snapshot_revert_vm(vm, snapshot = '12345678901234567890123456789012', redis)
   redis.sadd('vmpooler__tasks__snapshot-revert', "#{vm}:#{snapshot}")
   redis.hset("vmpooler__vm__#{vm}", "snapshot:#{snapshot}", "1")
 end

-def snapshot_vm(vm, snapshot = '12345678901234567890123456789012')
+def snapshot_vm(vm, snapshot = '12345678901234567890123456789012', redis)
   redis.sadd('vmpooler__tasks__snapshot', "#{vm}:#{snapshot}")
   redis.hset("vmpooler__vm__#{vm}", "snapshot:#{snapshot}", "1")
 end

-def disk_task_vm(vm, disk_size = '10')
+def disk_task_vm(vm, disk_size = '10', redis)
   redis.sadd('vmpooler__tasks__disk', "#{vm}:#{disk_size}")
 end

-def has_vm_snapshot?(vm)
+def has_vm_snapshot?(vm, redis)
   redis.smembers('vmpooler__tasks__snapshot').any? do |snapshot|
-    instance, sha = snapshot.split(':')
+    instance, _sha = snapshot.split(':')
     vm == instance
   end
 end

-def vm_reverted_to_snapshot?(vm, snapshot = nil)
+def vm_reverted_to_snapshot?(vm, redis, snapshot = nil)
   redis.smembers('vmpooler__tasks__snapshot-revert').any? do |action|
     instance, sha = action.split(':')
     instance == vm and (snapshot ? (sha == snapshot) : true)
   end
 end

-def pool_has_ready_vm?(pool, vm)
+def pool_has_ready_vm?(pool, vm, redis)
   !!redis.sismember('vmpooler__ready__' + pool, vm)
 end
+
+def create_ondemand_request_for_test(request_id, score, platforms_string, redis, user = nil, token = nil)
+  redis.zadd('vmpooler__provisioning__request', score, request_id)
+  redis.hset("vmpooler__odrequest__#{request_id}", 'requested', platforms_string)
+  redis.hset("vmpooler__odrequest__#{request_id}", 'token:token', token) if token
+  redis.hset("vmpooler__odrequest__#{request_id}", 'token:user', user) if user
+end
+
+def set_ondemand_request_status(request_id, status, redis)
+  redis.hset("vmpooler__odrequest__#{request_id}", 'status', status)
+end
+
+def create_ondemand_vm(vmname, request_id, pool, pool_alias, redis)
+  redis.sadd("vmpooler__#{request_id}__#{pool_alias}__#{pool}", vmname)
+end
+
+def create_ondemand_creationtask(request_string, score, redis)
+  redis.zadd('vmpooler__odcreate__task', score, request_string)
+end
+
+def create_ondemand_processing(request_id, score, redis)
+  redis.zadd('vmpooler__provisioning__processing', score, request_id)
+end
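Because every helper now takes its Redis client explicitly, callers can check a connection out of a shared pool for the duration of a call, which is what makes pool manager's Redis access thread safe. A minimal sketch, assuming the connection_pool gem and illustrative pool sizing, template, VM, and request-id values:

require 'redis'
require 'connection_pool'

# Shared pool of Redis connections; size and timeout are illustrative.
redis_pool = ConnectionPool.new(size: 10, timeout: 5) { Redis.new }

# Each checkout hands a dedicated connection to the helpers, so concurrent
# threads never interleave commands on the same client.
redis_pool.with do |redis|
  create_ready_vm('centos-7-x86_64', 'vm-abc123', redis)          # hypothetical template and VM name
  create_ondemand_processing('1234567890', Time.now.to_i, redis)  # hypothetical request id
end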