mirror of
https://github.com/puppetlabs/vmpooler.git
synced 2026-01-26 01:58:41 -05:00
Parallelize VM clones in threads
This commit is contained in:
parent
6c74a89e11
commit
1ebf772d70
1 changed files with 111 additions and 96 deletions
|
|
@ -9,8 +9,8 @@ require 'lib/logger'
|
|||
require 'lib/require_relative'
|
||||
require 'lib/vsphere_helper'
|
||||
|
||||
logger = Logger.new
|
||||
vsphere_helper = VsphereHelper.new
|
||||
$logger = Logger.new
|
||||
$vsphere_helper = VsphereHelper.new
|
||||
|
||||
Dir.chdir(File.dirname(__FILE__))
|
||||
|
||||
|
|
@ -24,6 +24,78 @@ vsphere = config[:vsphere]
|
|||
# Connect to Redis
|
||||
$redis = Redis.new
|
||||
|
||||
|
||||
# Clone a VM
|
||||
# Clone a VM from a template, asynchronously, in its own thread.
#
#   template  - full path to the source template, e.g. 'folder/subfolder/tpl'
#   pool      - name of the vSphere resource pool to place the clone in
#   folder    - name of the vSphere folder to place the clone in
#   datastore - name of the datastore backing the clone
#
# Returns the Thread performing the clone. Relies on the $vsphere_helper,
# $redis and $logger globals initialized at the top of this file. Raises
# (inside the thread) when the template path has no folder component or
# the template name is empty.
def clone_vm template, pool, folder, datastore
  Thread.new {
    vm = {}

    # Split the template's folder path from its base name
    if template =~ /\//
      templatefolders = template.split('/')
      vm['template'] = templatefolders.pop
    end

    if templatefolders
      vm[vm['template']] = $vsphere_helper.find_folder(templatefolders.join('/')).find(vm['template'])
    else
      raise "Please provide a full path to the template"
    end

    if vm['template'].length == 0
      raise "Unable to find template '#{vm['template']}'!"
    end

    # Generate a randomized hostname: a leading letter followed by 14
    # alphanumeric characters. rand(26) spans the full a-z range — the
    # previous rand(25) could never select 'z' as the first character.
    o = [('a'..'z'), ('0'..'9')].map { |r| r.to_a }.flatten
    vm['hostname'] = o[rand(26)] + (0...14).map { o[rand(o.length)] }.join

    # Annotate with creation time, origin template, etc.
    configSpec = RbVmomi::VIM.VirtualMachineConfigSpec(
      :annotation =>
        'Base template: ' + vm['template'] + "\n" +
        'Creation time: ' + Time.now.strftime("%Y-%m-%d %H:%M")
    )

    # Put the VM in the specified folder and resource pool; linked-clone
    # style disk move keeps the clone backed by the template's disks
    relocateSpec = RbVmomi::VIM.VirtualMachineRelocateSpec(
      :datastore    => $vsphere_helper.find_datastore(datastore),
      :pool         => $vsphere_helper.find_pool(pool),
      :diskMoveType => :moveChildMostDiskBacking
    )

    # Create a clone spec
    spec = RbVmomi::VIM.VirtualMachineCloneSpec(
      :location => relocateSpec,
      :config   => configSpec,
      :powerOn  => true,
      :template => false
    )

    # Clone the VM and block this thread until vSphere reports completion
    $logger.log('d', "[ ] '#{vm['hostname']}' is being cloned from '#{vm['template']}'")

    start = Time.now
    vm[vm['template']].CloneVM_Task(
      :folder => $vsphere_helper.find_folder(folder),
      :name   => vm['hostname'],
      :spec   => spec
    ).wait_for_completion
    finish = '%.2f' % (Time.now - start)

    # Add VM to Redis inventory ('pending' pool)
    $redis.sadd('vmware_host_pool__pending__' + pool, vm['hostname'])

    $logger.log('s', "[+] '#{vm['hostname']}' cloned from '#{vm['template']}' in #{finish} seconds")

    # Metrics: record the deploy duration, keeping the 101 most recent samples
    $redis.lpush('vmware_host_pool_metrics__deploy', finish)
    $redis.ltrim('vmware_host_pool_metrics__deploy', 0, 100)
  }
end
|
||||
|
||||
|
||||
|
||||
|
||||
# Update loop
|
||||
loop do
|
||||
pools.each do |pool|
|
||||
|
|
@ -31,7 +103,7 @@ loop do
|
|||
total = 0
|
||||
|
||||
# Locate the resource pool
|
||||
base = vsphere_helper.find_pool(pool['pool'])
|
||||
base = $vsphere_helper.find_pool(pool['pool'])
|
||||
|
||||
# Make sure all VMs in resource pool are accounted-for
|
||||
base.vm.each do |vm|
|
||||
|
|
@ -56,19 +128,19 @@ loop do
|
|||
end
|
||||
|
||||
if (
|
||||
(vsphere_helper.find_vms(vm)[vm]) and
|
||||
(vsphere_helper.find_vms(vm)[vm].summary.guest.toolsRunningStatus == 'guestToolsRunning') and
|
||||
(vsphere_helper.find_vms(vm)[vm].summary.guest.hostName) and
|
||||
(vsphere_helper.find_vms(vm)[vm].summary.guest.hostName == vm) and
|
||||
(vsphere_helper.find_vms(vm)[vm].summary.guest.ipAddress != nil)
|
||||
($vsphere_helper.find_vms(vm)[vm]) and
|
||||
($vsphere_helper.find_vms(vm)[vm].summary.guest.toolsRunningStatus == 'guestToolsRunning') and
|
||||
($vsphere_helper.find_vms(vm)[vm].summary.guest.hostName) and
|
||||
($vsphere_helper.find_vms(vm)[vm].summary.guest.hostName == vm) and
|
||||
($vsphere_helper.find_vms(vm)[vm].summary.guest.ipAddress != nil)
|
||||
)
|
||||
begin
|
||||
Socket.getaddrinfo(vm, nil)
|
||||
rescue
|
||||
if (
|
||||
(vsphere_helper.find_vms(vm)[vm].runtime) and
|
||||
(vsphere_helper.find_vms(vm)[vm].runtime.bootTime) and
|
||||
(((( Time.now - vsphere_helper.find_mvs(vm)[vm].runtime.bootTime ) / 60 ) / 60 ) >= 1)
|
||||
($vsphere_helper.find_vms(vm)[vm].runtime) and
|
||||
($vsphere_helper.find_vms(vm)[vm].runtime.bootTime) and
|
||||
(((( Time.now - $vsphere_helper.find_mvs(vm)[vm].runtime.bootTime ) / 60 ) / 60 ) >= 1)
|
||||
)
|
||||
$redis.srem('vmware_host_pool__pending__'+pool['name'], vm)
|
||||
$redis.sadd('vmware_host_pool__failed__'+pool['name'], vm)
|
||||
|
|
@ -76,7 +148,7 @@ loop do
|
|||
# Metrics
|
||||
$redis.lpush('vmware_host_pool_metrics__deploy_fail', '1')
|
||||
|
||||
logger.log('s', "[<] '#{vm}' moved to 'failed' queue")
|
||||
$logger.log('s', "[<] '#{vm}' moved to 'failed' queue")
|
||||
end
|
||||
|
||||
next
|
||||
|
|
@ -85,7 +157,7 @@ loop do
|
|||
$redis.sadd('vmware_host_pool__ready__'+pool['name'], vm)
|
||||
$redis.srem('vmware_host_pool__pending__'+pool['name'], vm)
|
||||
|
||||
logger.log('s', "[>] '#{vm}' moved to 'ready' queue")
|
||||
$logger.log('s', "[>] '#{vm}' moved to 'ready' queue")
|
||||
|
||||
# Metrics
|
||||
$redis.lpush('vmware_host_pool_metrics__deploy_fail', '0')
|
||||
|
|
@ -99,15 +171,15 @@ loop do
|
|||
end
|
||||
|
||||
if (
|
||||
(! vsphere_helper.find_vms(vm)[vm]) or
|
||||
(vsphere_helper.find_vms(vm)[vm].summary.guest.toolsRunningStatus != 'guestToolsRunning') or
|
||||
(vsphere_helper.find_vms(vm)[vm].summary.guest.hostName != vm) or
|
||||
(vsphere_helper.find_vms(vm)[vm].summary.guest.ipAddress == nil)
|
||||
(! $vsphere_helper.find_vms(vm)[vm]) or
|
||||
($vsphere_helper.find_vms(vm)[vm].summary.guest.toolsRunningStatus != 'guestToolsRunning') or
|
||||
($vsphere_helper.find_vms(vm)[vm].summary.guest.hostName != vm) or
|
||||
($vsphere_helper.find_vms(vm)[vm].summary.guest.ipAddress == nil)
|
||||
)
|
||||
$redis.srem('vmware_host_pool__ready__'+pool['name'], vm)
|
||||
$redis.sadd('vmware_host_pool__failed__'+pool['name'], vm)
|
||||
|
||||
logger.log('s', "[<] '#{vm}' moved to 'failed' queue")
|
||||
$logger.log('s', "[<] '#{vm}' moved to 'failed' queue")
|
||||
|
||||
next
|
||||
end
|
||||
|
|
@ -118,7 +190,7 @@ loop do
|
|||
$redis.srem('vmware_host_pool__ready__'+pool['name'], vm)
|
||||
$redis.sadd('vmware_host_pool__failed__'+pool['name'], vm)
|
||||
|
||||
logger.log('s', "[<] '#{vm}' moved to 'failed' queue")
|
||||
$logger.log('s', "[<] '#{vm}' moved to 'failed' queue")
|
||||
|
||||
# Metrics
|
||||
$redis.lpush('vmware_host_pool_metrics__deploy_fail', '1')
|
||||
|
|
@ -131,7 +203,7 @@ loop do
|
|||
$redis.srem('vmware_host_pool__completed__'+pool['name'], vm)
|
||||
end
|
||||
|
||||
host = vsphere_helper.find_vms(vm)[vm]
|
||||
host = $vsphere_helper.find_vms(vm)[vm]
|
||||
|
||||
if (
|
||||
(host) and
|
||||
|
|
@ -140,14 +212,14 @@ loop do
|
|||
start = Time.now
|
||||
|
||||
if host.runtime.powerState == 'poweredOn'
|
||||
logger.log('d', "[ ] '#{vm}' is being shut down")
|
||||
$logger.log('d', "[ ] '#{vm}' is being shut down")
|
||||
host.PowerOffVM_Task.wait_for_completion
|
||||
end
|
||||
|
||||
host.Destroy_Task.wait_for_completion
|
||||
finish = '%.2f' % (Time.now-start)
|
||||
|
||||
logger.log('s', "[-] '#{vm}' destroyed in #{finish} seconds")
|
||||
$logger.log('s', "[-] '#{vm}' destroyed in #{finish} seconds")
|
||||
end
|
||||
end
|
||||
|
||||
|
|
@ -158,10 +230,10 @@ loop do
|
|||
end
|
||||
|
||||
if (
|
||||
(vsphere_helper.find_vms(vm)[vm]) and
|
||||
(vsphere_helper.find_vms(vm)[vm].summary.guest.toolsRunningStatus == 'guestToolsRunning') and
|
||||
(vsphere_helper.find_vms(vm)[vm].summary.guest.hostName == vm) and
|
||||
(vsphere_helper.find_vms(vm)[vm].summary.guest.ipAddress != nil)
|
||||
($vsphere_helper.find_vms(vm)[vm]) and
|
||||
($vsphere_helper.find_vms(vm)[vm].summary.guest.toolsRunningStatus == 'guestToolsRunning') and
|
||||
($vsphere_helper.find_vms(vm)[vm].summary.guest.hostName == vm) and
|
||||
($vsphere_helper.find_vms(vm)[vm].summary.guest.ipAddress != nil)
|
||||
)
|
||||
begin
|
||||
Socket.getaddrinfo(vm, nil)
|
||||
|
|
@ -172,94 +244,37 @@ loop do
|
|||
$redis.sadd('vmware_host_pool__ready__'+pool['name'], vm)
|
||||
$redis.srem('vmware_host_pool__failed__'+pool['name'], vm)
|
||||
|
||||
logger.log('s', "[>] '#{vm}' moved to 'ready' queue")
|
||||
$logger.log('s', "[>] '#{vm}' moved to 'ready' queue")
|
||||
else
|
||||
if (
|
||||
(vsphere_helper.find_vms(vm)[vm]) and
|
||||
(vsphere_helper.find_vms(vm)[vm].runtime)
|
||||
($vsphere_helper.find_vms(vm)[vm]) and
|
||||
($vsphere_helper.find_vms(vm)[vm].runtime)
|
||||
)
|
||||
start = Time.now
|
||||
|
||||
if vsphere_helper.find_vms(vm)[vm].runtime.powerState == 'poweredOn'
|
||||
logger.log('d', "[ ] '#{vm}' is being shut down")
|
||||
vsphere_helper.find_vms(vm)[vm].PowerOffVM_Task.wait_for_completion
|
||||
if $vsphere_helper.find_vms(vm)[vm].runtime.powerState == 'poweredOn'
|
||||
$logger.log('d', "[ ] '#{vm}' is being shut down")
|
||||
$vsphere_helper.find_vms(vm)[vm].PowerOffVM_Task.wait_for_completion
|
||||
end
|
||||
|
||||
vsphere_helper.find_vms(vm)[vm].Destroy_Task.wait_for_completion
|
||||
$vsphere_helper.find_vms(vm)[vm].Destroy_Task.wait_for_completion
|
||||
finish = '%.2f' % (Time.now-start)
|
||||
|
||||
logger.log('s', "[-] '#{vm}' destroyed in #{finish} seconds")
|
||||
$logger.log('s', "[-] '#{vm}' destroyed in #{finish} seconds")
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Bring the pool up to the desired size
|
||||
if total < pool['size']
|
||||
|
||||
# Provision VMs
|
||||
(1..(pool['size']-total)).each { |i|
|
||||
vm = {}
|
||||
|
||||
if pool['template'] =~ /\//
|
||||
templatefolders = pool['template'].split('/')
|
||||
vm['template'] = templatefolders.pop
|
||||
end
|
||||
|
||||
if templatefolders
|
||||
vm[vm['template']] = vsphere_helper.find_folder(templatefolders.join('/')).find(vm['template'])
|
||||
else
|
||||
raise "Please provide a full path to the template"
|
||||
end
|
||||
|
||||
if vm['template'].length == 0
|
||||
raise "Unable to find template '#{vm['template']}'!"
|
||||
end
|
||||
|
||||
# Generate a randomized hostname
|
||||
o = [('a'..'z'),('0'..'9')].map{|r| r.to_a}.flatten
|
||||
vm['hostname'] = o[rand(25)]+(0...14).map{o[rand(o.length)]}.join
|
||||
|
||||
# Annotate with creation time, origin template, etc.
|
||||
configSpec = RbVmomi::VIM.VirtualMachineConfigSpec(
|
||||
:annotation =>
|
||||
'Base template: ' + vm['template'] + "\n" +
|
||||
'Creation time: ' + Time.now.strftime("%Y-%m-%d %H:%M")
|
||||
clone_vm(
|
||||
pool['template'],
|
||||
pool['pool'],
|
||||
pool['folder'],
|
||||
pool['datastore']
|
||||
)
|
||||
|
||||
# Put the VM in the specified folder and resource pool
|
||||
relocateSpec = RbVmomi::VIM.VirtualMachineRelocateSpec(
|
||||
:datastore => vsphere_helper.find_datastore(pool['datastore']),
|
||||
:pool => vsphere_helper.find_pool(pool['pool']),
|
||||
:diskMoveType => :moveChildMostDiskBacking
|
||||
)
|
||||
|
||||
# Create a clone spec
|
||||
spec = RbVmomi::VIM.VirtualMachineCloneSpec(
|
||||
:location => relocateSpec,
|
||||
:config => configSpec,
|
||||
:powerOn => true,
|
||||
:template => false
|
||||
)
|
||||
|
||||
# Clone the VM
|
||||
logger.log('d', "[ ] '#{vm['hostname']}' is being cloned from '#{vm['template']}'")
|
||||
|
||||
start = Time.now
|
||||
vm[vm['template']].CloneVM_Task(
|
||||
:folder => vsphere_helper.find_folder(pool['folder']),
|
||||
:name => vm['hostname'],
|
||||
:spec => spec
|
||||
).wait_for_completion
|
||||
finish = '%.2f' % (Time.now-start)
|
||||
|
||||
# Add VM to Redis inventory ('pending' pool)
|
||||
$redis.sadd('vmware_host_pool__pending__'+pool['name'], vm['hostname'])
|
||||
|
||||
logger.log('s', "[+] '#{vm['hostname']}' cloned from '#{vm['template']}' in #{finish} seconds")
|
||||
|
||||
# Metrics
|
||||
$redis.lpush('vmware_host_pool_metrics__deploy', finish)
|
||||
$redis.ltrim('vmware_host_pool_metrics__deploy', 0, 100)
|
||||
}
|
||||
end
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue