WIP (POOLER-70) Refactor out VSphere to separate class

WIP
This commit is contained in:
Glenn Sarti 2017-01-26 15:25:06 -08:00
parent 626195685f
commit 05f781ab69
6 changed files with 1443 additions and 653 deletions

View file

@ -12,7 +12,7 @@ module Vmpooler
require 'yaml'
require 'set'
%w( api graphite logger pool_manager vsphere_helper statsd dummy_statsd ).each do |lib|
%w( api graphite logger pool_manager vsphere_helper statsd dummy_statsd backingservice ).each do |lib|
begin
require "vmpooler/#{lib}"
rescue LoadError

View file

@ -0,0 +1,8 @@
# TODO remove dummy for commit history
# Load every backing-service implementation.  Prefer the rubygems load
# path; fall back to a path relative to this file so the code also works
# when run straight from a source checkout.
%w( base vsphere dummy ).each do |lib|
begin
require "vmpooler/backingservice/#{lib}"
rescue LoadError
require File.expand_path(File.join(File.dirname(__FILE__), 'backingservice', lib))
end
end

View file

@ -0,0 +1,102 @@
# Abstract interface that every VM backing service (vSphere, dummy, ...)
# must implement.  Every method fails loudly so a child class that forgets
# to override one is caught immediately at runtime.
module Vmpooler
class PoolManager
class BackingService
class Base
# These defs must be overridden in child classes
def initialize(options)
end
#def validate_config(config)
# false
#end
# List the VMs currently in a pool.
# inputs
#   pool : hashtable from config file
# returns
#   hashtable
#     name : name of the device
def vms_in_pool(pool)
fail "#{self.class.name} does not implement vms_in_pool"
end
# Find which host computer a VM is running on.
# inputs
#   vm_name: string
# returns
#   [String] hostname = Name of the host computer running the vm. If this is not a Virtual Machine, it returns the vm_name
def get_vm_host(vm_name)
fail "#{self.class.name} does not implement get_vm_host"
end
# Pick the best host for a VM (load balancing).
# inputs
#   vm_name: string
# returns
#   [String] hostname = Name of the most appropriate host computer to run this VM. Useful for load balancing VMs in a cluster
#            If this is not a Virtual Machine, it returns the vm_name
def find_least_used_compatible_host(vm_name)
fail "#{self.class.name} does not implement find_least_used_compatible_host"
end
# Move a VM to another host.
# inputs
#   vm_name: string
#   dest_host_name: string (Name of the host to migrate `vm_name` to)
# returns
#   [Boolean] Returns true on success or false on failure
def migrate_vm_to_host(vm_name, dest_host_name)
fail "#{self.class.name} does not implement migrate_vm_to_host"
end
# Fetch the details of a single VM.
# inputs
#   vm_name: string
# returns
#   nil if it doesn't exist
#   Hashtable of the VM
#     [String] hostname   = Name reported by Vmware tools (host.summary.guest.hostName)
#     [String] template   = This is the name of template exposed by the API. It must _match_ the poolname
#     [String] poolname   = Name of the pool the VM is located
#     [Time]   boottime   = Time when the VM was created/booted
#     [String] powerstate = Current power state of a VM. Valid values (as per vCenter API)
#                           - 'PoweredOn','PoweredOff'
def get_vm(vm_name)
fail "#{self.class.name} does not implement get_vm"
end
# Provision a new VM in a pool.
# inputs
#   pool: string
# returns
#   vm name: string
def create_vm(pool)
fail "#{self.class.name} does not implement create_vm"
end
# Tear down a VM.
# inputs
#   vm_name: string
#   pool: string
# returns
#   boolean : true if success, false on error
def destroy_vm(vm_name,pool)
fail "#{self.class.name} does not implement destroy_vm"
end
# Wait until a VM is ready for use, up to `timeout` seconds.
# inputs
#   vm : string
#   pool: string
#   timeout: int (Seconds)
# returns
#   result: boolean
def is_vm_ready?(vm,pool,timeout)
fail "#{self.class.name} does not implement is_vm_ready?"
end
# Check whether a VM exists at all in the backing service.
# inputs
#   vm : string
# returns
#   result: boolean
def vm_exists?(vm)
fail "#{self.class.name} does not implement vm_exists?"
end
end
end
end
end

View file

@ -0,0 +1,622 @@
require 'rubygems' unless defined?(Gem)
module Vmpooler
class PoolManager
class BackingService
class Vsphere < Vmpooler::PoolManager::BackingService::Base
#--------------- Public methods
# Set up the vSphere backing service.
# options : hash with 'credentials' (vSphere server/username/password) and
#           'metrics' (statsd-style metrics sink)
# NOTE(review): these writes go to process-wide globals, so constructing a
# second instance clobbers the first's settings — confirm this is intended
# for the WIP refactor.
def initialize(options)
$credentials = options['credentials']
$metrics = options['metrics']
end
# List the VMs sitting in the pool's configured vSphere folder.
# inputs
#   pool : hashtable from the config file (reads pool['folder'])
# returns
#   Array of the folder's child entities (VM objects)
def devices_in_pool(pool)
  base = find_folder(pool['folder'])
  # Fix: the original `base.childEntity.each { |vm| vm }` ran a no-op block
  # and returned the enumerable itself; return an explicit Array instead.
  base.childEntity.to_a
end
# def destroy_vm()
# # Destroy a VM
# def _destroy_vm(vm, pool, vsphere)
# $redis.srem('vmpooler__completed__' + pool, vm)
# $redis.hdel('vmpooler__active__' + pool, vm)
# $redis.hset('vmpooler__vm__' + vm, 'destroy', Time.now)
# # Auto-expire metadata key
# $redis.expire('vmpooler__vm__' + vm, ($config[:redis]['data_ttl'].to_i * 60 * 60))
# # TODO This is all vSphere specific
# host = vsphere.find_vm(vm)
# if host
# start = Time.now
# if
# (host.runtime) &&
# (host.runtime.powerState) &&
# (host.runtime.powerState == 'poweredOn')
# $logger.log('d', "[ ] [#{pool}] '#{vm}' is being shut down")
# host.PowerOffVM_Task.wait_for_completion
# end
# host.Destroy_Task.wait_for_completion
# finish = '%.2f' % (Time.now - start)
# $logger.log('s', "[-] [#{pool}] '#{vm}' destroyed in #{finish} seconds")
# $metrics.timing("destroy.#{pool}", finish)
# end
# end
# end
# Create (clone) a new device/VM in the given pool.
# WIP stub: currently returns a hard-coded name; the commented-out code
# below is the clone logic in the middle of being ported from pool_manager.
# inputs
#   pool : hashtable from the config file
# returns
#   [String] name of the created VM
def create_device(pool)
'12345'
# clone_vm(
# pool['template'],
# pool['folder'],
# pool['datastore'],
# pool['clone_target'],
# vsphere
# )
# Thread.new do
# begin
# vm = {}
# if template =~ /\//
# templatefolders = template.split('/')
# vm['template'] = templatefolders.pop
# end
# if templatefolders
# vm[vm['template']] = vsphere.find_folder(templatefolders.join('/')).find(vm['template'])
# else
# fail 'Please provide a full path to the template'
# end
# if vm['template'].length == 0
# fail "Unable to find template '#{vm['template']}'!"
# end
# # Generate a randomized hostname
# o = [('a'..'z'), ('0'..'9')].map(&:to_a).flatten
# vm['hostname'] = $config[:config]['prefix'] + o[rand(25)] + (0...14).map { o[rand(o.length)] }.join
# # Add VM to Redis inventory ('pending' pool)
# $redis.sadd('vmpooler__pending__' + vm['template'], vm['hostname'])
# $redis.hset('vmpooler__vm__' + vm['hostname'], 'clone', Time.now)
# $redis.hset('vmpooler__vm__' + vm['hostname'], 'template', vm['template'])
# # Annotate with creation time, origin template, etc.
# # Add extraconfig options that can be queried by vmtools
# configSpec = RbVmomi::VIM.VirtualMachineConfigSpec(
# annotation: JSON.pretty_generate(
# name: vm['hostname'],
# created_by: $config[:vsphere]['username'],
# base_template: vm['template'],
# creation_timestamp: Time.now.utc
# ),
# extraConfig: [
# { key: 'guestinfo.hostname',
# value: vm['hostname']
# }
# ]
# )
# # Choose a clone target
# if target
# $clone_target = vsphere.find_least_used_host(target)
# elsif $config[:config]['clone_target']
# $clone_target = vsphere.find_least_used_host($config[:config]['clone_target'])
# end
# # Put the VM in the specified folder and resource pool
# relocateSpec = RbVmomi::VIM.VirtualMachineRelocateSpec(
# datastore: vsphere.find_datastore(datastore),
# host: $clone_target,
# diskMoveType: :moveChildMostDiskBacking
# )
# # Create a clone spec
# spec = RbVmomi::VIM.VirtualMachineCloneSpec(
# location: relocateSpec,
# config: configSpec,
# powerOn: true,
# template: false
# )
# # Clone the VM
# $logger.log('d', "[ ] [#{vm['template']}] '#{vm['hostname']}' is being cloned from '#{vm['template']}'")
# begin
# start = Time.now
# vm[vm['template']].CloneVM_Task(
# folder: vsphere.find_folder(folder),
# name: vm['hostname'],
# spec: spec
# ).wait_for_completion
# finish = '%.2f' % (Time.now - start)
# $redis.hset('vmpooler__clone__' + Date.today.to_s, vm['template'] + ':' + vm['hostname'], finish)
# $redis.hset('vmpooler__vm__' + vm['hostname'], 'clone_time', finish)
# $logger.log('s', "[+] [#{vm['template']}] '#{vm['hostname']}' cloned from '#{vm['template']}' in #{finish} seconds")
# rescue => err
# $logger.log('s', "[!] [#{vm['template']}] '#{vm['hostname']}' clone failed with an error: #{err}")
# $redis.srem('vmpooler__pending__' + vm['template'], vm['hostname'])
# raise
# end
# $redis.decr('vmpooler__tasks__clone')
# $metrics.timing("clone.#{vm['template']}", finish)
# rescue => err
# $logger.log('s', "[!] [#{vm['template']}] '#{vm['hostname']}' failed while preparing to clone with an error: #{err}")
# raise
# end
# end
end
#**** When getting the VM details
# if (host.summary) &&
# (host.summary.guest) &&
# (host.summary.guest.hostName) &&
# (host.summary.guest.hostName == vm)
#
# Check whether the VM is up and ready for use.
# inputs
#   vm      : string — VM name
#   pool    : string — pool name
#   timeout : int    — seconds to wait for readiness
# returns
#   boolean
# WIP: the socket-probe implementation below is still being ported from
# pool_manager, so this override fails with a descriptive message
# (the original raised a bare, uninformative `fail "!!!!"`).
def is_vm_ready?(vm,pool,timeout)
  fail "#{self.class.name} does not implement is_vm_ready?"
  # def open_socket(host, domain=nil, timeout=5, port=22, &block)
  # Timeout.timeout(timeout) do
  # target_host = host
  # target_host = "#{host}.#{domain}" if domain
  # sock = TCPSocket.new target_host, port
  # begin
  # yield sock if block_given?
  # ensure
  # sock.close
  # end
  # end
  # end
  # def _check_pending_vm(vm, pool, timeout, vsphere)
  # host = vsphere.find_vm(vm)
  # if ! host
  # fail_pending_vm(vm, pool, timeout, false)
  # return
  # end
  # open_socket vm
  # move_pending_vm_to_ready(vm, pool, host)
  # rescue
  # fail_pending_vm(vm, pool, timeout)
  # end
end
#--------------- Private methods
private
ADAPTER_TYPE = 'lsiLogic'
DISK_TYPE = 'thin'
DISK_MODE = 'persistent'
# Verify the vSphere session is still alive and reconnect when it is not.
# connection  : RbVmomi connection to probe
# credentials : credential hash used for the reconnect
def ensure_connected(connection, credentials)
  # CurrentTime is a cheap round-trip that raises once the session is dead.
  connection.serviceInstance.CurrentTime
rescue
  $metrics.increment("connect.open")
  # Fix: reconnect with the credentials argument instead of ignoring it in
  # favour of the $credentials global (all current callers pass
  # $credentials, so behaviour is unchanged for them).
  connect_to_vsphere credentials
end
# Open a new RbVmomi connection from a credentials hash
# ('server', 'username', 'password', optional 'insecure').
def connect_to_vsphere(credentials)
  @connection = RbVmomi::VIM.connect host: credentials['server'],
    user: credentials['username'],
    password: credentials['password'],
    # Fix: `credentials['insecure'] || true` always evaluated to true, so an
    # explicit `insecure: false` in config was silently ignored.  Default to
    # true only when the key is absent.
    insecure: credentials.fetch('insecure', true)
end
# Attach a new thin-provisioned VMDK of `size` GB to `vm` on `datastore`.
# Returns true on success, false when size is not a positive integer.
# The disk file is created first via the VirtualDiskManager, then attached
# to the VM with a ReconfigVM task.
def add_disk(vm, size, datastore)
ensure_connected @connection, $credentials
return false unless size.to_i > 0
vmdk_datastore = find_datastore(datastore)
# Number the new file after the disks already present for this VM.
vmdk_file_name = "#{vm['name']}/#{vm['name']}_#{find_vmdks(vm['name'], datastore).length + 1}.vmdk"
controller = find_disk_controller(vm)
vmdk_spec = RbVmomi::VIM::FileBackedVirtualDiskSpec(
capacityKb: size.to_i * 1024 * 1024,
adapterType: ADAPTER_TYPE,
diskType: DISK_TYPE
)
vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
datastore: vmdk_datastore,
diskMode: DISK_MODE,
fileName: "[#{vmdk_datastore.name}] #{vmdk_file_name}"
)
device = RbVmomi::VIM::VirtualDisk(
backing: vmdk_backing,
capacityInKB: size.to_i * 1024 * 1024,
controllerKey: controller.key,
key: -1,
unitNumber: find_disk_unit_number(vm, controller)
)
device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
device: device,
operation: RbVmomi::VIM::VirtualDeviceConfigSpecOperation('add')
)
vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
deviceChange: [device_config_spec]
)
# Create the disk file on the datastore, then attach it to the VM.
@connection.serviceContent.virtualDiskManager.CreateVirtualDisk_Task(
datacenter: @connection.serviceInstance.find_datacenter,
name: "[#{vmdk_datastore.name}] #{vmdk_file_name}",
spec: vmdk_spec
).wait_for_completion
vm.ReconfigVM_Task(spec: vm_config_spec).wait_for_completion
true
end
# Look up a datastore by name within the connection's datacenter.
# Returns the datastore object, or nil when not found.
def find_datastore(datastorename)
ensure_connected @connection, $credentials
datacenter = @connection.serviceInstance.find_datacenter
datacenter.find_datastore(datastorename)
end
# Return the first hardware device on +vm+ whose label matches
# +deviceName+, or nil when no device matches.
def find_device(vm, deviceName)
  ensure_connected @connection, $credentials
  vm.config.hardware.device.find { |dev| dev.deviceInfo.label == deviceName }
end
# Return the first SCSI controller on +vm+ that still has a free slot
# (fewer than 15 attached disks), or nil when every controller is full.
def find_disk_controller(vm)
ensure_connected @connection, $credentials
devices = find_disk_devices(vm)
devices.keys.sort.each do |device|
if devices[device]['children'].length < 15
return find_device(vm, devices[device]['device'].deviceInfo.label)
end
end
nil
end
# Build a map of SCSI controller key => { 'device' => controller object,
# 'children' => [disks attached to that controller] } for the given VM.
def find_disk_devices(vm)
  ensure_connected @connection, $credentials
  devices = {}
  vm.config.hardware.device.each do |device|
    if device.is_a? RbVmomi::VIM::VirtualSCSIController
      # Fix: the original tested devices[device.controllerKey] here (the
      # controller's OWN parent key) before initialising devices[device.key],
      # which could re-create the entry and wipe a 'children' list already
      # populated by a disk that appeared before its controller.
      if devices[device.key].nil?
        devices[device.key] = {}
        devices[device.key]['children'] = []
      end
      devices[device.key]['device'] = device
    end
    if device.is_a? RbVmomi::VIM::VirtualDisk
      if devices[device.controllerKey].nil?
        devices[device.controllerKey] = {}
        devices[device.controllerKey]['children'] = []
      end
      devices[device.controllerKey]['children'].push(device)
    end
  end
  devices
end
# Find the lowest free SCSI unit number (0..15) on +controller+, taking the
# controller's own unit number and those of its attached disks as used.
# Returns nil when all 16 slots are taken.
def find_disk_unit_number(vm, controller)
ensure_connected @connection, $credentials
used_unit_numbers = []
available_unit_numbers = []
devices = find_disk_devices(vm)
devices.keys.sort.each do |c|
next unless controller.key == devices[c]['device'].key
used_unit_numbers.push(devices[c]['device'].scsiCtlrUnitNumber)
devices[c]['children'].each do |disk|
used_unit_numbers.push(disk.unitNumber)
end
end
(0..15).each do |scsi_id|
if used_unit_numbers.grep(scsi_id).length <= 0
available_unit_numbers.push(scsi_id)
end
end
available_unit_numbers.sort[0]
end
# Walk a '/'-separated folder path below the datacenter's vmFolder and
# return the final folder object (or nil if a segment is missing).
# NOTE(review): `abort` terminates the whole process on an unexpected
# object type — consider raising instead so callers can handle it.
def find_folder(foldername)
ensure_connected @connection, $credentials
datacenter = @connection.serviceInstance.find_datacenter
base = datacenter.vmFolder
folders = foldername.split('/')
folders.each do |folder|
case base
when RbVmomi::VIM::Folder
base = base.childEntity.find { |f| f.name == folder }
else
abort "Unexpected object type encountered (#{base.class}) while finding folder"
end
end
base
end
# Returns an array containing cumulative CPU and memory utilization of a host, and its object reference
# Returns nil (host excluded) when the host is in maintenance mode, not
# 'green', over the utilization limit, or (if +model+ given) a CPU mismatch.
# Params:
# +model+:: CPU arch version to match on
# +limit+:: Hard limit for CPU or memory utilization beyond which a host is excluded for deployments
def get_host_utilization(host, model=nil, limit=90)
if model
return nil unless host_has_cpu_model? host, model
end
return nil if host.runtime.inMaintenanceMode
return nil unless host.overallStatus == 'green'
cpu_utilization = cpu_utilization_for host
memory_utilization = memory_utilization_for host
return nil if cpu_utilization > limit
return nil if memory_utilization > limit
# Combined score first so Array#sort orders least-loaded hosts first.
[ cpu_utilization + memory_utilization, host ]
end
# True when the host's CPU architecture/version token matches +model+.
def host_has_cpu_model?(host, model)
get_host_cpu_arch_version(host) == model
end
# Extract the CPU architecture/version token from the host's CPU model
# string: the fifth whitespace-separated word of cpuPkg[0].description,
# e.g. "Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz" => "v2".
def get_host_cpu_arch_version(host)
  description = host.hardware.cpuPkg[0].description
  description.split[4]
end
# Percentage of the host's total CPU capacity (MHz * cores) currently used.
def cpu_utilization_for(host)
  stats = host.summary.quickStats
  hardware = host.summary.hardware
  total_mhz = hardware.cpuMhz * hardware.numCpuCores
  (stats.overallCpuUsage.to_f / total_mhz.to_f) * 100
end
# Percentage of the host's total memory currently in use.
# overallMemoryUsage is reported in MB; memorySize in bytes, hence the
# integer division down to MB (as in the original).
def memory_utilization_for(host)
  used_mb = host.summary.quickStats.overallMemoryUsage
  total_mb = host.summary.hardware.memorySize / 1024 / 1024
  (used_mb.to_f / total_mb.to_f) * 100
end
# Return the host object in +cluster+ with the lowest combined CPU+memory
# utilization score (see get_host_utilization for the scoring tuple).
def find_least_used_host(cluster)
ensure_connected @connection, $credentials
cluster_object = find_cluster(cluster)
target_hosts = get_cluster_host_utilization(cluster_object)
# Tuples sort by score first; [0][1] is the host of the lowest score.
least_used_host = target_hosts.sort[0][1]
least_used_host
end
# Look up a cluster by name directly under the datacenter's hostFolder.
# NOTE(review): unlike its siblings this does not call ensure_connected
# first — confirm it is only ever reached through a method that does.
def find_cluster(cluster)
datacenter = @connection.serviceInstance.find_datacenter
datacenter.hostFolder.children.find { |cluster_object| cluster_object.name == cluster }
end
# Utilization tuples ([score, host]) for every usable host in +cluster+;
# hosts rejected by get_host_utilization (nil) are dropped.
def get_cluster_host_utilization(cluster)
  cluster.host.map { |host| get_host_utilization(host) }.compact
end
# Pick the least-loaded host in the VM's own cluster whose CPU model
# matches the VM's current host.  Returns [host_object, host_name].
# NOTE(review): this takes a VM *object* (reads vm.summary.runtime.host),
# while the Base contract declares a vm_name string — reconcile during the
# refactor.
def find_least_used_compatible_host(vm)
ensure_connected @connection, $credentials
source_host = vm.summary.runtime.host
model = get_host_cpu_arch_version(source_host)
cluster = source_host.parent
target_hosts = []
cluster.host.each do |host|
host_usage = get_host_utilization(host, model)
target_hosts << host_usage if host_usage
end
target_host = target_hosts.sort[0][1]
[target_host, target_host.name]
end
# Walk a '/'-separated resource-pool path below the datacenter's hostFolder
# and return the resource pool object.  `abort`s on unexpected node types.
def find_pool(poolname)
ensure_connected @connection, $credentials
datacenter = @connection.serviceInstance.find_datacenter
base = datacenter.hostFolder
pools = poolname.split('/')
pools.each do |pool|
case base
when RbVmomi::VIM::Folder
base = base.childEntity.find { |f| f.name == pool }
when RbVmomi::VIM::ClusterComputeResource
base = base.resourcePool.resourcePool.find { |f| f.name == pool }
when RbVmomi::VIM::ResourcePool
base = base.resourcePool.find { |f| f.name == pool }
else
abort "Unexpected object type encountered (#{base.class}) while finding resource pool"
end
end
# NOTE(review): this guard looks inverted — it calls .resourcePool on
# objects that are NOT ResourcePools responding to :resourcePool; verify
# against the original VsphereHelper implementation.
base = base.resourcePool unless base.is_a?(RbVmomi::VIM::ResourcePool) && base.respond_to?(:resourcePool)
base
end
# Locate a named snapshot on +vm+.  Returns nil when the VM has no
# snapshots at all, or when the name is not found in the tree.
def find_snapshot(vm, snapshotname)
  return unless vm.snapshot
  get_snapshot_list(vm.snapshot.rootSnapshotList, snapshotname)
end
# Find a VM by name: try the fast DNS-based lookup first, then fall back
# to the exhaustive PropertyCollector search.
def find_vm(vmname)
ensure_connected @connection, $credentials
find_vm_light(vmname) || find_vm_heavy(vmname)[vmname]
end
# Fast VM lookup via the vCenter search index (matches on DNS name).
# Returns the VM object or nil.
def find_vm_light(vmname)
ensure_connected @connection, $credentials
@connection.searchIndex.FindByDnsName(vmSearch: true, dnsName: vmname)
end
# Exhaustive VM lookup: enumerate every VirtualMachine via the
# PropertyCollector (paginated) and match on the 'name' property.
# vmname : a single name or an Array of names
# returns a hash of name => VM object for the names that were found.
def find_vm_heavy(vmname)
ensure_connected @connection, $credentials
vmname = vmname.is_a?(Array) ? vmname : [vmname]
containerView = get_base_vm_container_from @connection
propertyCollector = @connection.propertyCollector
# Traverse the container view's 'view' property to reach the VMs.
objectSet = [{
obj: containerView,
skip: true,
selectSet: [RbVmomi::VIM::TraversalSpec.new(
name: 'gettingTheVMs',
path: 'view',
skip: false,
type: 'ContainerView'
)]
}]
# Only fetch the 'name' property — keeps the payload small.
propSet = [{
pathSet: ['name'],
type: 'VirtualMachine'
}]
results = propertyCollector.RetrievePropertiesEx(
specSet: [{
objectSet: objectSet,
propSet: propSet
}],
options: { maxObjects: nil }
)
vms = {}
results.objects.each do |result|
name = result.propSet.first.val
next unless vmname.include? name
vms[name] = result.obj
end
# A token means vCenter paginated the result set; keep fetching pages.
while results.token
results = propertyCollector.ContinueRetrievePropertiesEx(token: results.token)
results.objects.each do |result|
name = result.propSet.first.val
next unless vmname.include? name
vms[name] = result.obj
end
end
vms
end
# List the numbered VMDK layout files ("<vm>/<vm>_<n>.vmdk") belonging to
# +vmname+ on +datastore+.  Used by add_disk to number the next disk.
def find_vmdks(vmname, datastore)
ensure_connected @connection, $credentials
disks = []
vmdk_datastore = find_datastore(datastore)
vm_files = vmdk_datastore._connection.serviceContent.propertyCollector.collectMultiple vmdk_datastore.vm, 'layoutEx.file'
vm_files.keys.each do |f|
vm_files[f]['layoutEx.file'].each do |l|
if l.name.match(/^\[#{vmdk_datastore.name}\] #{vmname}\/#{vmname}_([0-9]+).vmdk/)
disks.push(l)
end
end
end
disks
end
# Create a recursive ContainerView over every VirtualMachine reachable
# from the root folder of +connection+.
# NOTE(review): ensure_connected probes the instance-level @connection
# even though a connection parameter is supplied — confirm both always
# refer to the same session.
def get_base_vm_container_from(connection)
ensure_connected @connection, $credentials
viewManager = connection.serviceContent.viewManager
viewManager.CreateContainerView(
container: connection.serviceContent.rootFolder,
recursive: true,
type: ['VirtualMachine']
)
end
# Depth-first search of a snapshot tree for the snapshot named
# +snapshotname+.  Returns the matching snapshot object, or nil when the
# name does not occur anywhere in the tree.
def get_snapshot_list(tree, snapshotname)
  found = nil
  tree.each do |node|
    next if found
    found = if node.name == snapshotname
              node.snapshot
            else
              get_snapshot_list(node.childSnapshotList, snapshotname)
            end
  end
  found
end
# Relocate (vMotion) the VM object +vm+ onto +host+ and block until the
# task completes.
def migrate_vm_host(vm, host)
relospec = RbVmomi::VIM.VirtualMachineRelocateSpec(host: host)
vm.RelocateVM_Task(spec: relospec).wait_for_completion
end
# Close the underlying vSphere session.
def close
@connection.close
end
end
end
end
end
module Vmpooler
# Empty placeholder kept during the POOLER-70 refactor so that constant
# references to Vmpooler::VsphereHelper still resolve.
# NOTE(review): the old helper's behavior now lives in
# PoolManager::BackingService::Vsphere; confirm remaining
# `VsphereHelper.new` call sites (disk/snapshot queues) are migrated,
# since this stub defines no initialize.
class VsphereHelper
end
end

View file

@ -12,51 +12,76 @@ module Vmpooler
# Connect to Redis
$redis = redis
# vSphere object
$vsphere = {}
# per pool VM Backing Services
$backing_services = {}
# Our thread-tracker object
$threads = {}
# WARNING DEBUG
$logger.log('d',"Flushing REDIS WARNING!!!")
$redis.flushdb
end
# Check the state of a VM
def check_pending_vm(vm, pool, timeout, vsphere)
# DONE
def check_pending_vm(vm, pool, timeout, backingservice)
Thread.new do
_check_pending_vm(vm, pool, timeout, vsphere)
_check_pending_vm(vm, pool, timeout, backingservice)
end
end
def open_socket(host, domain=nil, timeout=5, port=22, &block)
Timeout.timeout(timeout) do
target_host = host
target_host = "#{host}.#{domain}" if domain
sock = TCPSocket.new target_host, port
begin
yield sock if block_given?
ensure
sock.close
end
end
end
def _check_pending_vm(vm, pool, timeout, vsphere)
host = vsphere.find_vm(vm)
# DONE
def _check_pending_vm(vm, pool, timeout, backingservice)
host = backingservice.get_vm(vm)
if ! host
fail_pending_vm(vm, pool, timeout, false)
return
end
open_socket vm
if backingservice.is_vm_ready?(vm,pool,timeout)
move_pending_vm_to_ready(vm, pool, host)
rescue
else
fail "VM is not ready"
end
rescue => err
$logger.log('s', "[!] [#{pool}] '#{vm}' errored while checking a pending vm : #{err}")
fail_pending_vm(vm, pool, timeout)
raise
end
# def open_socket(host, domain=nil, timeout=5, port=22, &block)
# Timeout.timeout(timeout) do
# target_host = host
# target_host = "#{host}.#{domain}" if domain
# sock = TCPSocket.new target_host, port
# begin
# yield sock if block_given?
# ensure
# sock.close
# end
# end
# end
# def _check_pending_vm(vm, pool, timeout, vsphere)
# host = vsphere.find_vm(vm)
# if ! host
# fail_pending_vm(vm, pool, timeout, false)
# return
# end
# open_socket vm
# move_pending_vm_to_ready(vm, pool, host)
# rescue
# fail_pending_vm(vm, pool, timeout)
# end
# DONE
def remove_nonexistent_vm(vm, pool)
$redis.srem("vmpooler__pending__#{pool}", vm)
$logger.log('d', "[!] [#{pool}] '#{vm}' no longer exists. Removing from pending.")
end
# DONE
def fail_pending_vm(vm, pool, timeout, exists=true)
clone_stamp = $redis.hget("vmpooler__vm__#{vm}", 'clone')
return if ! clone_stamp
@ -74,14 +99,11 @@ module Vmpooler
$logger.log('d', "Fail pending VM failed with an error: #{err}")
end
# DONE
def move_pending_vm_to_ready(vm, pool, host)
if (host.summary) &&
(host.summary.guest) &&
(host.summary.guest.hostName) &&
(host.summary.guest.hostName == vm)
if host['hostname'] == vm
begin
Socket.getaddrinfo(vm, nil) # WTF?
Socket.getaddrinfo(vm, nil) # WTF? I assume this is just priming the local DNS resolver cache?!?!
rescue
end
@ -91,58 +113,60 @@ module Vmpooler
$redis.smove('vmpooler__pending__' + pool, 'vmpooler__ready__' + pool, vm)
$redis.hset('vmpooler__boot__' + Date.today.to_s, pool + ':' + vm, finish)
$logger.log('s', "[>] [#{pool}] '#{vm}' moved to 'ready' queue")
$logger.log('s', "[>] [#{pool}] '#{vm}' moved from 'pending' to 'ready' queue")
end
end
def check_ready_vm(vm, pool, ttl, vsphere)
# DONE
def check_ready_vm(vm, pool, ttl, backingservice)
Thread.new do
_check_ready_vm(vm, pool, ttl, backingservice)
end
end
# DONE
def _check_ready_vm(vm, pool, ttl, backingservice)
host = backingservice.get_vm(vm)
# Check if the host even exists
if !host
$redis.srem('vmpooler__ready__' + pool, vm)
$logger.log('s', "[!] [#{pool}] '#{vm}' not found in inventory for pool #{pool}, removed from 'ready' queue")
return
end
# Check if the hosts TTL has expired
if ttl > 0
if (((Time.now - host.runtime.bootTime) / 60).to_s[/^\d+\.\d{1}/].to_f) > ttl
if (((Time.now - host['boottime']) / 60).to_s[/^\d+\.\d{1}/].to_f) > ttl
$redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm)
$logger.log('d', "[!] [#{pool}] '#{vm}' reached end of TTL after #{ttl} minutes, removed from 'ready' queue")
end
end
# Periodically check that the VM is available
check_stamp = $redis.hget('vmpooler__vm__' + vm, 'check')
if
(!check_stamp) ||
(((Time.now - Time.parse(check_stamp)) / 60) > $config[:config]['vm_checktime'])
$redis.hset('vmpooler__vm__' + vm, 'check', Time.now)
host = vsphere.find_vm(vm)
if host
# Check if the VM is not powered on
if
(host.runtime) &&
(host.runtime.powerState) &&
(host.runtime.powerState != 'poweredOn')
(host['powerstate'] != 'PoweredOn')
$redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm)
$logger.log('d', "[!] [#{pool}] '#{vm}' appears to be powered off, removed from 'ready' queue")
end
if
(host.summary.guest) &&
(host.summary.guest.hostName) &&
(host.summary.guest.hostName != vm)
# Check if the hostname has magically changed from underneath Pooler
if (host['hostname'] != vm)
$redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm)
$logger.log('d', "[!] [#{pool}] '#{vm}' has mismatched hostname, removed from 'ready' queue")
end
else
$redis.srem('vmpooler__ready__' + pool, vm)
$logger.log('s', "[!] [#{pool}] '#{vm}' not found in vCenter inventory, removed from 'ready' queue")
end
# Check if the VM is still ready/available
begin
open_socket vm
fail "VM #{vm} is not ready" unless backingservice.is_vm_ready?(vm,pool,5)
rescue
if $redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm)
$logger.log('d', "[!] [#{pool}] '#{vm}' is unreachable, removed from 'ready' queue")
@ -151,17 +175,21 @@ module Vmpooler
end
end
end
end
rescue => err
$logger.log('s', "[!] [#{vm['poolname']}] '#{vm['hostname']}' failed while checking a ready vm : #{err}")
raise
end
def check_running_vm(vm, pool, ttl, vsphere)
# DONE
def check_running_vm(vm, pool, ttl, backingservice)
Thread.new do
_check_running_vm(vm, pool, ttl, vsphere)
_check_running_vm(vm, pool, ttl, backingservice)
end
end
def _check_running_vm(vm, pool, ttl, vsphere)
host = vsphere.find_vm(vm)
# DONE
def _check_running_vm(vm, pool, ttl, backingservice)
host = backingservice.get_vm(vm)
if host
queue_from, queue_to = 'running', 'completed'
@ -179,114 +207,131 @@ module Vmpooler
end
end
# DONE
def move_vm_queue(pool, vm, queue_from, queue_to, msg)
$redis.smove("vmpooler__#{queue_from}__#{pool}", "vmpooler__#{queue_to}__#{pool}", vm)
$logger.log('d', "[!] [#{pool}] '#{vm}' #{msg}")
end
# Clone a VM
def clone_vm(template, folder, datastore, target, vsphere)
# DONE
def clone_vm(pool, backingservice)
Thread.new do
begin
vm = {}
if template =~ /\//
templatefolders = template.split('/')
vm['template'] = templatefolders.pop
backingservice.create_vm(pool)
end
end
if templatefolders
vm[vm['template']] = vsphere.find_folder(templatefolders.join('/')).find(vm['template'])
else
fail 'Please provide a full path to the template'
end
# Clone a VM
# def clone_vm(template, folder, datastore, target, vsphere)
# Thread.new do
# begin
# vm = {}
if vm['template'].length == 0
fail "Unable to find template '#{vm['template']}'!"
end
# if template =~ /\//
# templatefolders = template.split('/')
# vm['template'] = templatefolders.pop
# end
# Generate a randomized hostname
o = [('a'..'z'), ('0'..'9')].map(&:to_a).flatten
vm['hostname'] = $config[:config]['prefix'] + o[rand(25)] + (0...14).map { o[rand(o.length)] }.join
# if templatefolders
# vm[vm['template']] = vsphere.find_folder(templatefolders.join('/')).find(vm['template'])
# else
# fail 'Please provide a full path to the template'
# end
# Add VM to Redis inventory ('pending' pool)
$redis.sadd('vmpooler__pending__' + vm['template'], vm['hostname'])
$redis.hset('vmpooler__vm__' + vm['hostname'], 'clone', Time.now)
$redis.hset('vmpooler__vm__' + vm['hostname'], 'template', vm['template'])
# if vm['template'].length == 0
# fail "Unable to find template '#{vm['template']}'!"
# end
# Annotate with creation time, origin template, etc.
# Add extraconfig options that can be queried by vmtools
configSpec = RbVmomi::VIM.VirtualMachineConfigSpec(
annotation: JSON.pretty_generate(
name: vm['hostname'],
created_by: $config[:vsphere]['username'],
base_template: vm['template'],
creation_timestamp: Time.now.utc
),
extraConfig: [
{ key: 'guestinfo.hostname',
value: vm['hostname']
}
]
)
# # Generate a randomized hostname
# o = [('a'..'z'), ('0'..'9')].map(&:to_a).flatten
# vm['hostname'] = $config[:config]['prefix'] + o[rand(25)] + (0...14).map { o[rand(o.length)] }.join
# Choose a clone target
if target
$clone_target = vsphere.find_least_used_host(target)
elsif $config[:config]['clone_target']
$clone_target = vsphere.find_least_used_host($config[:config]['clone_target'])
end
# # Add VM to Redis inventory ('pending' pool)
# $redis.sadd('vmpooler__pending__' + vm['template'], vm['hostname'])
# $redis.hset('vmpooler__vm__' + vm['hostname'], 'clone', Time.now)
# $redis.hset('vmpooler__vm__' + vm['hostname'], 'template', vm['template'])
# Put the VM in the specified folder and resource pool
relocateSpec = RbVmomi::VIM.VirtualMachineRelocateSpec(
datastore: vsphere.find_datastore(datastore),
host: $clone_target,
diskMoveType: :moveChildMostDiskBacking
)
# # Annotate with creation time, origin template, etc.
# # Add extraconfig options that can be queried by vmtools
# configSpec = RbVmomi::VIM.VirtualMachineConfigSpec(
# annotation: JSON.pretty_generate(
# name: vm['hostname'],
# created_by: $config[:vsphere]['username'],
# base_template: vm['template'],
# creation_timestamp: Time.now.utc
# ),
# extraConfig: [
# { key: 'guestinfo.hostname',
# value: vm['hostname']
# }
# ]
# )
# Create a clone spec
spec = RbVmomi::VIM.VirtualMachineCloneSpec(
location: relocateSpec,
config: configSpec,
powerOn: true,
template: false
)
# # Choose a clone target
# if target
# $clone_target = vsphere.find_least_used_host(target)
# elsif $config[:config]['clone_target']
# $clone_target = vsphere.find_least_used_host($config[:config]['clone_target'])
# end
# Clone the VM
$logger.log('d', "[ ] [#{vm['template']}] '#{vm['hostname']}' is being cloned from '#{vm['template']}'")
# # Put the VM in the specified folder and resource pool
# relocateSpec = RbVmomi::VIM.VirtualMachineRelocateSpec(
# datastore: vsphere.find_datastore(datastore),
# host: $clone_target,
# diskMoveType: :moveChildMostDiskBacking
# )
begin
start = Time.now
vm[vm['template']].CloneVM_Task(
folder: vsphere.find_folder(folder),
name: vm['hostname'],
spec: spec
).wait_for_completion
finish = '%.2f' % (Time.now - start)
# # Create a clone spec
# spec = RbVmomi::VIM.VirtualMachineCloneSpec(
# location: relocateSpec,
# config: configSpec,
# powerOn: true,
# template: false
# )
$redis.hset('vmpooler__clone__' + Date.today.to_s, vm['template'] + ':' + vm['hostname'], finish)
$redis.hset('vmpooler__vm__' + vm['hostname'], 'clone_time', finish)
# # Clone the VM
# $logger.log('d', "[ ] [#{vm['template']}] '#{vm['hostname']}' is being cloned from '#{vm['template']}'")
$logger.log('s', "[+] [#{vm['template']}] '#{vm['hostname']}' cloned from '#{vm['template']}' in #{finish} seconds")
rescue => err
$logger.log('s', "[!] [#{vm['template']}] '#{vm['hostname']}' clone failed with an error: #{err}")
$redis.srem('vmpooler__pending__' + vm['template'], vm['hostname'])
raise
end
# begin
# start = Time.now
# vm[vm['template']].CloneVM_Task(
# folder: vsphere.find_folder(folder),
# name: vm['hostname'],
# spec: spec
# ).wait_for_completion
# finish = '%.2f' % (Time.now - start)
$redis.decr('vmpooler__tasks__clone')
# $redis.hset('vmpooler__clone__' + Date.today.to_s, vm['template'] + ':' + vm['hostname'], finish)
# $redis.hset('vmpooler__vm__' + vm['hostname'], 'clone_time', finish)
$metrics.timing("clone.#{vm['template']}", finish)
rescue => err
$logger.log('s', "[!] [#{vm['template']}] '#{vm['hostname']}' failed while preparing to clone with an error: #{err}")
raise
end
# $logger.log('s', "[+] [#{vm['template']}] '#{vm['hostname']}' cloned from '#{vm['template']}' in #{finish} seconds")
# rescue => err
# $logger.log('s', "[!] [#{vm['template']}] '#{vm['hostname']}' clone failed with an error: #{err}")
# $redis.srem('vmpooler__pending__' + vm['template'], vm['hostname'])
# raise
# end
# $redis.decr('vmpooler__tasks__clone')
# $metrics.timing("clone.#{vm['template']}", finish)
# rescue => err
# $logger.log('s', "[!] [#{vm['template']}] '#{vm['hostname']}' failed while preparing to clone with an error: #{err}")
# raise
# end
# end
# end
# Destroy a VM
# DONE
# TODO These calls should wrap the rescue block, not inside. This traps bad functions. Need to modify all functions
def destroy_vm(vm, pool, backingservice)
Thread.new do
_destroy_vm(vm, pool, backingservice)
end
end
# Destroy a VM
def destroy_vm(vm, pool, vsphere)
Thread.new do
# DONE
def _destroy_vm(vm, pool, backingservice)
$redis.srem('vmpooler__completed__' + pool, vm)
$redis.hdel('vmpooler__active__' + pool, vm)
$redis.hset('vmpooler__vm__' + vm, 'destroy', Time.now)
@ -294,36 +339,21 @@ module Vmpooler
# Auto-expire metadata key
$redis.expire('vmpooler__vm__' + vm, ($config[:redis]['data_ttl'].to_i * 60 * 60))
host = vsphere.find_vm(vm)
if host
start = Time.now
if
(host.runtime) &&
(host.runtime.powerState) &&
(host.runtime.powerState == 'poweredOn')
$logger.log('d', "[ ] [#{pool}] '#{vm}' is being shut down")
host.PowerOffVM_Task.wait_for_completion
end
host.Destroy_Task.wait_for_completion
finish = '%.2f' % (Time.now - start)
$logger.log('s', "[-] [#{pool}] '#{vm}' destroyed in #{finish} seconds")
$metrics.timing("destroy.#{pool}", finish)
end
end
backingservice.destroy_vm(vm,pool)
rescue => err
$logger.log('d', "[!] [#{pool}] '#{vm}' failed while destroying the VM with an error: #{err}")
raise
end
def create_vm_disk(vm, disk_size, vsphere)
# TODO This is all vSphere specific
Thread.new do
_create_vm_disk(vm, disk_size, vsphere)
end
end
def _create_vm_disk(vm, disk_size, vsphere)
# TODO This is all vSphere specific
host = vsphere.find_vm(vm)
if (host) && ((! disk_size.nil?) && (! disk_size.empty?) && (disk_size.to_i > 0))
@ -358,12 +388,14 @@ module Vmpooler
end
def create_vm_snapshot(vm, snapshot_name, vsphere)
# TODO This is all vSphere specific
Thread.new do
_create_vm_snapshot(vm, snapshot_name, vsphere)
end
end
def _create_vm_snapshot(vm, snapshot_name, vsphere)
# TODO This is all vSphere specific
host = vsphere.find_vm(vm)
if (host) && ((! snapshot_name.nil?) && (! snapshot_name.empty?))
@ -387,12 +419,14 @@ module Vmpooler
end
# TODO: vSphere-specific — still takes the raw vsphere helper rather than a
# backing service.
# Kick off an asynchronous revert of +vm+ to snapshot +snapshot_name+; the
# work happens in _revert_vm_snapshot. Returns the spawned Thread.
def revert_vm_snapshot(vm, snapshot_name, vsphere)
  Thread.new { _revert_vm_snapshot(vm, snapshot_name, vsphere) }
end
def _revert_vm_snapshot(vm, snapshot_name, vsphere)
# TODO This is all vSphere specific
host = vsphere.find_vm(vm)
if host
@ -413,6 +447,7 @@ module Vmpooler
end
def check_disk_queue
# TODO This is all vSphere specific
$logger.log('d', "[*] [disk_manager] starting worker thread")
$vsphere['disk_manager'] ||= Vmpooler::VsphereHelper.new $config, $metrics
@ -426,6 +461,7 @@ module Vmpooler
end
def _check_disk_queue(vsphere)
# TODO This is all vSphere specific
vm = $redis.spop('vmpooler__tasks__disk')
unless vm.nil?
@ -439,6 +475,7 @@ module Vmpooler
end
def check_snapshot_queue
# TODO This is all vSphere specific
$logger.log('d', "[*] [snapshot_manager] starting worker thread")
$vsphere['snapshot_manager'] ||= Vmpooler::VsphereHelper.new $config, $metrics
@ -452,6 +489,7 @@ module Vmpooler
end
def _check_snapshot_queue(vsphere)
# TODO This is all vSphere specific
vm = $redis.spop('vmpooler__tasks__snapshot')
unless vm.nil?
@ -475,23 +513,26 @@ module Vmpooler
end
end
# DONE
# Normalizes the configured migration limit.
# A disabled setting (0, nil or false) yields +false+; a limit >= 1 is
# returned unchanged; any other numeric value yields +nil+ (also falsey).
def migration_limit(limit)
  return false if !limit || limit == 0
  limit if limit >= 1
end
# DONE
# Queue an asynchronous migration of +vm+ in +pool+ via the backing service;
# the work happens in _migrate_vm. Returns the spawned Thread.
#
# Fix: diff-merge residue had left the superseded signature
# (vm, pool, vsphere) and its delegate call nested inside this definition,
# producing a def-within-a-def and a double dispatch; only the
# backingservice form is kept.
def migrate_vm(vm, pool, backingservice)
  Thread.new do
    _migrate_vm(vm, pool, backingservice)
  end
end
def _migrate_vm(vm, pool, vsphere)
# DONE
def _migrate_vm(vm, pool, backingservice)
begin
$redis.srem('vmpooler__migrating__' + pool, vm)
vm_object = vsphere.find_vm(vm)
parent_host, parent_host_name = get_vm_host_info(vm_object)
parent_host_name = backingservice.get_vm_host(vm)
migration_limit = migration_limit $config[:config]['migration_limit']
migration_count = $redis.scard('vmpooler__migration')
@ -504,11 +545,11 @@ module Vmpooler
return
else
$redis.sadd('vmpooler__migration', vm)
host, host_name = vsphere.find_least_used_compatible_host(vm_object)
if host == parent_host
host_name = backingservice.find_least_used_compatible_host(vm)
if host_name == parent_host_name
$logger.log('s', "[ ] [#{pool}] No migration required for '#{vm}' running on #{parent_host_name}")
else
finish = migrate_vm_and_record_timing(vm_object, vm, pool, host, parent_host_name, host_name, vsphere)
finish = migrate_vm_and_record_timing(vm, pool, parent_host_name, host_name, backingservice)
$logger.log('s', "[>] [#{pool}] '#{vm}' migrated from #{parent_host_name} to #{host_name} in #{finish} seconds")
end
remove_vmpooler_migration_vm(pool, vm)
@ -520,11 +561,13 @@ module Vmpooler
end
end
# Looks up the host currently running +vm_object+ and returns a two-element
# array of [host object, host name].
def get_vm_host_info(vm_object)
  host = vm_object.summary.runtime.host
  [host, host.name]
end
# TODO This is all vSphere specific
# def get_vm_host_info(vm_object)
# parent_host = vm_object.summary.runtime.host
# [parent_host, parent_host.name]
# end
# DONE
def remove_vmpooler_migration_vm(pool, vm)
begin
$redis.srem('vmpooler__migration', vm)
@ -533,9 +576,10 @@ module Vmpooler
end
end
def migrate_vm_and_record_timing(vm_object, vm_name, pool, host, source_host_name, dest_host_name, vsphere)
# DONE
def migrate_vm_and_record_timing(vm_name, pool, source_host_name, dest_host_name, backingservice)
start = Time.now
vsphere.migrate_vm_host(vm_object, host)
backingservice.migrate_vm_to_host(vm_name, dest_host_name)
finish = '%.2f' % (Time.now - start)
$metrics.timing("migrate.#{pool}", finish)
$metrics.increment("migrate_from.#{source_host_name}")
@ -546,26 +590,35 @@ module Vmpooler
finish
end
# DONE
# Starts the worker thread for +pool+: instantiates (once) the pool's
# backing service from its 'backingservice' key, then loops forever calling
# _check_pool for that pool. The thread is registered in $threads.
#
# Fix: diff-merge residue had left the superseded lines in place
# ($vsphere[...] ||= VsphereHelper.new, the _check_pool(pool, $vsphere[...])
# call and sleep(5)) interleaved with the new backing-service code; only the
# new implementation is kept.
def check_pool(pool)
  $logger.log('d', "[*] [#{pool['name']}] starting worker thread")
  case pool['backingservice']
  when 'vsphere'
    $backing_services[pool['name']] ||= Vmpooler::PoolManager::BackingService::Vsphere.new({ 'metrics' => $metrics}) # TODO Vmpooler::VsphereHelper.new $config[:vsphere], $metrics
  when 'dummy'
    $backing_services[pool['name']] ||= Vmpooler::PoolManager::BackingService::Dummy.new($config[:backingservice][:dummy])
  else
    $logger.log('s', "[!] backing service #{pool['backingservice']} is unknown for pool [#{pool['name']}]")
  end
  $threads[pool['name']] = Thread.new do
    loop do
      _check_pool(pool, $backing_services[pool['name']])
      # TODO Should this be configurable?
      sleep(2) # Should be 5
    end
  end
end
def _check_pool(pool, vsphere)
def _check_pool(pool,backingservice)
puts "CHECK POOL STARTING"
# INVENTORY
# DONE!!
inventory = {}
begin
base = vsphere.find_folder(pool['folder'])
base.childEntity.each do |vm|
backingservice.vms_in_pool(pool).each do |vm|
if
(! $redis.sismember('vmpooler__running__' + pool['name'], vm['name'])) &&
(! $redis.sismember('vmpooler__ready__' + pool['name'], vm['name'])) &&
@ -586,11 +639,12 @@ module Vmpooler
end
# RUNNING
# DONE!!
$redis.smembers("vmpooler__running__#{pool['name']}").each do |vm|
if inventory[vm]
begin
vm_lifetime = $redis.hget('vmpooler__vm__' + vm, 'lifetime') || $config[:config]['vm_lifetime'] || 12
check_running_vm(vm, pool['name'], vm_lifetime, vsphere)
check_running_vm(vm, pool['name'], vm_lifetime, backingservice)
rescue => err
$logger.log('d', "[!] [#{pool['name']}] _check_pool with an error while evaluating running VMs: #{err}")
end
@ -598,10 +652,11 @@ module Vmpooler
end
# READY
# DONE!!
$redis.smembers("vmpooler__ready__#{pool['name']}").each do |vm|
if inventory[vm]
begin
check_ready_vm(vm, pool['name'], pool['ready_ttl'] || 0, vsphere)
check_ready_vm(vm, pool['name'], pool['ready_ttl'] || 0, backingservice)
rescue => err
$logger.log('d', "[!] [#{pool['name']}] _check_pool failed with an error while evaluating ready VMs: #{err}")
end
@ -609,11 +664,12 @@ module Vmpooler
end
# PENDING
# DONE!!
$redis.smembers("vmpooler__pending__#{pool['name']}").each do |vm|
pool_timeout = pool['timeout'] || $config[:config]['timeout'] || 15
if inventory[vm]
begin
check_pending_vm(vm, pool['name'], pool_timeout, vsphere)
check_pending_vm(vm, pool['name'], pool_timeout, backingservice)
rescue => err
$logger.log('d', "[!] [#{pool['name']}] _check_pool failed with an error while evaluating pending VMs: #{err}")
end
@ -623,10 +679,11 @@ module Vmpooler
end
# COMPLETED
# DONE!!
$redis.smembers("vmpooler__completed__#{pool['name']}").each do |vm|
if inventory[vm]
begin
destroy_vm(vm, pool['name'], vsphere)
destroy_vm(vm, pool['name'], backingservice)
rescue => err
$redis.srem("vmpooler__completed__#{pool['name']}", vm)
$redis.hdel("vmpooler__active__#{pool['name']}", vm)
@ -642,6 +699,7 @@ module Vmpooler
end
# DISCOVERED
# DONE
begin
$redis.smembers("vmpooler__discovered__#{pool['name']}").each do |vm|
%w(pending ready running completed).each do |queue|
@ -660,10 +718,11 @@ module Vmpooler
end
# MIGRATIONS
# DONE
$redis.smembers("vmpooler__migrating__#{pool['name']}").each do |vm|
if inventory[vm]
begin
migrate_vm(vm, pool['name'], vsphere)
migrate_vm(vm, pool['name'], backingservice)
rescue => err
$logger.log('s', "[x] [#{pool['name']}] '#{vm}' failed to migrate: #{err}")
end
@ -671,6 +730,7 @@ module Vmpooler
end
# REPOPULATE
# DONE
ready = $redis.scard("vmpooler__ready__#{pool['name']}")
total = $redis.scard("vmpooler__pending__#{pool['name']}") + ready
@ -693,14 +753,7 @@ module Vmpooler
if $redis.get('vmpooler__tasks__clone').to_i < $config[:config]['task_limit'].to_i
begin
$redis.incr('vmpooler__tasks__clone')
clone_vm(
pool['template'],
pool['folder'],
pool['datastore'],
pool['clone_target'],
vsphere
)
clone_vm(pool,backingservice)
rescue => err
$logger.log('s', "[!] [#{pool['name']}] clone failed during check_pool with an error: #{err}")
$redis.decr('vmpooler__tasks__clone')
@ -721,21 +774,26 @@ module Vmpooler
$redis.set('vmpooler__tasks__clone', 0)
# Clear out vmpooler__migrations since stale entries may be left after a restart
$redis.del('vmpooler__migration')
# Set default backingservice for all pools that do not have one defined
$config[:pools].each do |pool|
pool['backingservice'] = 'vsphere' if pool['backingservice'].nil?
end
loop do
if ! $threads['disk_manager']
check_disk_queue
elsif ! $threads['disk_manager'].alive?
$logger.log('d', "[!] [disk_manager] worker thread died, restarting")
check_disk_queue
end
# DEBUG TO DO
# if ! $threads['disk_manager']
# check_disk_queue
# elsif ! $threads['disk_manager'].alive?
# $logger.log('d', "[!] [disk_manager] worker thread died, restarting")
# check_disk_queue
# end
if ! $threads['snapshot_manager']
check_snapshot_queue
elsif ! $threads['snapshot_manager'].alive?
$logger.log('d', "[!] [snapshot_manager] worker thread died, restarting")
check_snapshot_queue
end
# if ! $threads['snapshot_manager']
# check_snapshot_queue
# elsif ! $threads['snapshot_manager'].alive?
# $logger.log('d', "[!] [snapshot_manager] worker thread died, restarting")
# check_snapshot_queue
# end
$config[:pools].each do |pool|
if ! $threads[pool['name']]

View file

@ -1,417 +1,417 @@
require 'rubygems' unless defined?(Gem)
# Thin wrapper around the RbVmomi vSphere API used by the pool manager for
# connection handling, inventory lookups, disk/snapshot management and VM
# migration.
#
# NOTE(review): #initialize stores configuration in GLOBAL variables
# ($credentials, $conf, $metrics), so all instances share that state and the
# most recently constructed instance wins — confirm this is intended before
# creating more than one helper.
module Vmpooler
  class VsphereHelper
    # Defaults applied to every virtual disk created by #add_disk.
    ADAPTER_TYPE = 'lsiLogic'
    DISK_TYPE = 'thin'
    DISK_MODE = 'persistent'

    # config  - full vmpooler config hash; [:vsphere] carries credentials,
    #           [:config] carries retry tuning (max_tries / retry_factor).
    # metrics - metrics sink responding to #increment and #timing.
    def initialize(config, metrics)
      $credentials = config[:vsphere]
      $conf = config[:config]
      $metrics = metrics
    end

    # Liveness probe: asks vCenter for the current time; any failure triggers
    # a reconnect. Note the rescue path ignores the +connection+ and
    # +credentials+ arguments and uses the globals instead, and it bumps the
    # same "connect.open" counter that connect_to_vsphere also increments.
    def ensure_connected(connection, credentials)
      connection.serviceInstance.CurrentTime
    rescue
      $metrics.increment("connect.open")
      connect_to_vsphere $credentials
    end

    # Open an RbVmomi connection with linear backoff (try * retry_factor
    # seconds between attempts).
    # NOTE(review): +try+ is incremented before the max_tries comparison, so
    # with max_tries = 3 only two attempts are made — confirm intent.
    # NOTE(review): `credentials['insecure'] || true` is always truthy, so an
    # explicit `insecure: false` in config is ignored — confirm intent.
    def connect_to_vsphere(credentials)
      max_tries = $conf['max_tries'] || 3
      retry_factor = $conf['retry_factor'] || 10
      try = 1
      begin
        @connection = RbVmomi::VIM.connect host: credentials['server'],
                                           user: credentials['username'],
                                           password: credentials['password'],
                                           insecure: credentials['insecure'] || true
        $metrics.increment("connect.open")
      rescue => err
        try += 1
        $metrics.increment("connect.fail")
        raise err if try == max_tries
        sleep(try * retry_factor)
        retry
      end
    end

    # Create a new VMDK of +size+ (GB, per the *1024*1024 KB conversion) on
    # +datastore+ and attach it to +vm+ on the first SCSI controller with a
    # free slot. Returns false when size <= 0, true on success. Blocks until
    # both the disk-creation and reconfigure tasks complete.
    def add_disk(vm, size, datastore)
      ensure_connected @connection, $credentials
      return false unless size.to_i > 0
      vmdk_datastore = find_datastore(datastore)
      # Disk files are numbered sequentially after the existing vmdks.
      vmdk_file_name = "#{vm['name']}/#{vm['name']}_#{find_vmdks(vm['name'], datastore).length + 1}.vmdk"
      controller = find_disk_controller(vm)
      vmdk_spec = RbVmomi::VIM::FileBackedVirtualDiskSpec(
        capacityKb: size.to_i * 1024 * 1024,
        adapterType: ADAPTER_TYPE,
        diskType: DISK_TYPE
      )
      vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
        datastore: vmdk_datastore,
        diskMode: DISK_MODE,
        fileName: "[#{vmdk_datastore.name}] #{vmdk_file_name}"
      )
      device = RbVmomi::VIM::VirtualDisk(
        backing: vmdk_backing,
        capacityInKB: size.to_i * 1024 * 1024,
        controllerKey: controller.key,
        key: -1,
        unitNumber: find_disk_unit_number(vm, controller)
      )
      device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
        device: device,
        operation: RbVmomi::VIM::VirtualDeviceConfigSpecOperation('add')
      )
      vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
        deviceChange: [device_config_spec]
      )
      # Create the backing file first, then attach it to the VM.
      @connection.serviceContent.virtualDiskManager.CreateVirtualDisk_Task(
        datacenter: @connection.serviceInstance.find_datacenter,
        name: "[#{vmdk_datastore.name}] #{vmdk_file_name}",
        spec: vmdk_spec
      ).wait_for_completion
      vm.ReconfigVM_Task(spec: vm_config_spec).wait_for_completion
      true
    end

    # Resolve a datastore by name within the (single) datacenter.
    def find_datastore(datastorename)
      ensure_connected @connection, $credentials
      datacenter = @connection.serviceInstance.find_datacenter
      datacenter.find_datastore(datastorename)
    end

    # Return the hardware device on +vm+ whose label equals +deviceName+,
    # or nil if none matches.
    def find_device(vm, deviceName)
      ensure_connected @connection, $credentials
      vm.config.hardware.device.each do |device|
        return device if device.deviceInfo.label == deviceName
      end
      nil
    end

    # Return the first SCSI controller on +vm+ that has fewer than 15
    # attached disks (15 usable units per controller), or nil.
    def find_disk_controller(vm)
      ensure_connected @connection, $credentials
      devices = find_disk_devices(vm)
      devices.keys.sort.each do |device|
        if devices[device]['children'].length < 15
          return find_device(vm, devices[device]['device'].deviceInfo.label)
        end
      end
      nil
    end

    # Build a map of controllerKey => { 'device' => SCSI controller,
    # 'children' => [VirtualDisk, ...] } for every SCSI controller/disk
    # pair on +vm+.
    def find_disk_devices(vm)
      ensure_connected @connection, $credentials
      devices = {}
      vm.config.hardware.device.each do |device|
        if device.is_a? RbVmomi::VIM::VirtualSCSIController
          if devices[device.controllerKey].nil?
            devices[device.key] = {}
            devices[device.key]['children'] = []
          end
          devices[device.key]['device'] = device
        end
        if device.is_a? RbVmomi::VIM::VirtualDisk
          if devices[device.controllerKey].nil?
            devices[device.controllerKey] = {}
            devices[device.controllerKey]['children'] = []
          end
          devices[device.controllerKey]['children'].push(device)
        end
      end
      devices
    end

    # Return the lowest SCSI unit number (0..15) on +controller+ not already
    # used by the controller itself or one of its disks.
    def find_disk_unit_number(vm, controller)
      ensure_connected @connection, $credentials
      used_unit_numbers = []
      available_unit_numbers = []
      devices = find_disk_devices(vm)
      devices.keys.sort.each do |c|
        next unless controller.key == devices[c]['device'].key
        used_unit_numbers.push(devices[c]['device'].scsiCtlrUnitNumber)
        devices[c]['children'].each do |disk|
          used_unit_numbers.push(disk.unitNumber)
        end
      end
      (0..15).each do |scsi_id|
        if used_unit_numbers.grep(scsi_id).length <= 0
          available_unit_numbers.push(scsi_id)
        end
      end
      available_unit_numbers.sort[0]
    end

    # Walk a '/'-separated folder path from the datacenter's root VM folder
    # and return the final Folder object. Aborts the process on an
    # unexpected node type.
    def find_folder(foldername)
      ensure_connected @connection, $credentials
      datacenter = @connection.serviceInstance.find_datacenter
      base = datacenter.vmFolder
      folders = foldername.split('/')
      folders.each do |folder|
        case base
          when RbVmomi::VIM::Folder
            base = base.childEntity.find { |f| f.name == folder }
          else
            abort "Unexpected object type encountered (#{base.class}) while finding folder"
        end
      end
      base
    end

    # Returns an array containing cumulative CPU and memory utilization of a host, and its object reference
    # Returns nil when the host is in maintenance mode, not 'green', over
    # +limit+ percent utilized, or (when +model+ is given) of a different
    # CPU model.
    # Params:
    # +model+:: CPU arch version to match on
    # +limit+:: Hard limit for CPU or memory utilization beyond which a host is excluded for deployments
    def get_host_utilization(host, model=nil, limit=90)
      if model
        return nil unless host_has_cpu_model? host, model
      end
      return nil if host.runtime.inMaintenanceMode
      return nil unless host.overallStatus == 'green'
      cpu_utilization = cpu_utilization_for host
      memory_utilization = memory_utilization_for host
      return nil if cpu_utilization > limit
      return nil if memory_utilization > limit
      [ cpu_utilization + memory_utilization, host ]
    end

    # True when the host's CPU arch version string equals +model+.
    def host_has_cpu_model?(host, model)
      get_host_cpu_arch_version(host) == model
    end

    # Extract the CPU arch/version token from the first CPU package
    # description.
    # NOTE(review): assumes the 5th whitespace-separated token of the CPU
    # description string is the version (Intel Xeon-style naming) — confirm
    # against the hardware in use.
    def get_host_cpu_arch_version(host)
      cpu_model = host.hardware.cpuPkg[0].description
      cpu_model_parts = cpu_model.split()
      arch_version = cpu_model_parts[4]
      arch_version
    end

    # Current CPU usage of +host+ as a percentage of total CPU capacity.
    def cpu_utilization_for(host)
      cpu_usage = host.summary.quickStats.overallCpuUsage
      cpu_size = host.summary.hardware.cpuMhz * host.summary.hardware.numCpuCores
      (cpu_usage.to_f / cpu_size.to_f) * 100
    end

    # Current memory usage of +host+ as a percentage of total memory (MB).
    def memory_utilization_for(host)
      memory_usage = host.summary.quickStats.overallMemoryUsage
      memory_size = host.summary.hardware.memorySize / 1024 / 1024
      (memory_usage.to_f / memory_size.to_f) * 100
    end

    # Return the host object with the lowest combined CPU+memory utilization
    # in the named cluster.
    def find_least_used_host(cluster)
      ensure_connected @connection, $credentials
      cluster_object = find_cluster(cluster)
      target_hosts = get_cluster_host_utilization(cluster_object)
      least_used_host = target_hosts.sort[0][1]
      least_used_host
    end

    # Resolve a cluster object by name from the datacenter host folder.
    def find_cluster(cluster)
      datacenter = @connection.serviceInstance.find_datacenter
      datacenter.hostFolder.children.find { |cluster_object| cluster_object.name == cluster }
    end

    # Collect [utilization, host] pairs for all eligible hosts in +cluster+
    # (ineligible hosts are filtered out by get_host_utilization).
    def get_cluster_host_utilization(cluster)
      cluster_hosts = []
      cluster.host.each do |host|
        host_usage = get_host_utilization(host)
        cluster_hosts << host_usage if host_usage
      end
      cluster_hosts
    end

    # Within the cluster of the host currently running +vm+, pick the least
    # utilized host with the same CPU model (so vMotion stays compatible).
    # Returns [host object, host name].
    def find_least_used_compatible_host(vm)
      ensure_connected @connection, $credentials
      source_host = vm.summary.runtime.host
      model = get_host_cpu_arch_version(source_host)
      cluster = source_host.parent
      target_hosts = []
      cluster.host.each do |host|
        host_usage = get_host_utilization(host, model)
        target_hosts << host_usage if host_usage
      end
      target_host = target_hosts.sort[0][1]
      [target_host, target_host.name]
    end

    # Walk a '/'-separated resource-pool path from the datacenter host
    # folder through folders, clusters and resource pools. Aborts the
    # process on an unexpected node type.
    def find_pool(poolname)
      ensure_connected @connection, $credentials
      datacenter = @connection.serviceInstance.find_datacenter
      base = datacenter.hostFolder
      pools = poolname.split('/')
      pools.each do |pool|
        case base
          when RbVmomi::VIM::Folder
            base = base.childEntity.find { |f| f.name == pool }
          when RbVmomi::VIM::ClusterComputeResource
            base = base.resourcePool.resourcePool.find { |f| f.name == pool }
          when RbVmomi::VIM::ResourcePool
            base = base.resourcePool.find { |f| f.name == pool }
          else
            abort "Unexpected object type encountered (#{base.class}) while finding resource pool"
        end
      end
      # NOTE(review): this condition unwraps anything that is not already a
      # ResourcePool responding to :resourcePool — verify the && (vs ||) is
      # intended.
      base = base.resourcePool unless base.is_a?(RbVmomi::VIM::ResourcePool) && base.respond_to?(:resourcePool)
      base
    end

    # Find a snapshot of +vm+ by name anywhere in its snapshot tree; nil if
    # the VM has no snapshots or no match exists.
    def find_snapshot(vm, snapshotname)
      if vm.snapshot
        get_snapshot_list(vm.snapshot.rootSnapshotList, snapshotname)
      end
    end

    # Look up a VM by name: DNS search-index lookup first (cheap), falling
    # back to a full property-collector scan.
    def find_vm(vmname)
      ensure_connected @connection, $credentials
      find_vm_light(vmname) || find_vm_heavy(vmname)[vmname]
    end

    # Cheap lookup via the vCenter search index (matches on DNS name).
    def find_vm_light(vmname)
      ensure_connected @connection, $credentials
      @connection.searchIndex.FindByDnsName(vmSearch: true, dnsName: vmname)
    end

    # Exhaustive lookup: enumerate every VirtualMachine name via the
    # property collector (paging through results) and return a hash of
    # name => object for the requested name(s).
    def find_vm_heavy(vmname)
      ensure_connected @connection, $credentials
      vmname = vmname.is_a?(Array) ? vmname : [vmname]
      containerView = get_base_vm_container_from @connection
      propertyCollector = @connection.propertyCollector
      objectSet = [{
        obj: containerView,
        skip: true,
        selectSet: [RbVmomi::VIM::TraversalSpec.new(
            name: 'gettingTheVMs',
            path: 'view',
            skip: false,
            type: 'ContainerView'
        )]
      }]
      propSet = [{
        pathSet: ['name'],
        type: 'VirtualMachine'
      }]
      results = propertyCollector.RetrievePropertiesEx(
        specSet: [{
          objectSet: objectSet,
          propSet: propSet
        }],
        options: { maxObjects: nil }
      )
      vms = {}
      results.objects.each do |result|
        name = result.propSet.first.val
        next unless vmname.include? name
        vms[name] = result.obj
      end
      # Continue paging while the collector hands back a continuation token.
      while results.token
        results = propertyCollector.ContinueRetrievePropertiesEx(token: results.token)
        results.objects.each do |result|
          name = result.propSet.first.val
          next unless vmname.include? name
          vms[name] = result.obj
        end
      end
      vms
    end

    # List the layout files on +datastore+ matching "<vmname>/<vmname>_N.vmdk"
    # — i.e. the extra disks previously added for this VM by add_disk.
    def find_vmdks(vmname, datastore)
      ensure_connected @connection, $credentials
      disks = []
      vmdk_datastore = find_datastore(datastore)
      vm_files = vmdk_datastore._connection.serviceContent.propertyCollector.collectMultiple vmdk_datastore.vm, 'layoutEx.file'
      vm_files.keys.each do |f|
        vm_files[f]['layoutEx.file'].each do |l|
          if l.name.match(/^\[#{vmdk_datastore.name}\] #{vmname}\/#{vmname}_([0-9]+).vmdk/)
            disks.push(l)
          end
        end
      end
      disks
    end

    # Build a recursive ContainerView over all VirtualMachines from the
    # service root folder (used as the traversal root by find_vm_heavy).
    def get_base_vm_container_from(connection)
      ensure_connected @connection, $credentials
      viewManager = connection.serviceContent.viewManager
      viewManager.CreateContainerView(
        container: connection.serviceContent.rootFolder,
        recursive: true,
        type: ['VirtualMachine']
      )
    end

    # Depth-first search of a snapshot +tree+ for +snapshotname+; returns
    # the matching snapshot object or nil.
    def get_snapshot_list(tree, snapshotname)
      snapshot = nil
      tree.each do |child|
        if child.name == snapshotname
          snapshot ||= child.snapshot
        else
          snapshot ||= get_snapshot_list(child.childSnapshotList, snapshotname)
        end
      end
      snapshot
    end

    # Relocate (vMotion) +vm+ onto +host+; blocks until the task completes.
    def migrate_vm_host(vm, host)
      relospec = RbVmomi::VIM.VirtualMachineRelocateSpec(host: host)
      vm.RelocateVM_Task(spec: relospec).wait_for_completion
    end

    # Close the underlying RbVmomi connection.
    def close
      @connection.close
    end
  end
end
# require 'rubygems' unless defined?(Gem)
# module Vmpooler
# class VsphereHelper
# ADAPTER_TYPE = 'lsiLogic'
# DISK_TYPE = 'thin'
# DISK_MODE = 'persistent'
# def initialize(config, metrics)
# $credentials = config[:vsphere]
# $conf = config[:config]
# $metrics = metrics
# end
# def ensure_connected(connection, credentials)
# connection.serviceInstance.CurrentTime
# rescue
# $metrics.increment("connect.open")
# connect_to_vsphere $credentials
# end
# def connect_to_vsphere(credentials)
# max_tries = $conf['max_tries'] || 3
# retry_factor = $conf['retry_factor'] || 10
# try = 1
# begin
# @connection = RbVmomi::VIM.connect host: credentials['server'],
# user: credentials['username'],
# password: credentials['password'],
# insecure: credentials['insecure'] || true
# $metrics.increment("connect.open")
# rescue => err
# try += 1
# $metrics.increment("connect.fail")
# raise err if try == max_tries
# sleep(try * retry_factor)
# retry
# end
# end
# def add_disk(vm, size, datastore)
# ensure_connected @connection, $credentials
# return false unless size.to_i > 0
# vmdk_datastore = find_datastore(datastore)
# vmdk_file_name = "#{vm['name']}/#{vm['name']}_#{find_vmdks(vm['name'], datastore).length + 1}.vmdk"
# controller = find_disk_controller(vm)
# vmdk_spec = RbVmomi::VIM::FileBackedVirtualDiskSpec(
# capacityKb: size.to_i * 1024 * 1024,
# adapterType: ADAPTER_TYPE,
# diskType: DISK_TYPE
# )
# vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
# datastore: vmdk_datastore,
# diskMode: DISK_MODE,
# fileName: "[#{vmdk_datastore.name}] #{vmdk_file_name}"
# )
# device = RbVmomi::VIM::VirtualDisk(
# backing: vmdk_backing,
# capacityInKB: size.to_i * 1024 * 1024,
# controllerKey: controller.key,
# key: -1,
# unitNumber: find_disk_unit_number(vm, controller)
# )
# device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
# device: device,
# operation: RbVmomi::VIM::VirtualDeviceConfigSpecOperation('add')
# )
# vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
# deviceChange: [device_config_spec]
# )
# @connection.serviceContent.virtualDiskManager.CreateVirtualDisk_Task(
# datacenter: @connection.serviceInstance.find_datacenter,
# name: "[#{vmdk_datastore.name}] #{vmdk_file_name}",
# spec: vmdk_spec
# ).wait_for_completion
# vm.ReconfigVM_Task(spec: vm_config_spec).wait_for_completion
# true
# end
# def find_datastore(datastorename)
# ensure_connected @connection, $credentials
# datacenter = @connection.serviceInstance.find_datacenter
# datacenter.find_datastore(datastorename)
# end
# def find_device(vm, deviceName)
# ensure_connected @connection, $credentials
# vm.config.hardware.device.each do |device|
# return device if device.deviceInfo.label == deviceName
# end
# nil
# end
# def find_disk_controller(vm)
# ensure_connected @connection, $credentials
# devices = find_disk_devices(vm)
# devices.keys.sort.each do |device|
# if devices[device]['children'].length < 15
# return find_device(vm, devices[device]['device'].deviceInfo.label)
# end
# end
# nil
# end
# def find_disk_devices(vm)
# ensure_connected @connection, $credentials
# devices = {}
# vm.config.hardware.device.each do |device|
# if device.is_a? RbVmomi::VIM::VirtualSCSIController
# if devices[device.controllerKey].nil?
# devices[device.key] = {}
# devices[device.key]['children'] = []
# end
# devices[device.key]['device'] = device
# end
# if device.is_a? RbVmomi::VIM::VirtualDisk
# if devices[device.controllerKey].nil?
# devices[device.controllerKey] = {}
# devices[device.controllerKey]['children'] = []
# end
# devices[device.controllerKey]['children'].push(device)
# end
# end
# devices
# end
# def find_disk_unit_number(vm, controller)
# ensure_connected @connection, $credentials
# used_unit_numbers = []
# available_unit_numbers = []
# devices = find_disk_devices(vm)
# devices.keys.sort.each do |c|
# next unless controller.key == devices[c]['device'].key
# used_unit_numbers.push(devices[c]['device'].scsiCtlrUnitNumber)
# devices[c]['children'].each do |disk|
# used_unit_numbers.push(disk.unitNumber)
# end
# end
# (0..15).each do |scsi_id|
# if used_unit_numbers.grep(scsi_id).length <= 0
# available_unit_numbers.push(scsi_id)
# end
# end
# available_unit_numbers.sort[0]
# end
# def find_folder(foldername)
# ensure_connected @connection, $credentials
# datacenter = @connection.serviceInstance.find_datacenter
# base = datacenter.vmFolder
# folders = foldername.split('/')
# folders.each do |folder|
# case base
# when RbVmomi::VIM::Folder
# base = base.childEntity.find { |f| f.name == folder }
# else
# abort "Unexpected object type encountered (#{base.class}) while finding folder"
# end
# end
# base
# end
# # Returns an array containing cumulative CPU and memory utilization of a host, and its object reference
# # Params:
# # +model+:: CPU arch version to match on
# # +limit+:: Hard limit for CPU or memory utilization beyond which a host is excluded for deployments
# def get_host_utilization(host, model=nil, limit=90)
# if model
# return nil unless host_has_cpu_model? host, model
# end
# return nil if host.runtime.inMaintenanceMode
# return nil unless host.overallStatus == 'green'
# cpu_utilization = cpu_utilization_for host
# memory_utilization = memory_utilization_for host
# return nil if cpu_utilization > limit
# return nil if memory_utilization > limit
# [ cpu_utilization + memory_utilization, host ]
# end
# def host_has_cpu_model?(host, model)
# get_host_cpu_arch_version(host) == model
# end
# def get_host_cpu_arch_version(host)
# cpu_model = host.hardware.cpuPkg[0].description
# cpu_model_parts = cpu_model.split()
# arch_version = cpu_model_parts[4]
# arch_version
# end
# def cpu_utilization_for(host)
# cpu_usage = host.summary.quickStats.overallCpuUsage
# cpu_size = host.summary.hardware.cpuMhz * host.summary.hardware.numCpuCores
# (cpu_usage.to_f / cpu_size.to_f) * 100
# end
# def memory_utilization_for(host)
# memory_usage = host.summary.quickStats.overallMemoryUsage
# memory_size = host.summary.hardware.memorySize / 1024 / 1024
# (memory_usage.to_f / memory_size.to_f) * 100
# end
# def find_least_used_host(cluster)
# ensure_connected @connection, $credentials
# cluster_object = find_cluster(cluster)
# target_hosts = get_cluster_host_utilization(cluster_object)
# least_used_host = target_hosts.sort[0][1]
# least_used_host
# end
# def find_cluster(cluster)
# datacenter = @connection.serviceInstance.find_datacenter
# datacenter.hostFolder.children.find { |cluster_object| cluster_object.name == cluster }
# end
# def get_cluster_host_utilization(cluster)
# cluster_hosts = []
# cluster.host.each do |host|
# host_usage = get_host_utilization(host)
# cluster_hosts << host_usage if host_usage
# end
# cluster_hosts
# end
# def find_least_used_compatible_host(vm)
# ensure_connected @connection, $credentials
# source_host = vm.summary.runtime.host
# model = get_host_cpu_arch_version(source_host)
# cluster = source_host.parent
# target_hosts = []
# cluster.host.each do |host|
# host_usage = get_host_utilization(host, model)
# target_hosts << host_usage if host_usage
# end
# target_host = target_hosts.sort[0][1]
# [target_host, target_host.name]
# end
# def find_pool(poolname)
# ensure_connected @connection, $credentials
# datacenter = @connection.serviceInstance.find_datacenter
# base = datacenter.hostFolder
# pools = poolname.split('/')
# pools.each do |pool|
# case base
# when RbVmomi::VIM::Folder
# base = base.childEntity.find { |f| f.name == pool }
# when RbVmomi::VIM::ClusterComputeResource
# base = base.resourcePool.resourcePool.find { |f| f.name == pool }
# when RbVmomi::VIM::ResourcePool
# base = base.resourcePool.find { |f| f.name == pool }
# else
# abort "Unexpected object type encountered (#{base.class}) while finding resource pool"
# end
# end
# base = base.resourcePool unless base.is_a?(RbVmomi::VIM::ResourcePool) && base.respond_to?(:resourcePool)
# base
# end
# def find_snapshot(vm, snapshotname)
# if vm.snapshot
# get_snapshot_list(vm.snapshot.rootSnapshotList, snapshotname)
# end
# end
# def find_vm(vmname)
# ensure_connected @connection, $credentials
# find_vm_light(vmname) || find_vm_heavy(vmname)[vmname]
# end
# def find_vm_light(vmname)
# ensure_connected @connection, $credentials
# @connection.searchIndex.FindByDnsName(vmSearch: true, dnsName: vmname)
# end
# def find_vm_heavy(vmname)
# ensure_connected @connection, $credentials
# vmname = vmname.is_a?(Array) ? vmname : [vmname]
# containerView = get_base_vm_container_from @connection
# propertyCollector = @connection.propertyCollector
# objectSet = [{
# obj: containerView,
# skip: true,
# selectSet: [RbVmomi::VIM::TraversalSpec.new(
# name: 'gettingTheVMs',
# path: 'view',
# skip: false,
# type: 'ContainerView'
# )]
# }]
# propSet = [{
# pathSet: ['name'],
# type: 'VirtualMachine'
# }]
# results = propertyCollector.RetrievePropertiesEx(
# specSet: [{
# objectSet: objectSet,
# propSet: propSet
# }],
# options: { maxObjects: nil }
# )
# vms = {}
# results.objects.each do |result|
# name = result.propSet.first.val
# next unless vmname.include? name
# vms[name] = result.obj
# end
# while results.token
# results = propertyCollector.ContinueRetrievePropertiesEx(token: results.token)
# results.objects.each do |result|
# name = result.propSet.first.val
# next unless vmname.include? name
# vms[name] = result.obj
# end
# end
# vms
# end
# def find_vmdks(vmname, datastore)
# ensure_connected @connection, $credentials
# disks = []
# vmdk_datastore = find_datastore(datastore)
# vm_files = vmdk_datastore._connection.serviceContent.propertyCollector.collectMultiple vmdk_datastore.vm, 'layoutEx.file'
# vm_files.keys.each do |f|
# vm_files[f]['layoutEx.file'].each do |l|
# if l.name.match(/^\[#{vmdk_datastore.name}\] #{vmname}\/#{vmname}_([0-9]+).vmdk/)
# disks.push(l)
# end
# end
# end
# disks
# end
# def get_base_vm_container_from(connection)
# ensure_connected @connection, $credentials
# viewManager = connection.serviceContent.viewManager
# viewManager.CreateContainerView(
# container: connection.serviceContent.rootFolder,
# recursive: true,
# type: ['VirtualMachine']
# )
# end
# def get_snapshot_list(tree, snapshotname)
# snapshot = nil
# tree.each do |child|
# if child.name == snapshotname
# snapshot ||= child.snapshot
# else
# snapshot ||= get_snapshot_list(child.childSnapshotList, snapshotname)
# end
# end
# snapshot
# end
# def migrate_vm_host(vm, host)
# relospec = RbVmomi::VIM.VirtualMachineRelocateSpec(host: host)
# vm.RelocateVM_Task(spec: relospec).wait_for_completion
# end
# def close
# @connection.close
# end
# end
# end