mirror of https://github.com/puppetlabs/vmpooler.git
synced 2026-01-27 02:18:41 -05:00

WIP (POOLER-70) Refactor out VSphere to separate class

parent 626195685f
commit 05f781ab69

6 changed files with 1443 additions and 653 deletions
lib/vmpooler.rb
@@ -12,7 +12,7 @@ module Vmpooler
 require 'yaml'
 require 'set'
 
-%w( api graphite logger pool_manager vsphere_helper statsd dummy_statsd ).each do |lib|
+%w( api graphite logger pool_manager vsphere_helper statsd dummy_statsd backingservice ).each do |lib|
   begin
     require "vmpooler/#{lib}"
   rescue LoadError
lib/vmpooler/backingservice.rb (new file, 8 lines)
@@ -0,0 +1,8 @@
+# TODO remove dummy for commit history
+%w( base vsphere dummy ).each do |lib|
+  begin
+    require "vmpooler/backingservice/#{lib}"
+  rescue LoadError
+    require File.expand_path(File.join(File.dirname(__FILE__), 'backingservice', lib))
+  end
+end
lib/vmpooler/backingservice/base.rb (new file, 102 lines)
@@ -0,0 +1,102 @@
+module Vmpooler
+  class PoolManager
+    class BackingService
+      class Base
+        # These defs must be overridden in child classes
+
+        def initialize(options)
+        end
+
+        # def validate_config(config)
+        #   false
+        # end
+
+        # inputs
+        #   pool : hashtable from config file
+        # returns
+        #   hashtable
+        #     name : name of the device
+        def vms_in_pool(pool)
+          fail "#{self.class.name} does not implement vms_in_pool"
+        end
+
+        # inputs
+        #   vm_name : string
+        # returns
+        #   [String] hostname = Name of the host computer running the vm. If this is not a Virtual Machine, it returns the vm_name
+        def get_vm_host(vm_name)
+          fail "#{self.class.name} does not implement get_vm_host"
+        end
+
+        # inputs
+        #   vm_name : string
+        # returns
+        #   [String] hostname = Name of the most appropriate host computer to run this VM. Useful for load balancing VMs in a cluster
+        #                       If this is not a Virtual Machine, it returns the vm_name
+        def find_least_used_compatible_host(vm_name)
+          fail "#{self.class.name} does not implement find_least_used_compatible_host"
+        end
+
+        # inputs
+        #   vm_name        : string
+        #   dest_host_name : string (Name of the host to migrate `vm_name` to)
+        # returns
+        #   [Boolean] true on success, false on failure
+        def migrate_vm_to_host(vm_name, dest_host_name)
+          fail "#{self.class.name} does not implement migrate_vm_to_host"
+        end
+
+        # inputs
+        #   vm_name : string
+        # returns
+        #   nil if the VM doesn't exist, otherwise a hashtable describing the VM:
+        #   [String] hostname   = Name reported by VMware Tools (host.summary.guest.hostName)
+        #   [String] template   = Name of the template exposed by the API. It must _match_ the poolname
+        #   [String] poolname   = Name of the pool the VM is located in
+        #   [Time]   boottime   = Time when the VM was created/booted
+        #   [String] powerstate = Current power state of the VM. Valid values (as per the vCenter API):
+        #                         'PoweredOn', 'PoweredOff'
+        def get_vm(vm_name)
+          fail "#{self.class.name} does not implement get_vm"
+        end
+
+        # inputs
+        #   pool : string
+        # returns
+        #   vm name : string
+        def create_vm(pool)
+          fail "#{self.class.name} does not implement create_vm"
+        end
+
+        # inputs
+        #   vm_name : string
+        #   pool    : string
+        # returns
+        #   boolean : true if success, false on error
+        def destroy_vm(vm_name,pool)
+          fail "#{self.class.name} does not implement destroy_vm"
+        end
+
+        # inputs
+        #   vm      : string
+        #   pool    : string
+        #   timeout : int (seconds)
+        # returns
+        #   result : boolean
+        def is_vm_ready?(vm,pool,timeout)
+          fail "#{self.class.name} does not implement is_vm_ready?"
+        end
+
+        # inputs
+        #   vm : string
+        # returns
+        #   result : boolean
+        def vm_exists?(vm)
+          fail "#{self.class.name} does not implement vm_exists?"
+        end
+      end
+    end
+  end
+end
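Every method in Base raises unless a child class overrides it, so a concrete backing service only supplies the operations it supports. A minimal sketch of what a subclass could look like — the `InMemory` class and its internals are illustrative only, not part of this commit:

```ruby
require 'vmpooler/backingservice/base'

# Hypothetical in-memory backing service -- a sketch, not shipped code.
class InMemory < Vmpooler::PoolManager::BackingService::Base
  def initialize(options)
    @vms = {} # vm_name => hashtable matching the get_vm contract above
  end

  def create_vm(pool)
    name = "vm#{rand(36**8).to_s(36)}" # cheap random hostname
    @vms[name] = {
      'hostname'   => name,
      'template'   => pool['template'],
      'poolname'   => pool['name'],
      'boottime'   => Time.now,
      'powerstate' => 'PoweredOn'
    }
    name
  end

  def get_vm(vm_name)
    @vms[vm_name] # nil when the VM does not exist, per the contract
  end

  def vm_exists?(vm)
    !get_vm(vm).nil?
  end
end
```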
lib/vmpooler/backingservice/vsphere.rb (new file, 622 lines)
@@ -0,0 +1,622 @@
+require 'rubygems' unless defined?(Gem)
+
+module Vmpooler
+  class PoolManager
+    class BackingService
+      class Vsphere < Vmpooler::PoolManager::BackingService::Base
+        #--------------- Public methods
+
+        def initialize(options)
+          $credentials = options['credentials']
+          $metrics = options['metrics']
+        end
+
+        def devices_in_pool(pool)
+          base = find_folder(pool['folder'])
+
+          base.childEntity.each do |vm|
+            vm
+          end
+        end
+
+        # def destroy_vm()
+        #   # Destroy a VM
+        #   def _destroy_vm(vm, pool, vsphere)
+        #     $redis.srem('vmpooler__completed__' + pool, vm)
+        #     $redis.hdel('vmpooler__active__' + pool, vm)
+        #     $redis.hset('vmpooler__vm__' + vm, 'destroy', Time.now)
+
+        #     # Auto-expire metadata key
+        #     $redis.expire('vmpooler__vm__' + vm, ($config[:redis]['data_ttl'].to_i * 60 * 60))
+
+        #     # TODO This is all vSphere specific
+
+        #     host = vsphere.find_vm(vm)
+
+        #     if host
+        #       start = Time.now
+
+        #       if
+        #         (host.runtime) &&
+        #         (host.runtime.powerState) &&
+        #         (host.runtime.powerState == 'poweredOn')
+
+        #         $logger.log('d', "[ ] [#{pool}] '#{vm}' is being shut down")
+        #         host.PowerOffVM_Task.wait_for_completion
+        #       end
+
+        #       host.Destroy_Task.wait_for_completion
+        #       finish = '%.2f' % (Time.now - start)
+
+        #       $logger.log('s', "[-] [#{pool}] '#{vm}' destroyed in #{finish} seconds")
+        #       $metrics.timing("destroy.#{pool}", finish)
+        #     end
+        #   end
+        # end
+
+        def create_device(pool)
+          '12345'
+          # clone_vm(
+          #   pool['template'],
+          #   pool['folder'],
+          #   pool['datastore'],
+          #   pool['clone_target'],
+          #   vsphere
+          # )
+
+          # Thread.new do
+          #   begin
+          #     vm = {}
+
+          #     if template =~ /\//
+          #       templatefolders = template.split('/')
+          #       vm['template'] = templatefolders.pop
+          #     end
+
+          #     if templatefolders
+          #       vm[vm['template']] = vsphere.find_folder(templatefolders.join('/')).find(vm['template'])
+          #     else
+          #       fail 'Please provide a full path to the template'
+          #     end
+
+          #     if vm['template'].length == 0
+          #       fail "Unable to find template '#{vm['template']}'!"
+          #     end
+
+          #     # Generate a randomized hostname
+          #     o = [('a'..'z'), ('0'..'9')].map(&:to_a).flatten
+          #     vm['hostname'] = $config[:config]['prefix'] + o[rand(25)] + (0...14).map { o[rand(o.length)] }.join
+
+          #     # Add VM to Redis inventory ('pending' pool)
+          #     $redis.sadd('vmpooler__pending__' + vm['template'], vm['hostname'])
+          #     $redis.hset('vmpooler__vm__' + vm['hostname'], 'clone', Time.now)
+          #     $redis.hset('vmpooler__vm__' + vm['hostname'], 'template', vm['template'])
+
+          #     # Annotate with creation time, origin template, etc.
+          #     # Add extraconfig options that can be queried by vmtools
+          #     configSpec = RbVmomi::VIM.VirtualMachineConfigSpec(
+          #       annotation: JSON.pretty_generate(
+          #         name: vm['hostname'],
+          #         created_by: $config[:vsphere]['username'],
+          #         base_template: vm['template'],
+          #         creation_timestamp: Time.now.utc
+          #       ),
+          #       extraConfig: [
+          #         { key: 'guestinfo.hostname',
+          #           value: vm['hostname']
+          #         }
+          #       ]
+          #     )
+
+          #     # Choose a clone target
+          #     if target
+          #       $clone_target = vsphere.find_least_used_host(target)
+          #     elsif $config[:config]['clone_target']
+          #       $clone_target = vsphere.find_least_used_host($config[:config]['clone_target'])
+          #     end
+
+          #     # Put the VM in the specified folder and resource pool
+          #     relocateSpec = RbVmomi::VIM.VirtualMachineRelocateSpec(
+          #       datastore: vsphere.find_datastore(datastore),
+          #       host: $clone_target,
+          #       diskMoveType: :moveChildMostDiskBacking
+          #     )
+
+          #     # Create a clone spec
+          #     spec = RbVmomi::VIM.VirtualMachineCloneSpec(
+          #       location: relocateSpec,
+          #       config: configSpec,
+          #       powerOn: true,
+          #       template: false
+          #     )
+
+          #     # Clone the VM
+          #     $logger.log('d', "[ ] [#{vm['template']}] '#{vm['hostname']}' is being cloned from '#{vm['template']}'")
+
+          #     begin
+          #       start = Time.now
+          #       vm[vm['template']].CloneVM_Task(
+          #         folder: vsphere.find_folder(folder),
+          #         name: vm['hostname'],
+          #         spec: spec
+          #       ).wait_for_completion
+          #       finish = '%.2f' % (Time.now - start)
+
+          #       $redis.hset('vmpooler__clone__' + Date.today.to_s, vm['template'] + ':' + vm['hostname'], finish)
+          #       $redis.hset('vmpooler__vm__' + vm['hostname'], 'clone_time', finish)
+
+          #       $logger.log('s', "[+] [#{vm['template']}] '#{vm['hostname']}' cloned from '#{vm['template']}' in #{finish} seconds")
+          #     rescue => err
+          #       $logger.log('s', "[!] [#{vm['template']}] '#{vm['hostname']}' clone failed with an error: #{err}")
+          #       $redis.srem('vmpooler__pending__' + vm['template'], vm['hostname'])
+          #       raise
+          #     end
+
+          #     $redis.decr('vmpooler__tasks__clone')
+
+          #     $metrics.timing("clone.#{vm['template']}", finish)
+          #   rescue => err
+          #     $logger.log('s', "[!] [#{vm['template']}] '#{vm['hostname']}' failed while preparing to clone with an error: #{err}")
+          #     raise
+          #   end
+          # end
+
+        end
+
+        #**** When getting the VM details
+        # if (host.summary) &&
+        #    (host.summary.guest) &&
+        #    (host.summary.guest.hostName) &&
+        #    (host.summary.guest.hostName == vm)
+        #
+
+        def is_vm_ready?(vm,pool,timeout)
+          fail "!!!!"
+
+          # def open_socket(host, domain=nil, timeout=5, port=22, &block)
+          #   Timeout.timeout(timeout) do
+          #     target_host = host
+          #     target_host = "#{host}.#{domain}" if domain
+          #     sock = TCPSocket.new target_host, port
+          #     begin
+          #       yield sock if block_given?
+          #     ensure
+          #       sock.close
+          #     end
+          #   end
+          # end
+
+          # def _check_pending_vm(vm, pool, timeout, vsphere)
+          #   host = vsphere.find_vm(vm)
+
+          #   if ! host
+          #     fail_pending_vm(vm, pool, timeout, false)
+          #     return
+          #   end
+          #   open_socket vm
+          #   move_pending_vm_to_ready(vm, pool, host)
+          # rescue
+          #   fail_pending_vm(vm, pool, timeout)
+          # end
+
+        end
+
+        #--------------- Private methods
+        private
+
+        ADAPTER_TYPE = 'lsiLogic'
+        DISK_TYPE = 'thin'
+        DISK_MODE = 'persistent'
+
+        def ensure_connected(connection, credentials)
+          connection.serviceInstance.CurrentTime
+        rescue
+          $metrics.increment("connect.open")
+          connect_to_vsphere $credentials
+        end
+
+        def connect_to_vsphere(credentials)
+          @connection = RbVmomi::VIM.connect host: credentials['server'],
+                                             user: credentials['username'],
+                                             password: credentials['password'],
+                                             insecure: credentials['insecure'] || true
+        end
+
+        def add_disk(vm, size, datastore)
+          ensure_connected @connection, $credentials
+
+          return false unless size.to_i > 0
+
+          vmdk_datastore = find_datastore(datastore)
+          vmdk_file_name = "#{vm['name']}/#{vm['name']}_#{find_vmdks(vm['name'], datastore).length + 1}.vmdk"
+
+          controller = find_disk_controller(vm)
+
+          vmdk_spec = RbVmomi::VIM::FileBackedVirtualDiskSpec(
+            capacityKb: size.to_i * 1024 * 1024,
+            adapterType: ADAPTER_TYPE,
+            diskType: DISK_TYPE
+          )
+
+          vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
+            datastore: vmdk_datastore,
+            diskMode: DISK_MODE,
+            fileName: "[#{vmdk_datastore.name}] #{vmdk_file_name}"
+          )
+
+          device = RbVmomi::VIM::VirtualDisk(
+            backing: vmdk_backing,
+            capacityInKB: size.to_i * 1024 * 1024,
+            controllerKey: controller.key,
+            key: -1,
+            unitNumber: find_disk_unit_number(vm, controller)
+          )
+
+          device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
+            device: device,
+            operation: RbVmomi::VIM::VirtualDeviceConfigSpecOperation('add')
+          )
+
+          vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
+            deviceChange: [device_config_spec]
+          )
+
+          @connection.serviceContent.virtualDiskManager.CreateVirtualDisk_Task(
+            datacenter: @connection.serviceInstance.find_datacenter,
+            name: "[#{vmdk_datastore.name}] #{vmdk_file_name}",
+            spec: vmdk_spec
+          ).wait_for_completion
+
+          vm.ReconfigVM_Task(spec: vm_config_spec).wait_for_completion
+
+          true
+        end
+
+        def find_datastore(datastorename)
+          ensure_connected @connection, $credentials
+
+          datacenter = @connection.serviceInstance.find_datacenter
+          datacenter.find_datastore(datastorename)
+        end
+
+        def find_device(vm, deviceName)
+          ensure_connected @connection, $credentials
+
+          vm.config.hardware.device.each do |device|
+            return device if device.deviceInfo.label == deviceName
+          end
+
+          nil
+        end
+
+        def find_disk_controller(vm)
+          ensure_connected @connection, $credentials
+
+          devices = find_disk_devices(vm)
+
+          devices.keys.sort.each do |device|
+            if devices[device]['children'].length < 15
+              return find_device(vm, devices[device]['device'].deviceInfo.label)
+            end
+          end
+
+          nil
+        end
+
+        def find_disk_devices(vm)
+          ensure_connected @connection, $credentials
+
+          devices = {}
+
+          vm.config.hardware.device.each do |device|
+            if device.is_a? RbVmomi::VIM::VirtualSCSIController
+              if devices[device.controllerKey].nil?
+                devices[device.key] = {}
+                devices[device.key]['children'] = []
+              end
+
+              devices[device.key]['device'] = device
+            end
+
+            if device.is_a? RbVmomi::VIM::VirtualDisk
+              if devices[device.controllerKey].nil?
+                devices[device.controllerKey] = {}
+                devices[device.controllerKey]['children'] = []
+              end
+
+              devices[device.controllerKey]['children'].push(device)
+            end
+          end
+
+          devices
+        end
+
+        def find_disk_unit_number(vm, controller)
+          ensure_connected @connection, $credentials
+
+          used_unit_numbers = []
+          available_unit_numbers = []
+
+          devices = find_disk_devices(vm)
+
+          devices.keys.sort.each do |c|
+            next unless controller.key == devices[c]['device'].key
+            used_unit_numbers.push(devices[c]['device'].scsiCtlrUnitNumber)
+            devices[c]['children'].each do |disk|
+              used_unit_numbers.push(disk.unitNumber)
+            end
+          end
+
+          (0..15).each do |scsi_id|
+            if used_unit_numbers.grep(scsi_id).length <= 0
+              available_unit_numbers.push(scsi_id)
+            end
+          end
+
+          available_unit_numbers.sort[0]
+        end
+
+        def find_folder(foldername)
+          ensure_connected @connection, $credentials
+
+          datacenter = @connection.serviceInstance.find_datacenter
+          base = datacenter.vmFolder
+          folders = foldername.split('/')
+          folders.each do |folder|
+            case base
+            when RbVmomi::VIM::Folder
+              base = base.childEntity.find { |f| f.name == folder }
+            else
+              abort "Unexpected object type encountered (#{base.class}) while finding folder"
+            end
+          end
+
+          base
+        end
+
+        # Returns an array containing cumulative CPU and memory utilization of a host, and its object reference
+        # Params:
+        # +model+:: CPU arch version to match on
+        # +limit+:: Hard limit for CPU or memory utilization beyond which a host is excluded for deployments
+        def get_host_utilization(host, model=nil, limit=90)
+          if model
+            return nil unless host_has_cpu_model? host, model
+          end
+          return nil if host.runtime.inMaintenanceMode
+          return nil unless host.overallStatus == 'green'
+
+          cpu_utilization = cpu_utilization_for host
+          memory_utilization = memory_utilization_for host
+
+          return nil if cpu_utilization > limit
+          return nil if memory_utilization > limit
+
+          [ cpu_utilization + memory_utilization, host ]
+        end
+
+        def host_has_cpu_model?(host, model)
+          get_host_cpu_arch_version(host) == model
+        end
+
+        def get_host_cpu_arch_version(host)
+          cpu_model = host.hardware.cpuPkg[0].description
+          cpu_model_parts = cpu_model.split()
+          arch_version = cpu_model_parts[4]
+          arch_version
+        end
+
+        def cpu_utilization_for(host)
+          cpu_usage = host.summary.quickStats.overallCpuUsage
+          cpu_size = host.summary.hardware.cpuMhz * host.summary.hardware.numCpuCores
+          (cpu_usage.to_f / cpu_size.to_f) * 100
+        end
+
+        def memory_utilization_for(host)
+          memory_usage = host.summary.quickStats.overallMemoryUsage
+          memory_size = host.summary.hardware.memorySize / 1024 / 1024
+          (memory_usage.to_f / memory_size.to_f) * 100
+        end
+
+        def find_least_used_host(cluster)
+          ensure_connected @connection, $credentials
+
+          cluster_object = find_cluster(cluster)
+          target_hosts = get_cluster_host_utilization(cluster_object)
+          least_used_host = target_hosts.sort[0][1]
+          least_used_host
+        end
+
+        def find_cluster(cluster)
+          datacenter = @connection.serviceInstance.find_datacenter
+          datacenter.hostFolder.children.find { |cluster_object| cluster_object.name == cluster }
+        end
+
+        def get_cluster_host_utilization(cluster)
+          cluster_hosts = []
+          cluster.host.each do |host|
+            host_usage = get_host_utilization(host)
+            cluster_hosts << host_usage if host_usage
+          end
+          cluster_hosts
+        end
+
+        def find_least_used_compatible_host(vm)
+          ensure_connected @connection, $credentials
+
+          source_host = vm.summary.runtime.host
+          model = get_host_cpu_arch_version(source_host)
+          cluster = source_host.parent
+          target_hosts = []
+          cluster.host.each do |host|
+            host_usage = get_host_utilization(host, model)
+            target_hosts << host_usage if host_usage
+          end
+          target_host = target_hosts.sort[0][1]
+          [target_host, target_host.name]
+        end
+
+        def find_pool(poolname)
+          ensure_connected @connection, $credentials
+
+          datacenter = @connection.serviceInstance.find_datacenter
+          base = datacenter.hostFolder
+          pools = poolname.split('/')
+          pools.each do |pool|
+            case base
+            when RbVmomi::VIM::Folder
+              base = base.childEntity.find { |f| f.name == pool }
+            when RbVmomi::VIM::ClusterComputeResource
+              base = base.resourcePool.resourcePool.find { |f| f.name == pool }
+            when RbVmomi::VIM::ResourcePool
+              base = base.resourcePool.find { |f| f.name == pool }
+            else
+              abort "Unexpected object type encountered (#{base.class}) while finding resource pool"
+            end
+          end
+
+          base = base.resourcePool unless base.is_a?(RbVmomi::VIM::ResourcePool) && base.respond_to?(:resourcePool)
+          base
+        end
+
+        def find_snapshot(vm, snapshotname)
+          if vm.snapshot
+            get_snapshot_list(vm.snapshot.rootSnapshotList, snapshotname)
+          end
+        end
+
+        def find_vm(vmname)
+          ensure_connected @connection, $credentials
+          find_vm_light(vmname) || find_vm_heavy(vmname)[vmname]
+        end
+
+        def find_vm_light(vmname)
+          ensure_connected @connection, $credentials
+
+          @connection.searchIndex.FindByDnsName(vmSearch: true, dnsName: vmname)
+        end
+
+        def find_vm_heavy(vmname)
+          ensure_connected @connection, $credentials
+
+          vmname = vmname.is_a?(Array) ? vmname : [vmname]
+          containerView = get_base_vm_container_from @connection
+          propertyCollector = @connection.propertyCollector
+
+          objectSet = [{
+            obj: containerView,
+            skip: true,
+            selectSet: [RbVmomi::VIM::TraversalSpec.new(
+              name: 'gettingTheVMs',
+              path: 'view',
+              skip: false,
+              type: 'ContainerView'
+            )]
+          }]
+
+          propSet = [{
+            pathSet: ['name'],
+            type: 'VirtualMachine'
+          }]
+
+          results = propertyCollector.RetrievePropertiesEx(
+            specSet: [{
+              objectSet: objectSet,
+              propSet: propSet
+            }],
+            options: { maxObjects: nil }
+          )
+
+          vms = {}
+          results.objects.each do |result|
+            name = result.propSet.first.val
+            next unless vmname.include? name
+            vms[name] = result.obj
+          end
+
+          while results.token
+            results = propertyCollector.ContinueRetrievePropertiesEx(token: results.token)
+            results.objects.each do |result|
+              name = result.propSet.first.val
+              next unless vmname.include? name
+              vms[name] = result.obj
+            end
+          end
+
+          vms
+        end
+
+        def find_vmdks(vmname, datastore)
+          ensure_connected @connection, $credentials
+
+          disks = []
+
+          vmdk_datastore = find_datastore(datastore)
+
+          vm_files = vmdk_datastore._connection.serviceContent.propertyCollector.collectMultiple vmdk_datastore.vm, 'layoutEx.file'
+          vm_files.keys.each do |f|
+            vm_files[f]['layoutEx.file'].each do |l|
+              if l.name.match(/^\[#{vmdk_datastore.name}\] #{vmname}\/#{vmname}_([0-9]+).vmdk/)
+                disks.push(l)
+              end
+            end
+          end
+
+          disks
+        end
+
+        def get_base_vm_container_from(connection)
+          ensure_connected @connection, $credentials
+
+          viewManager = connection.serviceContent.viewManager
+          viewManager.CreateContainerView(
+            container: connection.serviceContent.rootFolder,
+            recursive: true,
+            type: ['VirtualMachine']
+          )
+        end
+
+        def get_snapshot_list(tree, snapshotname)
+          snapshot = nil
+
+          tree.each do |child|
+            if child.name == snapshotname
+              snapshot ||= child.snapshot
+            else
+              snapshot ||= get_snapshot_list(child.childSnapshotList, snapshotname)
+            end
+          end
+
+          snapshot
+        end
+
+        def migrate_vm_host(vm, host)
+          relospec = RbVmomi::VIM.VirtualMachineRelocateSpec(host: host)
+          vm.RelocateVM_Task(spec: relospec).wait_for_completion
+        end
+
+        def close
+          @connection.close
+        end
+
+      end
+    end
+  end
+end
+
+module Vmpooler
+  class VsphereHelper
+
+  end
+end
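For orientation, this is roughly how the new class would be constructed, based on the options hash read in `initialize` and the credential keys consumed by `connect_to_vsphere`. The server, username, and password values here are placeholders; the commit itself only constructs it with a metrics option and leaves credentials as a TODO:

```ruby
# Sketch of wiring, inferred from initialize/connect_to_vsphere above.
vsphere = Vmpooler::PoolManager::BackingService::Vsphere.new(
  'credentials' => {
    'server'   => 'vcenter.example.com', # placeholder
    'username' => 'vmpooler',            # placeholder
    'password' => 'secret',              # placeholder
    'insecure' => true
  },
  'metrics'    => $metrics
)
```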
lib/vmpooler/pool_manager.rb
@@ -12,51 +12,76 @@ module Vmpooler
       # Connect to Redis
       $redis = redis
 
-      # vSphere object
-      $vsphere = {}
+      # per pool VM Backing Services
+      $backing_services = {}
 
       # Our thread-tracker object
       $threads = {}
+
+      # WARNING DEBUG
+      $logger.log('d',"Flushing REDIS WARNING!!!")
+      $redis.flushdb
     end
 
     # Check the state of a VM
-    def check_pending_vm(vm, pool, timeout, vsphere)
+    # DONE
+    def check_pending_vm(vm, pool, timeout, backingservice)
       Thread.new do
-        _check_pending_vm(vm, pool, timeout, vsphere)
+        _check_pending_vm(vm, pool, timeout, backingservice)
       end
     end
 
-    def open_socket(host, domain=nil, timeout=5, port=22, &block)
-      Timeout.timeout(timeout) do
-        target_host = host
-        target_host = "#{host}.#{domain}" if domain
-        sock = TCPSocket.new target_host, port
-        begin
-          yield sock if block_given?
-        ensure
-          sock.close
-        end
-      end
-    end
-
-    def _check_pending_vm(vm, pool, timeout, vsphere)
-      host = vsphere.find_vm(vm)
-
+    # DONE
+    def _check_pending_vm(vm, pool, timeout, backingservice)
+      host = backingservice.get_vm(vm)
       if ! host
         fail_pending_vm(vm, pool, timeout, false)
         return
       end
-      open_socket vm
-      move_pending_vm_to_ready(vm, pool, host)
-    rescue
+      if backingservice.is_vm_ready?(vm,pool,timeout)
+        move_pending_vm_to_ready(vm, pool, host)
+      else
+        fail "VM is not ready"
+      end
+    rescue => err
+      $logger.log('s', "[!] [#{pool}] '#{vm}' errored while checking a pending vm : #{err}")
       fail_pending_vm(vm, pool, timeout)
+      raise
    end
 
+    # def open_socket(host, domain=nil, timeout=5, port=22, &block)
+    #   Timeout.timeout(timeout) do
+    #     target_host = host
+    #     target_host = "#{host}.#{domain}" if domain
+    #     sock = TCPSocket.new target_host, port
+    #     begin
+    #       yield sock if block_given?
+    #     ensure
+    #       sock.close
+    #     end
+    #   end
+    # end
+
+    # def _check_pending_vm(vm, pool, timeout, vsphere)
+    #   host = vsphere.find_vm(vm)
+    #
+    #   if ! host
+    #     fail_pending_vm(vm, pool, timeout, false)
+    #     return
+    #   end
+    #   open_socket vm
+    #   move_pending_vm_to_ready(vm, pool, host)
+    # rescue
+    #   fail_pending_vm(vm, pool, timeout)
+    # end
+
+    # DONE
     def remove_nonexistent_vm(vm, pool)
       $redis.srem("vmpooler__pending__#{pool}", vm)
       $logger.log('d', "[!] [#{pool}] '#{vm}' no longer exists. Removing from pending.")
     end
 
+    # DONE
     def fail_pending_vm(vm, pool, timeout, exists=true)
       clone_stamp = $redis.hget("vmpooler__vm__#{vm}", 'clone')
       return if ! clone_stamp
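_check_pending_vm now delegates reachability to backingservice.is_vm_ready?, but the vSphere class in this commit still stubs that method out with fail "!!!!". One plausible implementation, reusing the TCP probe from the retired open_socket helper — a sketch under that assumption, not what this commit ships:

```ruby
require 'socket'
require 'timeout'

# Hypothetical is_vm_ready? for the Vsphere backing service.
# Treats SSH reachability within `timeout` seconds as "ready",
# exactly as the old open_socket-based check did.
def is_vm_ready?(vm, pool, timeout)
  Timeout.timeout(timeout) do
    sock = TCPSocket.new(vm, 22) # raises if the port is unreachable
    sock.close
  end
  true
rescue
  false # timeout or connection failure means not ready
end
```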
@@ -74,14 +99,11 @@ module Vmpooler
       $logger.log('d', "Fail pending VM failed with an error: #{err}")
     end
 
+    # DONE
     def move_pending_vm_to_ready(vm, pool, host)
-      if (host.summary) &&
-         (host.summary.guest) &&
-         (host.summary.guest.hostName) &&
-         (host.summary.guest.hostName == vm)
-
+      if host['hostname'] == vm
         begin
-          Socket.getaddrinfo(vm, nil) # WTF?
+          Socket.getaddrinfo(vm, nil) # WTF? I assume this is just priming the local DNS resolver cache?!?!
         rescue
         end
@@ -91,58 +113,60 @@ module Vmpooler
         $redis.smove('vmpooler__pending__' + pool, 'vmpooler__ready__' + pool, vm)
         $redis.hset('vmpooler__boot__' + Date.today.to_s, pool + ':' + vm, finish)
 
-        $logger.log('s', "[>] [#{pool}] '#{vm}' moved to 'ready' queue")
+        $logger.log('s', "[>] [#{pool}] '#{vm}' moved from 'pending' to 'ready' queue")
       end
     end
 
-    def check_ready_vm(vm, pool, ttl, vsphere)
+    # DONE
+    def check_ready_vm(vm, pool, ttl, backingservice)
       Thread.new do
+        _check_ready_vm(vm, pool, ttl, backingservice)
+      end
+    end
+
+    # DONE
+    def _check_ready_vm(vm, pool, ttl, backingservice)
+      host = backingservice.get_vm(vm)
+      # Check if the host even exists
+      if !host
+        $redis.srem('vmpooler__ready__' + pool, vm)
+        $logger.log('s', "[!] [#{pool}] '#{vm}' not found in inventory for pool #{pool}, removed from 'ready' queue")
+        return
+      end
+
+      # Check if the hosts TTL has expired
       if ttl > 0
-        if (((Time.now - host.runtime.bootTime) / 60).to_s[/^\d+\.\d{1}/].to_f) > ttl
+        if (((Time.now - host['boottime']) / 60).to_s[/^\d+\.\d{1}/].to_f) > ttl
           $redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm)
 
           $logger.log('d', "[!] [#{pool}] '#{vm}' reached end of TTL after #{ttl} minutes, removed from 'ready' queue")
         end
       end
 
+      # Periodically check that the VM is available
       check_stamp = $redis.hget('vmpooler__vm__' + vm, 'check')
 
       if
         (!check_stamp) ||
         (((Time.now - Time.parse(check_stamp)) / 60) > $config[:config]['vm_checktime'])
 
         $redis.hset('vmpooler__vm__' + vm, 'check', Time.now)
 
-        host = vsphere.find_vm(vm)
-
-        if host
+        # Check if the VM is not powered on
         if
-          (host.runtime) &&
-          (host.runtime.powerState) &&
-          (host.runtime.powerState != 'poweredOn')
+          (host['powerstate'] != 'PoweredOn')
 
           $redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm)
 
           $logger.log('d', "[!] [#{pool}] '#{vm}' appears to be powered off, removed from 'ready' queue")
         end
 
-        if
-          (host.summary.guest) &&
-          (host.summary.guest.hostName) &&
-          (host.summary.guest.hostName != vm)
+        # Check if the hostname has magically changed from underneath Pooler
+        if (host['hostname'] != vm)
 
           $redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm)
 
           $logger.log('d', "[!] [#{pool}] '#{vm}' has mismatched hostname, removed from 'ready' queue")
         end
-        else
-          $redis.srem('vmpooler__ready__' + pool, vm)
-
-          $logger.log('s', "[!] [#{pool}] '#{vm}' not found in vCenter inventory, removed from 'ready' queue")
-        end
 
+        # Check if the VM is still ready/available
         begin
-          open_socket vm
+          fail "VM #{vm} is not ready" unless backingservice.is_vm_ready?(vm,pool,5)
         rescue
           if $redis.smove('vmpooler__ready__' + pool, 'vmpooler__completed__' + pool, vm)
             $logger.log('d', "[!] [#{pool}] '#{vm}' is unreachable, removed from 'ready' queue")
@@ -151,17 +175,21 @@ module Vmpooler
           end
         end
       end
-      end
+    rescue => err
+      $logger.log('s', "[!] [#{vm['poolname']}] '#{vm['hostname']}' failed while checking a ready vm : #{err}")
+      raise
     end
 
-    def check_running_vm(vm, pool, ttl, vsphere)
+    # DONE
+    def check_running_vm(vm, pool, ttl, backingservice)
       Thread.new do
-        _check_running_vm(vm, pool, ttl, vsphere)
+        _check_running_vm(vm, pool, ttl, backingservice)
       end
     end
 
-    def _check_running_vm(vm, pool, ttl, vsphere)
-      host = vsphere.find_vm(vm)
+    # DONE
+    def _check_running_vm(vm, pool, ttl, backingservice)
+      host = backingservice.get_vm(vm)
 
       if host
         queue_from, queue_to = 'running', 'completed'
@@ -179,114 +207,131 @@ module Vmpooler
       end
     end
 
+    # DONE
     def move_vm_queue(pool, vm, queue_from, queue_to, msg)
       $redis.smove("vmpooler__#{queue_from}__#{pool}", "vmpooler__#{queue_to}__#{pool}", vm)
       $logger.log('d', "[!] [#{pool}] '#{vm}' #{msg}")
     end
 
-    # Clone a VM
-    def clone_vm(template, folder, datastore, target, vsphere)
+    # DONE
+    def clone_vm(pool, backingservice)
       Thread.new do
-        begin
-          vm = {}
-
-          if template =~ /\//
-            templatefolders = template.split('/')
-            vm['template'] = templatefolders.pop
-          end
-
-          if templatefolders
-            vm[vm['template']] = vsphere.find_folder(templatefolders.join('/')).find(vm['template'])
-          else
-            fail 'Please provide a full path to the template'
-          end
-
-          if vm['template'].length == 0
-            fail "Unable to find template '#{vm['template']}'!"
-          end
-
-          # Generate a randomized hostname
-          o = [('a'..'z'), ('0'..'9')].map(&:to_a).flatten
-          vm['hostname'] = $config[:config]['prefix'] + o[rand(25)] + (0...14).map { o[rand(o.length)] }.join
-
-          # Add VM to Redis inventory ('pending' pool)
-          $redis.sadd('vmpooler__pending__' + vm['template'], vm['hostname'])
-          $redis.hset('vmpooler__vm__' + vm['hostname'], 'clone', Time.now)
-          $redis.hset('vmpooler__vm__' + vm['hostname'], 'template', vm['template'])
-
-          # Annotate with creation time, origin template, etc.
-          # Add extraconfig options that can be queried by vmtools
-          configSpec = RbVmomi::VIM.VirtualMachineConfigSpec(
-            annotation: JSON.pretty_generate(
-              name: vm['hostname'],
-              created_by: $config[:vsphere]['username'],
-              base_template: vm['template'],
-              creation_timestamp: Time.now.utc
-            ),
-            extraConfig: [
-              { key: 'guestinfo.hostname',
-                value: vm['hostname']
-              }
-            ]
-          )
-
-          # Choose a clone target
-          if target
-            $clone_target = vsphere.find_least_used_host(target)
-          elsif $config[:config]['clone_target']
-            $clone_target = vsphere.find_least_used_host($config[:config]['clone_target'])
-          end
-
-          # Put the VM in the specified folder and resource pool
-          relocateSpec = RbVmomi::VIM.VirtualMachineRelocateSpec(
-            datastore: vsphere.find_datastore(datastore),
-            host: $clone_target,
-            diskMoveType: :moveChildMostDiskBacking
-          )
-
-          # Create a clone spec
-          spec = RbVmomi::VIM.VirtualMachineCloneSpec(
-            location: relocateSpec,
-            config: configSpec,
-            powerOn: true,
-            template: false
-          )
-
-          # Clone the VM
-          $logger.log('d', "[ ] [#{vm['template']}] '#{vm['hostname']}' is being cloned from '#{vm['template']}'")
-
-          begin
-            start = Time.now
-            vm[vm['template']].CloneVM_Task(
-              folder: vsphere.find_folder(folder),
-              name: vm['hostname'],
-              spec: spec
-            ).wait_for_completion
-            finish = '%.2f' % (Time.now - start)
-
-            $redis.hset('vmpooler__clone__' + Date.today.to_s, vm['template'] + ':' + vm['hostname'], finish)
-            $redis.hset('vmpooler__vm__' + vm['hostname'], 'clone_time', finish)
-
-            $logger.log('s', "[+] [#{vm['template']}] '#{vm['hostname']}' cloned from '#{vm['template']}' in #{finish} seconds")
-          rescue => err
-            $logger.log('s', "[!] [#{vm['template']}] '#{vm['hostname']}' clone failed with an error: #{err}")
-            $redis.srem('vmpooler__pending__' + vm['template'], vm['hostname'])
-            raise
-          end
-
-          $redis.decr('vmpooler__tasks__clone')
-
-          $metrics.timing("clone.#{vm['template']}", finish)
-        rescue => err
-          $logger.log('s', "[!] [#{vm['template']}] '#{vm['hostname']}' failed while preparing to clone with an error: #{err}")
-          raise
-        end
+        backingservice.create_vm(pool)
       end
     end
 
+    # Clone a VM
+    # def clone_vm(template, folder, datastore, target, vsphere)
+    #   Thread.new do
+    #     begin
+    #       vm = {}
+    #
+    #       if template =~ /\//
+    #         templatefolders = template.split('/')
+    #         vm['template'] = templatefolders.pop
+    #       end
+    #
+    #       if templatefolders
+    #         vm[vm['template']] = vsphere.find_folder(templatefolders.join('/')).find(vm['template'])
+    #       else
+    #         fail 'Please provide a full path to the template'
+    #       end
+    #
+    #       if vm['template'].length == 0
+    #         fail "Unable to find template '#{vm['template']}'!"
+    #       end
+    #
+    #       # Generate a randomized hostname
+    #       o = [('a'..'z'), ('0'..'9')].map(&:to_a).flatten
+    #       vm['hostname'] = $config[:config]['prefix'] + o[rand(25)] + (0...14).map { o[rand(o.length)] }.join
+    #
+    #       # Add VM to Redis inventory ('pending' pool)
+    #       $redis.sadd('vmpooler__pending__' + vm['template'], vm['hostname'])
+    #       $redis.hset('vmpooler__vm__' + vm['hostname'], 'clone', Time.now)
+    #       $redis.hset('vmpooler__vm__' + vm['hostname'], 'template', vm['template'])
+    #
+    #       # Annotate with creation time, origin template, etc.
+    #       # Add extraconfig options that can be queried by vmtools
+    #       configSpec = RbVmomi::VIM.VirtualMachineConfigSpec(
+    #         annotation: JSON.pretty_generate(
+    #           name: vm['hostname'],
+    #           created_by: $config[:vsphere]['username'],
+    #           base_template: vm['template'],
+    #           creation_timestamp: Time.now.utc
+    #         ),
+    #         extraConfig: [
+    #           { key: 'guestinfo.hostname',
+    #             value: vm['hostname']
+    #           }
+    #         ]
+    #       )
+    #
+    #       # Choose a clone target
+    #       if target
+    #         $clone_target = vsphere.find_least_used_host(target)
+    #       elsif $config[:config]['clone_target']
+    #         $clone_target = vsphere.find_least_used_host($config[:config]['clone_target'])
+    #       end
+    #
+    #       # Put the VM in the specified folder and resource pool
+    #       relocateSpec = RbVmomi::VIM.VirtualMachineRelocateSpec(
+    #         datastore: vsphere.find_datastore(datastore),
+    #         host: $clone_target,
+    #         diskMoveType: :moveChildMostDiskBacking
+    #       )
+    #
+    #       # Create a clone spec
+    #       spec = RbVmomi::VIM.VirtualMachineCloneSpec(
+    #         location: relocateSpec,
+    #         config: configSpec,
+    #         powerOn: true,
+    #         template: false
+    #       )
+    #
+    #       # Clone the VM
+    #       $logger.log('d', "[ ] [#{vm['template']}] '#{vm['hostname']}' is being cloned from '#{vm['template']}'")
+    #
+    #       begin
+    #         start = Time.now
+    #         vm[vm['template']].CloneVM_Task(
+    #           folder: vsphere.find_folder(folder),
+    #           name: vm['hostname'],
+    #           spec: spec
+    #         ).wait_for_completion
+    #         finish = '%.2f' % (Time.now - start)
+    #
+    #         $redis.hset('vmpooler__clone__' + Date.today.to_s, vm['template'] + ':' + vm['hostname'], finish)
+    #         $redis.hset('vmpooler__vm__' + vm['hostname'], 'clone_time', finish)
+    #
+    #         $logger.log('s', "[+] [#{vm['template']}] '#{vm['hostname']}' cloned from '#{vm['template']}' in #{finish} seconds")
+    #       rescue => err
+    #         $logger.log('s', "[!] [#{vm['template']}] '#{vm['hostname']}' clone failed with an error: #{err}")
+    #         $redis.srem('vmpooler__pending__' + vm['template'], vm['hostname'])
+    #         raise
+    #       end
+    #
+    #       $redis.decr('vmpooler__tasks__clone')
+    #
+    #       $metrics.timing("clone.#{vm['template']}", finish)
+    #     rescue => err
+    #       $logger.log('s', "[!] [#{vm['template']}] '#{vm['hostname']}' failed while preparing to clone with an error: #{err}")
+    #       raise
+    #     end
+    #   end
+    # end
+
+    # Destroy a VM
+    # DONE
+    # TODO These calls should wrap the rescue block, not inside. This traps bad functions. Need to modify all functions
+    def destroy_vm(vm, pool, backingservice)
+      Thread.new do
+        _destroy_vm(vm, pool, backingservice)
+      end
+    end
+
     # Destroy a VM
-    def destroy_vm(vm, pool, vsphere)
-      Thread.new do
+    # DONE
+    def _destroy_vm(vm, pool, backingservice)
       $redis.srem('vmpooler__completed__' + pool, vm)
       $redis.hdel('vmpooler__active__' + pool, vm)
       $redis.hset('vmpooler__vm__' + vm, 'destroy', Time.now)
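The clone_vm signature shrinks from five arguments to two: callers now hand the whole pool hashtable to the backing service, which owns template lookup and cloning. Call sites change roughly like this (a sketch of before/after, not lines from the diff):

```ruby
# Before: the caller unpacked pool settings and drove vSphere directly.
clone_vm(pool['template'], pool['folder'], pool['datastore'],
         pool['clone_target'], $vsphere[pool['name']])

# After: the backing service receives the pool hashtable and decides itself.
clone_vm(pool, $backing_services[pool['name']])
```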
@@ -294,36 +339,21 @@ module Vmpooler
       # Auto-expire metadata key
       $redis.expire('vmpooler__vm__' + vm, ($config[:redis]['data_ttl'].to_i * 60 * 60))
 
-      host = vsphere.find_vm(vm)
-
-      if host
-        start = Time.now
-
-        if
-          (host.runtime) &&
-          (host.runtime.powerState) &&
-          (host.runtime.powerState == 'poweredOn')
-
-          $logger.log('d', "[ ] [#{pool}] '#{vm}' is being shut down")
-          host.PowerOffVM_Task.wait_for_completion
-        end
-
-        host.Destroy_Task.wait_for_completion
-        finish = '%.2f' % (Time.now - start)
-
-        $logger.log('s', "[-] [#{pool}] '#{vm}' destroyed in #{finish} seconds")
-        $metrics.timing("destroy.#{pool}", finish)
-      end
-      end
+      backingservice.destroy_vm(vm,pool)
+    rescue => err
+      $logger.log('d', "[!] [#{pool}] '#{vm}' failed while destroying the VM with an error: #{err}")
+      raise
     end
 
     def create_vm_disk(vm, disk_size, vsphere)
+      # TODO This is all vSphere specific
       Thread.new do
         _create_vm_disk(vm, disk_size, vsphere)
       end
     end
 
     def _create_vm_disk(vm, disk_size, vsphere)
+      # TODO This is all vSphere specific
       host = vsphere.find_vm(vm)
 
       if (host) && ((! disk_size.nil?) && (! disk_size.empty?) && (disk_size.to_i > 0))
@@ -358,12 +388,14 @@ module Vmpooler
     end
 
     def create_vm_snapshot(vm, snapshot_name, vsphere)
+      # TODO This is all vSphere specific
       Thread.new do
         _create_vm_snapshot(vm, snapshot_name, vsphere)
       end
     end
 
     def _create_vm_snapshot(vm, snapshot_name, vsphere)
+      # TODO This is all vSphere specific
       host = vsphere.find_vm(vm)
 
       if (host) && ((! snapshot_name.nil?) && (! snapshot_name.empty?))
@@ -387,12 +419,14 @@ module Vmpooler
     end
 
     def revert_vm_snapshot(vm, snapshot_name, vsphere)
+      # TODO This is all vSphere specific
       Thread.new do
         _revert_vm_snapshot(vm, snapshot_name, vsphere)
       end
     end
 
     def _revert_vm_snapshot(vm, snapshot_name, vsphere)
+      # TODO This is all vSphere specific
       host = vsphere.find_vm(vm)
 
       if host
@@ -413,6 +447,7 @@ module Vmpooler
     end
 
     def check_disk_queue
+      # TODO This is all vSphere specific
       $logger.log('d', "[*] [disk_manager] starting worker thread")
 
       $vsphere['disk_manager'] ||= Vmpooler::VsphereHelper.new $config, $metrics
@@ -426,6 +461,7 @@ module Vmpooler
     end
 
     def _check_disk_queue(vsphere)
+      # TODO This is all vSphere specific
       vm = $redis.spop('vmpooler__tasks__disk')
 
       unless vm.nil?
@@ -439,6 +475,7 @@ module Vmpooler
     end
 
     def check_snapshot_queue
+      # TODO This is all vSphere specific
       $logger.log('d', "[*] [snapshot_manager] starting worker thread")
 
       $vsphere['snapshot_manager'] ||= Vmpooler::VsphereHelper.new $config, $metrics
@@ -452,6 +489,7 @@ module Vmpooler
     end
 
     def _check_snapshot_queue(vsphere)
+      # TODO This is all vSphere specific
       vm = $redis.spop('vmpooler__tasks__snapshot')
 
       unless vm.nil?
@@ -475,23 +513,26 @@ module Vmpooler
       end
     end
 
+    # DONE
     def migration_limit(migration_limit)
       # Returns migration_limit setting when enabled
       return false if migration_limit == 0 || ! migration_limit
       migration_limit if migration_limit >= 1
     end
 
-    def migrate_vm(vm, pool, vsphere)
+    # DONE
+    def migrate_vm(vm, pool, backingservice)
       Thread.new do
-        _migrate_vm(vm, pool, vsphere)
+        _migrate_vm(vm, pool, backingservice)
       end
     end
 
-    def _migrate_vm(vm, pool, vsphere)
+    # DONE
+    def _migrate_vm(vm, pool, backingservice)
       begin
         $redis.srem('vmpooler__migrating__' + pool, vm)
-        vm_object = vsphere.find_vm(vm)
-        parent_host, parent_host_name = get_vm_host_info(vm_object)
+        parent_host_name = backingservice.get_vm_host(vm)
         migration_limit = migration_limit $config[:config]['migration_limit']
         migration_count = $redis.scard('vmpooler__migration')
 
@@ -504,11 +545,11 @@ module Vmpooler
           return
         else
           $redis.sadd('vmpooler__migration', vm)
-          host, host_name = vsphere.find_least_used_compatible_host(vm_object)
-          if host == parent_host
+          host_name = backingservice.find_least_used_compatible_host(vm)
+          if host_name == parent_host_name
             $logger.log('s', "[ ] [#{pool}] No migration required for '#{vm}' running on #{parent_host_name}")
           else
-            finish = migrate_vm_and_record_timing(vm_object, vm, pool, host, parent_host_name, host_name, vsphere)
+            finish = migrate_vm_and_record_timing(vm, pool, parent_host_name, host_name, backingservice)
             $logger.log('s', "[>] [#{pool}] '#{vm}' migrated from #{parent_host_name} to #{host_name} in #{finish} seconds")
           end
           remove_vmpooler_migration_vm(pool, vm)
@@ -520,11 +561,13 @@ module Vmpooler
       end
     end
 
-    def get_vm_host_info(vm_object)
-      parent_host = vm_object.summary.runtime.host
-      [parent_host, parent_host.name]
-    end
+    # TODO This is all vSphere specific
+    # def get_vm_host_info(vm_object)
+    #   parent_host = vm_object.summary.runtime.host
+    #   [parent_host, parent_host.name]
+    # end
 
+    # DONE
     def remove_vmpooler_migration_vm(pool, vm)
       begin
         $redis.srem('vmpooler__migration', vm)
@@ -533,9 +576,10 @@ module Vmpooler
       end
     end
 
-    def migrate_vm_and_record_timing(vm_object, vm_name, pool, host, source_host_name, dest_host_name, vsphere)
+    # DONE
+    def migrate_vm_and_record_timing(vm_name, pool, source_host_name, dest_host_name, backingservice)
       start = Time.now
-      vsphere.migrate_vm_host(vm_object, host)
+      backingservice.migrate_vm_to_host(vm_name, dest_host_name)
       finish = '%.2f' % (Time.now - start)
       $metrics.timing("migrate.#{pool}", finish)
       $metrics.increment("migrate_from.#{source_host_name}")
@@ -546,26 +590,35 @@ module Vmpooler
       finish
     end
 
+    # DONE
     def check_pool(pool)
       $logger.log('d', "[*] [#{pool['name']}] starting worker thread")
 
-      $vsphere[pool['name']] ||= Vmpooler::VsphereHelper.new $config, $metrics
+      case pool['backingservice']
+      when 'vsphere'
+        $backing_services[pool['name']] ||= Vmpooler::PoolManager::BackingService::Vsphere.new({ 'metrics' => $metrics}) # TODO Vmpooler::VsphereHelper.new $config[:vsphere], $metrics
+      when 'dummy'
+        $backing_services[pool['name']] ||= Vmpooler::PoolManager::BackingService::Dummy.new($config[:backingservice][:dummy])
+      else
+        $logger.log('s', "[!] backing service #{pool['backingservice']} is unknown for pool [#{pool['name']}]")
+      end
 
       $threads[pool['name']] = Thread.new do
         loop do
-          _check_pool(pool, $vsphere[pool['name']])
-          sleep(5)
+          _check_pool(pool, $backing_services[pool['name']])
+          # TODO Should this be configurable?
+          sleep(2) # Should be 5
         end
       end
     end
 
-    def _check_pool(pool, vsphere)
+    def _check_pool(pool,backingservice)
+      puts "CHECK POOL STARTING"
       # INVENTORY
+      # DONE!!
       inventory = {}
       begin
-        base = vsphere.find_folder(pool['folder'])
-
-        base.childEntity.each do |vm|
+        backingservice.vms_in_pool(pool).each do |vm|
           if
             (! $redis.sismember('vmpooler__running__' + pool['name'], vm['name'])) &&
             (! $redis.sismember('vmpooler__ready__' + pool['name'], vm['name'])) &&
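check_pool now selects the backing service per pool from pool['backingservice'], and the dummy service reads its settings from $config[:backingservice][:dummy]. A configuration shape consistent with those lookups, sketched as the parsed Ruby structure — the exact YAML layout and the option keys inside the dummy hash are not shown in this commit and are illustrative:

```ruby
# Hypothetical parsed config implied by the lookups above.
$config = {
  :backingservice => {
    :dummy => { 'initial_state' => 'running' } # illustrative options hash
  },
  :pools => [
    { 'name' => 'pool-vs', 'backingservice' => 'vsphere', 'size' => 5 },
    { 'name' => 'pool-dm', 'backingservice' => 'dummy',   'size' => 2 }
  ]
}
```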
@@ -586,11 +639,12 @@ module Vmpooler
       end

       # RUNNING
+      # DONE!!
       $redis.smembers("vmpooler__running__#{pool['name']}").each do |vm|
         if inventory[vm]
           begin
             vm_lifetime = $redis.hget('vmpooler__vm__' + vm, 'lifetime') || $config[:config]['vm_lifetime'] || 12
-            check_running_vm(vm, pool['name'], vm_lifetime, vsphere)
+            check_running_vm(vm, pool['name'], vm_lifetime, backingservice)
           rescue => err
             $logger.log('d', "[!] [#{pool['name']}] _check_pool with an error while evaluating running VMs: #{err}")
           end
@@ -598,10 +652,11 @@ module Vmpooler
       end

       # READY
+      # DONE!!
       $redis.smembers("vmpooler__ready__#{pool['name']}").each do |vm|
         if inventory[vm]
           begin
-            check_ready_vm(vm, pool['name'], pool['ready_ttl'] || 0, vsphere)
+            check_ready_vm(vm, pool['name'], pool['ready_ttl'] || 0, backingservice)
           rescue => err
             $logger.log('d', "[!] [#{pool['name']}] _check_pool failed with an error while evaluating ready VMs: #{err}")
           end
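Each of these queue sweeps (RUNNING and READY above, PENDING and COMPLETED below) follows the same defensive pattern: act only on VMs the backing service reported in inventory, and rescue per VM so one failure cannot abort the whole sweep. A runnable sketch of just that pattern, with plain hashes standing in for Redis and for the check_*_vm helpers:

queue     = ['vm-ok', 'vm-gone', 'vm-bad']
inventory = { 'vm-ok' => 1, 'vm-bad' => 1 } # 'vm-gone' was not reported

queue.each do |vm|
  next unless inventory[vm]                  # skip VMs missing from inventory
  begin
    raise 'provider error' if vm == 'vm-bad' # one VM failing...
    puts "checked #{vm}"
  rescue => err
    puts "[!] error while evaluating '#{vm}': #{err}" # ...does not stop the sweep
  end
end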
@@ -609,11 +664,12 @@ module Vmpooler
       end

       # PENDING
+      # DONE!!
       $redis.smembers("vmpooler__pending__#{pool['name']}").each do |vm|
         pool_timeout = pool['timeout'] || $config[:config]['timeout'] || 15
         if inventory[vm]
           begin
-            check_pending_vm(vm, pool['name'], pool_timeout, vsphere)
+            check_pending_vm(vm, pool['name'], pool_timeout, backingservice)
           rescue => err
             $logger.log('d', "[!] [#{pool['name']}] _check_pool failed with an error while evaluating pending VMs: #{err}")
           end
@@ -623,10 +679,11 @@ module Vmpooler
       end

       # COMPLETED
+      # DONE!!
       $redis.smembers("vmpooler__completed__#{pool['name']}").each do |vm|
         if inventory[vm]
           begin
-            destroy_vm(vm, pool['name'], vsphere)
+            destroy_vm(vm, pool['name'], backingservice)
           rescue => err
             $redis.srem("vmpooler__completed__#{pool['name']}", vm)
             $redis.hdel("vmpooler__active__#{pool['name']}", vm)
@@ -642,6 +699,7 @@ module Vmpooler
       end

       # DISCOVERED
+      # DONE
       begin
         $redis.smembers("vmpooler__discovered__#{pool['name']}").each do |vm|
           %w(pending ready running completed).each do |queue|
@@ -660,10 +718,11 @@ module Vmpooler
       end

       # MIGRATIONS
+      # DONE
       $redis.smembers("vmpooler__migrating__#{pool['name']}").each do |vm|
         if inventory[vm]
           begin
-            migrate_vm(vm, pool['name'], vsphere)
+            migrate_vm(vm, pool['name'], backingservice)
           rescue => err
             $logger.log('s', "[x] [#{pool['name']}] '#{vm}' failed to migrate: #{err}")
           end
@@ -671,6 +730,7 @@ module Vmpooler
       end

       # REPOPULATE
+      # DONE
       ready = $redis.scard("vmpooler__ready__#{pool['name']}")
       total = $redis.scard("vmpooler__pending__#{pool['name']}") + ready
@@ -693,14 +753,7 @@ module Vmpooler
         if $redis.get('vmpooler__tasks__clone').to_i < $config[:config]['task_limit'].to_i
           begin
             $redis.incr('vmpooler__tasks__clone')
-            clone_vm(
-              pool['template'],
-              pool['folder'],
-              pool['datastore'],
-              pool['clone_target'],
-              vsphere
-            )
+            clone_vm(pool, backingservice)
           rescue => err
             $logger.log('s', "[!] [#{pool['name']}] clone failed during check_pool with an error: #{err}")
             $redis.decr('vmpooler__tasks__clone')
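The clone call site shrinks because the pool hash already carries everything the five positional arguments spelled out, and the provider choice now travels with the backing service. A runnable sketch with illustrative values (only the key names come from the old call site; the values are made up):

pool = {
  'name'         => 'debian-7-x86_64',
  'template'     => 'Templates/debian-7-x86_64',  # illustrative value
  'folder'       => 'Pooled VMs/debian-7-x86_64', # illustrative value
  'datastore'    => 'vmstorage',                  # illustrative value
  'clone_target' => 'general1'                    # illustrative value
}
# Everything the old clone_vm(template, folder, datastore, clone_target, vsphere)
# call passed positionally is recoverable from the hash:
p pool.values_at('template', 'folder', 'datastore', 'clone_target')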
@@ -721,21 +774,26 @@ module Vmpooler
       $redis.set('vmpooler__tasks__clone', 0)
       # Clear out vmpooler__migrations since stale entries may be left after a restart
       $redis.del('vmpooler__migration')
+      # Set default backingservice for all pools that do not have one defined
+      $config[:pools].each do |pool|
+        pool['backingservice'] = 'vsphere' if pool['backingservice'].nil?
+      end

       loop do
-        if ! $threads['disk_manager']
-          check_disk_queue
-        elsif ! $threads['disk_manager'].alive?
-          $logger.log('d', "[!] [disk_manager] worker thread died, restarting")
-          check_disk_queue
-        end
+        # DEBUG TO DO
+        # if ! $threads['disk_manager']
+        #   check_disk_queue
+        # elsif ! $threads['disk_manager'].alive?
+        #   $logger.log('d', "[!] [disk_manager] worker thread died, restarting")
+        #   check_disk_queue
+        # end

-        if ! $threads['snapshot_manager']
-          check_snapshot_queue
-        elsif ! $threads['snapshot_manager'].alive?
-          $logger.log('d', "[!] [snapshot_manager] worker thread died, restarting")
-          check_snapshot_queue
-        end
+        # if ! $threads['snapshot_manager']
+        #   check_snapshot_queue
+        # elsif ! $threads['snapshot_manager'].alive?
+        #   $logger.log('d', "[!] [snapshot_manager] worker thread died, restarting")
+        #   check_snapshot_queue
+        # end

         $config[:pools].each do |pool|
           if ! $threads[pool['name']]
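The startup pass added above mutates each pool hash in place, so every later pool['backingservice'] lookup is guaranteed a value. The same idiom in isolation:

pools = [{ 'name' => 'a' }, { 'name' => 'b', 'backingservice' => 'dummy' }]
pools.each { |pool| pool['backingservice'] = 'vsphere' if pool['backingservice'].nil? }
p pools.map { |pool| pool['backingservice'] } # => ["vsphere", "dummy"]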
lib/vmpooler/vsphere_helper.rb
@@ -1,417 +1,417 @@
-require 'rubygems' unless defined?(Gem)
+# require 'rubygems' unless defined?(Gem)

-module Vmpooler
-  class VsphereHelper
-    ADAPTER_TYPE = 'lsiLogic'
-    DISK_TYPE = 'thin'
-    DISK_MODE = 'persistent'
+# module Vmpooler
+#   class VsphereHelper
+#     ADAPTER_TYPE = 'lsiLogic'
+#     DISK_TYPE = 'thin'
+#     DISK_MODE = 'persistent'

-    def initialize(config, metrics)
-      $credentials = config[:vsphere]
-      $conf = config[:config]
-      $metrics = metrics
-    end
+#     def initialize(config, metrics)
+#       $credentials = config[:vsphere]
+#       $conf = config[:config]
+#       $metrics = metrics
+#     end

-    def ensure_connected(connection, credentials)
-      connection.serviceInstance.CurrentTime
-    rescue
-      $metrics.increment("connect.open")
-      connect_to_vsphere $credentials
-    end
+#     def ensure_connected(connection, credentials)
+#       connection.serviceInstance.CurrentTime
+#     rescue
+#       $metrics.increment("connect.open")
+#       connect_to_vsphere $credentials
+#     end

-    def connect_to_vsphere(credentials)
-      max_tries = $conf['max_tries'] || 3
-      retry_factor = $conf['retry_factor'] || 10
-      try = 1
-      begin
-        @connection = RbVmomi::VIM.connect host: credentials['server'],
-                                           user: credentials['username'],
-                                           password: credentials['password'],
-                                           insecure: credentials['insecure'] || true
-        $metrics.increment("connect.open")
-      rescue => err
-        try += 1
-        $metrics.increment("connect.fail")
-        raise err if try == max_tries
-        sleep(try * retry_factor)
-        retry
-      end
-    end
+#     def connect_to_vsphere(credentials)
+#       max_tries = $conf['max_tries'] || 3
+#       retry_factor = $conf['retry_factor'] || 10
+#       try = 1
+#       begin
+#         @connection = RbVmomi::VIM.connect host: credentials['server'],
+#                                            user: credentials['username'],
+#                                            password: credentials['password'],
+#                                            insecure: credentials['insecure'] || true
+#         $metrics.increment("connect.open")
+#       rescue => err
+#         try += 1
+#         $metrics.increment("connect.fail")
+#         raise err if try == max_tries
+#         sleep(try * retry_factor)
+#         retry
+#       end
+#     end

-    def add_disk(vm, size, datastore)
-      ensure_connected @connection, $credentials
+#     def add_disk(vm, size, datastore)
+#       ensure_connected @connection, $credentials

-      return false unless size.to_i > 0
+#       return false unless size.to_i > 0

-      vmdk_datastore = find_datastore(datastore)
-      vmdk_file_name = "#{vm['name']}/#{vm['name']}_#{find_vmdks(vm['name'], datastore).length + 1}.vmdk"
+#       vmdk_datastore = find_datastore(datastore)
+#       vmdk_file_name = "#{vm['name']}/#{vm['name']}_#{find_vmdks(vm['name'], datastore).length + 1}.vmdk"

-      controller = find_disk_controller(vm)
+#       controller = find_disk_controller(vm)

-      vmdk_spec = RbVmomi::VIM::FileBackedVirtualDiskSpec(
-        capacityKb: size.to_i * 1024 * 1024,
-        adapterType: ADAPTER_TYPE,
-        diskType: DISK_TYPE
-      )
+#       vmdk_spec = RbVmomi::VIM::FileBackedVirtualDiskSpec(
+#         capacityKb: size.to_i * 1024 * 1024,
+#         adapterType: ADAPTER_TYPE,
+#         diskType: DISK_TYPE
+#       )

-      vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
-        datastore: vmdk_datastore,
-        diskMode: DISK_MODE,
-        fileName: "[#{vmdk_datastore.name}] #{vmdk_file_name}"
-      )
+#       vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
+#         datastore: vmdk_datastore,
+#         diskMode: DISK_MODE,
+#         fileName: "[#{vmdk_datastore.name}] #{vmdk_file_name}"
+#       )

-      device = RbVmomi::VIM::VirtualDisk(
-        backing: vmdk_backing,
-        capacityInKB: size.to_i * 1024 * 1024,
-        controllerKey: controller.key,
-        key: -1,
-        unitNumber: find_disk_unit_number(vm, controller)
-      )
+#       device = RbVmomi::VIM::VirtualDisk(
+#         backing: vmdk_backing,
+#         capacityInKB: size.to_i * 1024 * 1024,
+#         controllerKey: controller.key,
+#         key: -1,
+#         unitNumber: find_disk_unit_number(vm, controller)
+#       )

-      device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
-        device: device,
-        operation: RbVmomi::VIM::VirtualDeviceConfigSpecOperation('add')
-      )
+#       device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
+#         device: device,
+#         operation: RbVmomi::VIM::VirtualDeviceConfigSpecOperation('add')
+#       )

-      vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
-        deviceChange: [device_config_spec]
-      )
+#       vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
+#         deviceChange: [device_config_spec]
+#       )

-      @connection.serviceContent.virtualDiskManager.CreateVirtualDisk_Task(
-        datacenter: @connection.serviceInstance.find_datacenter,
-        name: "[#{vmdk_datastore.name}] #{vmdk_file_name}",
-        spec: vmdk_spec
-      ).wait_for_completion
+#       @connection.serviceContent.virtualDiskManager.CreateVirtualDisk_Task(
+#         datacenter: @connection.serviceInstance.find_datacenter,
+#         name: "[#{vmdk_datastore.name}] #{vmdk_file_name}",
+#         spec: vmdk_spec
+#       ).wait_for_completion

-      vm.ReconfigVM_Task(spec: vm_config_spec).wait_for_completion
+#       vm.ReconfigVM_Task(spec: vm_config_spec).wait_for_completion

-      true
-    end
+#       true
+#     end

-    def find_datastore(datastorename)
-      ensure_connected @connection, $credentials
+#     def find_datastore(datastorename)
+#       ensure_connected @connection, $credentials

-      datacenter = @connection.serviceInstance.find_datacenter
-      datacenter.find_datastore(datastorename)
-    end
+#       datacenter = @connection.serviceInstance.find_datacenter
+#       datacenter.find_datastore(datastorename)
+#     end

-    def find_device(vm, deviceName)
-      ensure_connected @connection, $credentials
+#     def find_device(vm, deviceName)
+#       ensure_connected @connection, $credentials

-      vm.config.hardware.device.each do |device|
-        return device if device.deviceInfo.label == deviceName
-      end
+#       vm.config.hardware.device.each do |device|
+#         return device if device.deviceInfo.label == deviceName
+#       end

-      nil
-    end
+#       nil
+#     end

-    def find_disk_controller(vm)
-      ensure_connected @connection, $credentials
+#     def find_disk_controller(vm)
+#       ensure_connected @connection, $credentials

-      devices = find_disk_devices(vm)
+#       devices = find_disk_devices(vm)

-      devices.keys.sort.each do |device|
-        if devices[device]['children'].length < 15
-          return find_device(vm, devices[device]['device'].deviceInfo.label)
-        end
-      end
+#       devices.keys.sort.each do |device|
+#         if devices[device]['children'].length < 15
+#           return find_device(vm, devices[device]['device'].deviceInfo.label)
+#         end
+#       end

-      nil
-    end
+#       nil
+#     end

-    def find_disk_devices(vm)
-      ensure_connected @connection, $credentials
+#     def find_disk_devices(vm)
+#       ensure_connected @connection, $credentials

-      devices = {}
+#       devices = {}

-      vm.config.hardware.device.each do |device|
-        if device.is_a? RbVmomi::VIM::VirtualSCSIController
-          if devices[device.controllerKey].nil?
-            devices[device.key] = {}
-            devices[device.key]['children'] = []
-          end
+#       vm.config.hardware.device.each do |device|
+#         if device.is_a? RbVmomi::VIM::VirtualSCSIController
+#           if devices[device.controllerKey].nil?
+#             devices[device.key] = {}
+#             devices[device.key]['children'] = []
+#           end

-          devices[device.key]['device'] = device
-        end
+#           devices[device.key]['device'] = device
+#         end

-        if device.is_a? RbVmomi::VIM::VirtualDisk
-          if devices[device.controllerKey].nil?
-            devices[device.controllerKey] = {}
-            devices[device.controllerKey]['children'] = []
-          end
+#         if device.is_a? RbVmomi::VIM::VirtualDisk
+#           if devices[device.controllerKey].nil?
+#             devices[device.controllerKey] = {}
+#             devices[device.controllerKey]['children'] = []
+#           end

-          devices[device.controllerKey]['children'].push(device)
-        end
-      end
+#           devices[device.controllerKey]['children'].push(device)
+#         end
+#       end

-      devices
-    end
+#       devices
+#     end

-    def find_disk_unit_number(vm, controller)
-      ensure_connected @connection, $credentials
+#     def find_disk_unit_number(vm, controller)
+#       ensure_connected @connection, $credentials

-      used_unit_numbers = []
-      available_unit_numbers = []
+#       used_unit_numbers = []
+#       available_unit_numbers = []

-      devices = find_disk_devices(vm)
+#       devices = find_disk_devices(vm)

-      devices.keys.sort.each do |c|
-        next unless controller.key == devices[c]['device'].key
-        used_unit_numbers.push(devices[c]['device'].scsiCtlrUnitNumber)
-        devices[c]['children'].each do |disk|
-          used_unit_numbers.push(disk.unitNumber)
-        end
-      end
+#       devices.keys.sort.each do |c|
+#         next unless controller.key == devices[c]['device'].key
+#         used_unit_numbers.push(devices[c]['device'].scsiCtlrUnitNumber)
+#         devices[c]['children'].each do |disk|
+#           used_unit_numbers.push(disk.unitNumber)
+#         end
+#       end

-      (0..15).each do |scsi_id|
-        if used_unit_numbers.grep(scsi_id).length <= 0
-          available_unit_numbers.push(scsi_id)
-        end
-      end
+#       (0..15).each do |scsi_id|
+#         if used_unit_numbers.grep(scsi_id).length <= 0
+#           available_unit_numbers.push(scsi_id)
+#         end
+#       end

-      available_unit_numbers.sort[0]
-    end
+#       available_unit_numbers.sort[0]
+#     end

-    def find_folder(foldername)
-      ensure_connected @connection, $credentials
+#     def find_folder(foldername)
+#       ensure_connected @connection, $credentials

-      datacenter = @connection.serviceInstance.find_datacenter
-      base = datacenter.vmFolder
-      folders = foldername.split('/')
-      folders.each do |folder|
-        case base
-          when RbVmomi::VIM::Folder
-            base = base.childEntity.find { |f| f.name == folder }
-          else
-            abort "Unexpected object type encountered (#{base.class}) while finding folder"
-        end
-      end
+#       datacenter = @connection.serviceInstance.find_datacenter
+#       base = datacenter.vmFolder
+#       folders = foldername.split('/')
+#       folders.each do |folder|
+#         case base
+#           when RbVmomi::VIM::Folder
+#             base = base.childEntity.find { |f| f.name == folder }
+#           else
+#             abort "Unexpected object type encountered (#{base.class}) while finding folder"
+#         end
+#       end

-      base
-    end
+#       base
+#     end

-    # Returns an array containing cumulative CPU and memory utilization of a host, and its object reference
-    # Params:
-    # +model+:: CPU arch version to match on
-    # +limit+:: Hard limit for CPU or memory utilization beyond which a host is excluded for deployments
-    def get_host_utilization(host, model=nil, limit=90)
-      if model
-        return nil unless host_has_cpu_model? host, model
-      end
-      return nil if host.runtime.inMaintenanceMode
-      return nil unless host.overallStatus == 'green'
+#     # Returns an array containing cumulative CPU and memory utilization of a host, and its object reference
+#     # Params:
+#     # +model+:: CPU arch version to match on
+#     # +limit+:: Hard limit for CPU or memory utilization beyond which a host is excluded for deployments
+#     def get_host_utilization(host, model=nil, limit=90)
+#       if model
+#         return nil unless host_has_cpu_model? host, model
+#       end
+#       return nil if host.runtime.inMaintenanceMode
+#       return nil unless host.overallStatus == 'green'

-      cpu_utilization = cpu_utilization_for host
-      memory_utilization = memory_utilization_for host
+#       cpu_utilization = cpu_utilization_for host
+#       memory_utilization = memory_utilization_for host

-      return nil if cpu_utilization > limit
-      return nil if memory_utilization > limit
+#       return nil if cpu_utilization > limit
+#       return nil if memory_utilization > limit

-      [ cpu_utilization + memory_utilization, host ]
-    end
+#       [ cpu_utilization + memory_utilization, host ]
+#     end

-    def host_has_cpu_model?(host, model)
-      get_host_cpu_arch_version(host) == model
-    end
+#     def host_has_cpu_model?(host, model)
+#       get_host_cpu_arch_version(host) == model
+#     end

-    def get_host_cpu_arch_version(host)
-      cpu_model = host.hardware.cpuPkg[0].description
-      cpu_model_parts = cpu_model.split()
-      arch_version = cpu_model_parts[4]
-      arch_version
-    end
+#     def get_host_cpu_arch_version(host)
+#       cpu_model = host.hardware.cpuPkg[0].description
+#       cpu_model_parts = cpu_model.split()
+#       arch_version = cpu_model_parts[4]
+#       arch_version
+#     end

-    def cpu_utilization_for(host)
-      cpu_usage = host.summary.quickStats.overallCpuUsage
-      cpu_size = host.summary.hardware.cpuMhz * host.summary.hardware.numCpuCores
-      (cpu_usage.to_f / cpu_size.to_f) * 100
-    end
+#     def cpu_utilization_for(host)
+#       cpu_usage = host.summary.quickStats.overallCpuUsage
+#       cpu_size = host.summary.hardware.cpuMhz * host.summary.hardware.numCpuCores
+#       (cpu_usage.to_f / cpu_size.to_f) * 100
+#     end

-    def memory_utilization_for(host)
-      memory_usage = host.summary.quickStats.overallMemoryUsage
-      memory_size = host.summary.hardware.memorySize / 1024 / 1024
-      (memory_usage.to_f / memory_size.to_f) * 100
-    end
+#     def memory_utilization_for(host)
+#       memory_usage = host.summary.quickStats.overallMemoryUsage
+#       memory_size = host.summary.hardware.memorySize / 1024 / 1024
+#       (memory_usage.to_f / memory_size.to_f) * 100
+#     end

-    def find_least_used_host(cluster)
-      ensure_connected @connection, $credentials
+#     def find_least_used_host(cluster)
+#       ensure_connected @connection, $credentials

-      cluster_object = find_cluster(cluster)
-      target_hosts = get_cluster_host_utilization(cluster_object)
-      least_used_host = target_hosts.sort[0][1]
-      least_used_host
-    end
+#       cluster_object = find_cluster(cluster)
+#       target_hosts = get_cluster_host_utilization(cluster_object)
+#       least_used_host = target_hosts.sort[0][1]
+#       least_used_host
+#     end

-    def find_cluster(cluster)
-      datacenter = @connection.serviceInstance.find_datacenter
-      datacenter.hostFolder.children.find { |cluster_object| cluster_object.name == cluster }
-    end
+#     def find_cluster(cluster)
+#       datacenter = @connection.serviceInstance.find_datacenter
+#       datacenter.hostFolder.children.find { |cluster_object| cluster_object.name == cluster }
+#     end

-    def get_cluster_host_utilization(cluster)
-      cluster_hosts = []
-      cluster.host.each do |host|
-        host_usage = get_host_utilization(host)
-        cluster_hosts << host_usage if host_usage
-      end
-      cluster_hosts
-    end
+#     def get_cluster_host_utilization(cluster)
+#       cluster_hosts = []
+#       cluster.host.each do |host|
+#         host_usage = get_host_utilization(host)
+#         cluster_hosts << host_usage if host_usage
+#       end
+#       cluster_hosts
+#     end

-    def find_least_used_compatible_host(vm)
-      ensure_connected @connection, $credentials
+#     def find_least_used_compatible_host(vm)
+#       ensure_connected @connection, $credentials

-      source_host = vm.summary.runtime.host
-      model = get_host_cpu_arch_version(source_host)
-      cluster = source_host.parent
-      target_hosts = []
-      cluster.host.each do |host|
-        host_usage = get_host_utilization(host, model)
-        target_hosts << host_usage if host_usage
-      end
-      target_host = target_hosts.sort[0][1]
-      [target_host, target_host.name]
-    end
+#       source_host = vm.summary.runtime.host
+#       model = get_host_cpu_arch_version(source_host)
+#       cluster = source_host.parent
+#       target_hosts = []
+#       cluster.host.each do |host|
+#         host_usage = get_host_utilization(host, model)
+#         target_hosts << host_usage if host_usage
+#       end
+#       target_host = target_hosts.sort[0][1]
+#       [target_host, target_host.name]
+#     end

-    def find_pool(poolname)
-      ensure_connected @connection, $credentials
+#     def find_pool(poolname)
+#       ensure_connected @connection, $credentials

-      datacenter = @connection.serviceInstance.find_datacenter
-      base = datacenter.hostFolder
-      pools = poolname.split('/')
-      pools.each do |pool|
-        case base
-          when RbVmomi::VIM::Folder
-            base = base.childEntity.find { |f| f.name == pool }
-          when RbVmomi::VIM::ClusterComputeResource
-            base = base.resourcePool.resourcePool.find { |f| f.name == pool }
-          when RbVmomi::VIM::ResourcePool
-            base = base.resourcePool.find { |f| f.name == pool }
-          else
-            abort "Unexpected object type encountered (#{base.class}) while finding resource pool"
-        end
-      end
+#       datacenter = @connection.serviceInstance.find_datacenter
+#       base = datacenter.hostFolder
+#       pools = poolname.split('/')
+#       pools.each do |pool|
+#         case base
+#           when RbVmomi::VIM::Folder
+#             base = base.childEntity.find { |f| f.name == pool }
+#           when RbVmomi::VIM::ClusterComputeResource
+#             base = base.resourcePool.resourcePool.find { |f| f.name == pool }
+#           when RbVmomi::VIM::ResourcePool
+#             base = base.resourcePool.find { |f| f.name == pool }
+#           else
+#             abort "Unexpected object type encountered (#{base.class}) while finding resource pool"
+#         end
+#       end

-      base = base.resourcePool unless base.is_a?(RbVmomi::VIM::ResourcePool) && base.respond_to?(:resourcePool)
-      base
-    end
+#       base = base.resourcePool unless base.is_a?(RbVmomi::VIM::ResourcePool) && base.respond_to?(:resourcePool)
+#       base
+#     end

-    def find_snapshot(vm, snapshotname)
-      if vm.snapshot
-        get_snapshot_list(vm.snapshot.rootSnapshotList, snapshotname)
-      end
-    end
+#     def find_snapshot(vm, snapshotname)
+#       if vm.snapshot
+#         get_snapshot_list(vm.snapshot.rootSnapshotList, snapshotname)
+#       end
+#     end

-    def find_vm(vmname)
-      ensure_connected @connection, $credentials
-      find_vm_light(vmname) || find_vm_heavy(vmname)[vmname]
-    end
+#     def find_vm(vmname)
+#       ensure_connected @connection, $credentials
+#       find_vm_light(vmname) || find_vm_heavy(vmname)[vmname]
+#     end

-    def find_vm_light(vmname)
-      ensure_connected @connection, $credentials
+#     def find_vm_light(vmname)
+#       ensure_connected @connection, $credentials

-      @connection.searchIndex.FindByDnsName(vmSearch: true, dnsName: vmname)
-    end
+#       @connection.searchIndex.FindByDnsName(vmSearch: true, dnsName: vmname)
+#     end

-    def find_vm_heavy(vmname)
-      ensure_connected @connection, $credentials
+#     def find_vm_heavy(vmname)
+#       ensure_connected @connection, $credentials

-      vmname = vmname.is_a?(Array) ? vmname : [vmname]
-      containerView = get_base_vm_container_from @connection
-      propertyCollector = @connection.propertyCollector
+#       vmname = vmname.is_a?(Array) ? vmname : [vmname]
+#       containerView = get_base_vm_container_from @connection
+#       propertyCollector = @connection.propertyCollector

-      objectSet = [{
-        obj: containerView,
-        skip: true,
-        selectSet: [RbVmomi::VIM::TraversalSpec.new(
-          name: 'gettingTheVMs',
-          path: 'view',
-          skip: false,
-          type: 'ContainerView'
-        )]
-      }]
+#       objectSet = [{
+#         obj: containerView,
+#         skip: true,
+#         selectSet: [RbVmomi::VIM::TraversalSpec.new(
+#           name: 'gettingTheVMs',
+#           path: 'view',
+#           skip: false,
+#           type: 'ContainerView'
+#         )]
+#       }]

-      propSet = [{
-        pathSet: ['name'],
-        type: 'VirtualMachine'
-      }]
+#       propSet = [{
+#         pathSet: ['name'],
+#         type: 'VirtualMachine'
+#       }]

-      results = propertyCollector.RetrievePropertiesEx(
-        specSet: [{
-          objectSet: objectSet,
-          propSet: propSet
-        }],
-        options: { maxObjects: nil }
-      )
+#       results = propertyCollector.RetrievePropertiesEx(
+#         specSet: [{
+#           objectSet: objectSet,
+#           propSet: propSet
+#         }],
+#         options: { maxObjects: nil }
+#       )

-      vms = {}
-      results.objects.each do |result|
-        name = result.propSet.first.val
-        next unless vmname.include? name
-        vms[name] = result.obj
-      end
+#       vms = {}
+#       results.objects.each do |result|
+#         name = result.propSet.first.val
+#         next unless vmname.include? name
+#         vms[name] = result.obj
+#       end

-      while results.token
-        results = propertyCollector.ContinueRetrievePropertiesEx(token: results.token)
-        results.objects.each do |result|
-          name = result.propSet.first.val
-          next unless vmname.include? name
-          vms[name] = result.obj
-        end
-      end
+#       while results.token
+#         results = propertyCollector.ContinueRetrievePropertiesEx(token: results.token)
+#         results.objects.each do |result|
+#           name = result.propSet.first.val
+#           next unless vmname.include? name
+#           vms[name] = result.obj
+#         end
+#       end

-      vms
-    end
+#       vms
+#     end

-    def find_vmdks(vmname, datastore)
-      ensure_connected @connection, $credentials
+#     def find_vmdks(vmname, datastore)
+#       ensure_connected @connection, $credentials

-      disks = []
+#       disks = []

-      vmdk_datastore = find_datastore(datastore)
+#       vmdk_datastore = find_datastore(datastore)

-      vm_files = vmdk_datastore._connection.serviceContent.propertyCollector.collectMultiple vmdk_datastore.vm, 'layoutEx.file'
-      vm_files.keys.each do |f|
-        vm_files[f]['layoutEx.file'].each do |l|
-          if l.name.match(/^\[#{vmdk_datastore.name}\] #{vmname}\/#{vmname}_([0-9]+).vmdk/)
-            disks.push(l)
-          end
-        end
-      end
+#       vm_files = vmdk_datastore._connection.serviceContent.propertyCollector.collectMultiple vmdk_datastore.vm, 'layoutEx.file'
+#       vm_files.keys.each do |f|
+#         vm_files[f]['layoutEx.file'].each do |l|
+#           if l.name.match(/^\[#{vmdk_datastore.name}\] #{vmname}\/#{vmname}_([0-9]+).vmdk/)
+#             disks.push(l)
+#           end
+#         end
+#       end

-      disks
-    end
+#       disks
+#     end

-    def get_base_vm_container_from(connection)
-      ensure_connected @connection, $credentials
+#     def get_base_vm_container_from(connection)
+#       ensure_connected @connection, $credentials

-      viewManager = connection.serviceContent.viewManager
-      viewManager.CreateContainerView(
-        container: connection.serviceContent.rootFolder,
-        recursive: true,
-        type: ['VirtualMachine']
-      )
-    end
+#       viewManager = connection.serviceContent.viewManager
+#       viewManager.CreateContainerView(
+#         container: connection.serviceContent.rootFolder,
+#         recursive: true,
+#         type: ['VirtualMachine']
+#       )
+#     end

-    def get_snapshot_list(tree, snapshotname)
-      snapshot = nil
+#     def get_snapshot_list(tree, snapshotname)
+#       snapshot = nil

-      tree.each do |child|
-        if child.name == snapshotname
-          snapshot ||= child.snapshot
-        else
-          snapshot ||= get_snapshot_list(child.childSnapshotList, snapshotname)
-        end
-      end
+#       tree.each do |child|
+#         if child.name == snapshotname
+#           snapshot ||= child.snapshot
+#         else
+#           snapshot ||= get_snapshot_list(child.childSnapshotList, snapshotname)
+#         end
+#       end

-      snapshot
-    end
+#       snapshot
+#     end

-    def migrate_vm_host(vm, host)
-      relospec = RbVmomi::VIM.VirtualMachineRelocateSpec(host: host)
-      vm.RelocateVM_Task(spec: relospec).wait_for_completion
-    end
+#     def migrate_vm_host(vm, host)
+#       relospec = RbVmomi::VIM.VirtualMachineRelocateSpec(host: host)
+#       vm.RelocateVM_Task(spec: relospec).wait_for_completion
+#     end

-    def close
-      @connection.close
-    end
-  end
-end
+#     def close
+#       @connection.close
+#     end
+#   end
+# end
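With the helper retired, its RbVmomi plumbing is expected to resurface behind the new per-pool interface. A sketch only, not this commit's code: it assumes the eventual BackingService::Vsphere keeps an RbVmomi connection in @connection and borrows the relocate calls from the migrate_vm_host method commented out above; find_host is a hypothetical lookup.

require 'rbvmomi'

module Vmpooler
  class PoolManager
    class BackingService
      class Vsphere < Base
        # Sketch: satisfy the migrate_vm_to_host contract by delegating to
        # the same RbVmomi calls the retired helper used.
        def migrate_vm_to_host(vm_name, dest_host_name)
          vm = find_vm(vm_name)            # name-based lookup, as in the retired helper
          host = find_host(dest_host_name) # hypothetical host lookup by name
          relospec = RbVmomi::VIM.VirtualMachineRelocateSpec(host: host)
          vm.RelocateVM_Task(spec: relospec).wait_for_completion
          true
        end
      end
    end
  end
end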