Merge pull request #147 from sschneid/add_disk

Allow new disks to be added to running VMs via vmpooler API
This commit is contained in:
Colin 2016-01-14 14:38:24 -08:00
commit d4f3eb3c5f
5 changed files with 320 additions and 1 deletions

40
API.md
View file

@ -226,6 +226,46 @@ $ curl -X DELETE --url vmpooler.company.com/api/v1/vm/fq6qlpjlsskycq6
}
```
#### Adding additional disk(s)
##### POST /vm/&lt;hostname&gt;/disk/&lt;size&gt;
Add an additional disk to a running VM.
````
$ curl -X POST -H X-AUTH-TOKEN:a9znth9dn01t416hrguu56ze37t790bl --url vmpooler.company.com/api/v1/vm/fq6qlpjlsskycq6/disk/8
````
````json
{
"ok": true,
"fq6qlpjlsskycq6": {
"disk": "+8gb"
}
}
````
Provisioning and attaching disks can take a moment, but once the task completes it will be reflected in a `GET /vm/<hostname>` query:
````
$ curl --url vmpooler.company.com/api/v1/vm/fq6qlpjlsskycq6
````
````json
{
"ok": true,
"fq6qlpjlsskycq6": {
"template": "debian-7-x86_64",
"lifetime": 2,
"running": 0.08,
"state": "running",
"disk": [
"+8gb"
],
"domain": "delivery.puppetlabs.net"
}
}
````
#### VM snapshots
##### POST /vm/&lt;hostname&gt;/snapshot

View file

@ -62,6 +62,10 @@ module Vmpooler
post '/vm/:hostname/snapshot/:snapshot/?' do
call env.merge("PATH_INFO" => "/api/v#{api_version}/vm/#{params[:hostname]}/snapshot/#{params[:snapshot]}")
end
# Forward disk-attach requests to the versioned API. Registered as POST
# to match both API.md and the v1 handler (which registers
# `post "#{api_prefix}/vm/:hostname/disk/:size/?"`); the original PUT
# here could never reach that handler.
post '/vm/:hostname/disk/:size/?' do
  call env.merge("PATH_INFO" => "/api/v#{api_version}/vm/#{params[:hostname]}/disk/#{params[:size]}")
end
end
end
end

View file

@ -455,6 +455,10 @@ module Vmpooler
end
end
if rdata['disk']
result[params[:hostname]]['disk'] = rdata['disk'].split(':')
end
if config['domain']
result[params[:hostname]]['domain'] = config['domain']
end
@ -552,6 +556,29 @@ module Vmpooler
JSON.pretty_generate(result)
end
# POST /vm/<hostname>/disk/<size>
#
# Queue a task to attach an additional <size>gb disk to a running VM.
# Responds 202 with { ok: true, <hostname>: { disk: '+<size>gb' } } when
# the task is queued; 404 when the VM is unknown or the size is not a
# positive integer. Requires an auth token when auth is configured.
post "#{api_prefix}/vm/:hostname/disk/:size/?" do
  content_type :json

  need_token! if Vmpooler::API.settings.config[:auth]

  # Default to "not found / invalid" unless validation passes below.
  status 404
  result = { 'ok' => false }

  params[:hostname] = hostname_shorten(params[:hostname], config['domain'])

  # `&&` instead of `and`: `and` binds looser than assignment/argument
  # lists and is a precedence trap in boolean expressions.
  if (params[:size].to_i > 0) && backend.exists('vmpooler__vm__' + params[:hostname])
    result[params[:hostname]] = {}
    result[params[:hostname]]['disk'] = "+#{params[:size]}gb"

    # The disk_manager worker pops 'hostname:size' entries from this set
    # and provisions the disk asynchronously.
    backend.sadd('vmpooler__tasks__disk', params[:hostname] + ':' + params[:size])

    status 202
    result['ok'] = true
  end

  JSON.pretty_generate(result)
end
post "#{api_prefix}/vm/:hostname/snapshot/?" do
content_type :json

View file

@ -299,6 +299,47 @@ module Vmpooler
end
end
# Kick off disk provisioning for +vm+ without blocking the caller; the
# actual vSphere work happens in _create_vm_disk on a background thread.
def create_vm_disk(vm, disk_size)
  Thread.new { _create_vm_disk(vm, disk_size) }
end
# Provision and attach a +disk_size+ gb disk to +vm+, then record the
# new disk in the VM's redis hash (colon-separated '+<n>gb' entries).
# No-op when the VM cannot be found or the size is not a positive
# integer string. Logs success with '[+]' and failure with '[!]'.
def _create_vm_disk(vm, disk_size)
  host = $vsphere['disk_manager'].find_vm(vm) ||
         $vsphere['disk_manager'].find_vm_heavy(vm)[vm]

  if host && !disk_size.nil? && !disk_size.empty? && (disk_size.to_i > 0)
    $logger.log('s', "[ ] [disk_manager] '#{vm}' is attaching a #{disk_size}gb disk")

    start = Time.now

    # The VM's pool template determines which datastore new disks go on.
    template = $redis.hget('vmpooler__vm__' + vm, 'template')
    datastore = nil

    $config[:pools].each do |pool|
      datastore = pool['datastore'] if pool['name'] == template
    end

    if !datastore.nil? && !datastore.empty?
      $vsphere['disk_manager'].add_disk(host, disk_size, datastore)

      # Append the new disk to the existing colon-separated list (if any)
      # so GET /vm/<hostname> can report it.
      rdisks = $redis.hget('vmpooler__vm__' + vm, 'disk')
      disks = rdisks ? rdisks.split(':') : []
      disks.push("+#{disk_size}gb")
      $redis.hset('vmpooler__vm__' + vm, 'disk', disks.join(':'))

      finish = '%.2f' % (Time.now - start)
      $logger.log('s', "[+] [disk_manager] '#{vm}' attached #{disk_size}gb disk in #{finish} seconds")
    else
      # '[!]' marks failure; the original used the success marker '[+]'
      # here, inconsistent with the other failure logs in this file.
      $logger.log('s', "[!] [disk_manager] '#{vm}' failed to attach disk")
    end
  end
end
def create_vm_snapshot(vm, snapshot_name)
Thread.new do
_create_vm_snapshot(vm, snapshot_name)
@ -309,7 +350,7 @@ module Vmpooler
host = $vsphere['snapshot_manager'].find_vm(vm) ||
$vsphere['snapshot_manager'].find_vm_heavy(vm)[vm]
if (host) && ((! snapshot_name.nil?) || (! snapshot_name.empty?))
if (host) && ((! snapshot_name.nil?) && (! snapshot_name.empty?))
$logger.log('s', "[ ] [snapshot_manager] '#{vm}' is being snapshotted")
start = Time.now
@ -356,6 +397,32 @@ module Vmpooler
end
end
# Start the disk_manager worker thread: reuse (or lazily create) a
# shared VsphereHelper under the 'disk_manager' key, then poll the disk
# task queue every five seconds on a dedicated thread stored in
# $threads['disk_manager'] so the main loop can restart it if it dies.
def check_disk_queue
  $logger.log('d', "[*] [disk_manager] starting worker thread")

  $vsphere['disk_manager'] ||= Vmpooler::VsphereHelper.new

  worker = Thread.new do
    loop do
      _check_disk_queue
      sleep(5)
    end
  end

  $threads['disk_manager'] = worker
end
# Pop one queued disk task and start provisioning it. Tasks are
# 'hostname:size' strings written by POST /vm/:hostname/disk/:size.
def _check_disk_queue
  # NOTE: this is a task string, not a VM name — the original local was
  # misleadingly named `vm`.
  disk_task = $redis.spop('vmpooler__tasks__disk')

  unless disk_task.nil?
    begin
      vm_name, disk_size = disk_task.split(':')
      create_vm_disk(vm_name, disk_size)
    rescue => err
      # Include the error so failures are diagnosable; the original bare
      # rescue discarded the exception entirely.
      $logger.log('s', "[!] [disk_manager] disk creation appears to have failed: #{err}")
    end
  end
end
def check_snapshot_queue
$logger.log('d', "[*] [snapshot_manager] starting worker thread")
@ -544,6 +611,13 @@ module Vmpooler
$redis.set('vmpooler__tasks__clone', 0)
loop do
if ! $threads['disk_manager']
check_disk_queue
elsif ! $threads['disk_manager'].alive?
$logger.log('d', "[!] [disk_manager] worker thread died, restarting")
check_disk_queue
end
if ! $threads['snapshot_manager']
check_snapshot_queue
elsif ! $threads['snapshot_manager'].alive?

View file

@ -2,6 +2,10 @@ require 'rubygems' unless defined?(Gem)
module Vmpooler
class VsphereHelper
ADAPTER_TYPE = 'lsiLogic'
DISK_TYPE = 'thin'
DISK_MODE = 'persistent'
def initialize(_vInfo = {})
config_file = File.expand_path('vmpooler.yaml')
vsphere = YAML.load_file(config_file)[:vsphere]
@ -12,6 +16,60 @@ module Vmpooler
insecure: true
end
# Attach a new +size+ gb thin-provisioned disk to +vm+ on +datastore+.
# Returns false when +size+ is not a positive integer; returns true
# after the reconfigure task completes. Blocks until both the disk
# creation and the VM reconfigure vSphere tasks finish.
def add_disk(vm, size, datastore)
# Cheap liveness probe; any failure (expired session, NameError, …) is
# swallowed and the connection is rebuilt by re-running initialize.
begin
@connection.serviceInstance.CurrentTime
rescue
initialize
end
return false unless size.to_i > 0
vmdk_datastore = find_datastore(datastore)
# Next disk is numbered after the vmdks already present for this VM.
# NOTE(review): counting existing files then creating is racy if two
# add_disk calls for the same VM overlap — verify callers serialize.
# NOTE(review): vm['name'] assumes hash-style property access on the
# rbvmomi VM object — confirm vs. vm.name.
vmdk_file_name = "#{vm['name']}/#{vm['name']}_#{find_vmdks(vm['name'], datastore).length + 1}.vmdk"
controller = find_disk_controller(vm)
# Spec for creating the backing vmdk file on the datastore.
vmdk_spec = RbVmomi::VIM::FileBackedVirtualDiskSpec(
capacityKb: size.to_i * 1024 * 1024,
adapterType: ADAPTER_TYPE,
diskType: DISK_TYPE
)
# Backing info tying the virtual device to the created vmdk file.
vmdk_backing = RbVmomi::VIM::VirtualDiskFlatVer2BackingInfo(
datastore: vmdk_datastore,
diskMode: DISK_MODE,
fileName: "[#{vmdk_datastore.name}] #{vmdk_file_name}"
)
# key: -1 lets vSphere assign the device key on add.
device = RbVmomi::VIM::VirtualDisk(
backing: vmdk_backing,
capacityInKB: size.to_i * 1024 * 1024,
controllerKey: controller.key,
key: -1,
unitNumber: find_disk_unit_number(vm, controller)
)
device_config_spec = RbVmomi::VIM::VirtualDeviceConfigSpec(
device: device,
operation: RbVmomi::VIM::VirtualDeviceConfigSpecOperation('add')
)
vm_config_spec = RbVmomi::VIM::VirtualMachineConfigSpec(
deviceChange: [device_config_spec]
)
# Create the vmdk on disk first, then attach it to the VM; both steps
# block until the corresponding vSphere task completes.
@connection.serviceContent.virtualDiskManager.CreateVirtualDisk_Task(
datacenter: @connection.serviceInstance.find_datacenter,
name: "[#{vmdk_datastore.name}] #{vmdk_file_name}",
spec: vmdk_spec
).wait_for_completion
vm.ReconfigVM_Task(spec: vm_config_spec).wait_for_completion
true
end
def find_datastore(datastorename)
begin
@connection.serviceInstance.CurrentTime
@ -23,6 +81,99 @@ module Vmpooler
datacenter.find_datastore(datastorename)
end
# Return the first device on +vm+ whose label equals +deviceName+, or
# nil when no device matches.
def find_device(vm, deviceName)
  begin
    @connection.serviceInstance.CurrentTime
  rescue
    initialize
  end

  vm.config.hardware.device.find { |dev| dev.deviceInfo.label == deviceName }
end
# Return the first SCSI controller on +vm+ (lowest key first) that still
# has room for another disk — a controller addresses at most 15 disks —
# or nil when every controller is full.
def find_disk_controller(vm)
  begin
    @connection.serviceInstance.CurrentTime
  rescue
    initialize
  end

  devices = find_disk_devices(vm)

  devices.keys.sort.each do |key|
    entry = devices[key]
    return find_device(vm, entry['device'].deviceInfo.label) if entry['children'].length < 15
  end

  nil
end
# Map each SCSI controller key on +vm+ to
#   { 'device' => <controller>, 'children' => [<attached VirtualDisk>s] }.
# Disks are grouped under their controllerKey even if enumerated before
# the controller itself.
def find_disk_devices(vm)
  begin
    @connection.serviceInstance.CurrentTime
  rescue
    initialize
  end

  devices = {}

  vm.config.hardware.device.each do |device|
    if device.is_a? RbVmomi::VIM::VirtualSCSIController
      # Guard on the controller's OWN key. The original guarded on
      # device.controllerKey (the PCI bus key) while writing to
      # device.key, which could clobber a 'children' list already
      # populated by a disk enumerated before its controller.
      devices[device.key] ||= { 'children' => [] }
      devices[device.key]['device'] = device
    end

    if device.is_a? RbVmomi::VIM::VirtualDisk
      devices[device.controllerKey] ||= { 'children' => [] }
      devices[device.controllerKey]['children'].push(device)
    end
  end

  devices
end
# Return the lowest free SCSI unit number (0..15) on +controller+ for a
# new disk, or nil when the controller is full. The controller's own
# unit number and every attached disk's unit number count as used.
def find_disk_unit_number(vm, controller)
  begin
    @connection.serviceInstance.CurrentTime
  rescue
    initialize
  end

  used_unit_numbers = []
  devices = find_disk_devices(vm)

  devices.keys.sort.each do |key|
    entry = devices[key]
    # Defensive: an entry created by a disk whose controller was never
    # enumerated has no 'device'; the original would NoMethodError here.
    next unless entry['device'] && controller.key == entry['device'].key

    used_unit_numbers.push(entry['device'].scsiCtlrUnitNumber)
    entry['children'].each { |disk| used_unit_numbers.push(disk.unitNumber) }
  end

  # Scanning 0..15 in ascending order makes the first free id the lowest
  # available unit number (the original collected then sorted — redundant).
  (0..15).find { |scsi_id| !used_unit_numbers.include?(scsi_id) }
end
def find_folder(foldername)
begin
@connection.serviceInstance.CurrentTime
@ -169,6 +320,29 @@ module Vmpooler
vms
end
# Return the layoutEx file entries for vmdks named
# '<vmname>/<vmname>_<n>.vmdk' on +datastore+; add_disk uses the count
# to number the next additional disk.
def find_vmdks(vmname, datastore)
  begin
    # Fixed: was bare `connection` (undefined here) — every sibling
    # method probes @connection. The NameError was silently rescued,
    # forcing a full reconnect on every call.
    @connection.serviceInstance.CurrentTime
  rescue
    initialize
  end

  disks = []

  vmdk_datastore = find_datastore(datastore)

  # Bulk-fetch layoutEx.file for every VM on the datastore in one
  # property-collector round trip.
  vm_files = vmdk_datastore._connection.serviceContent.propertyCollector.collectMultiple vmdk_datastore.vm, 'layoutEx.file'
  vm_files.keys.each do |f|
    vm_files[f]['layoutEx.file'].each do |l|
      # '\.vmdk' — the original unescaped '.' matched any character.
      if l.name.match(/^\[#{vmdk_datastore.name}\] #{vmname}\/#{vmname}_([0-9]+)\.vmdk/)
        disks.push(l)
      end
    end
  end

  disks
end
def get_base_vm_container_from(connection)
begin
connection.serviceInstance.CurrentTime