mirror of https://github.com/puppetlabs/vmpooler-provider-gce.git
synced 2026-01-26 11:28:41 -05:00

refactor the connection and add debug logs

This commit is contained in:
parent 04cc86689d
commit 662f965c0f

2 changed files with 282 additions and 264 deletions
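The heart of the change is visible in the first hunk below: instead of every provider method opening its own @connection_pool.with_metrics block and calling ensured_gce_connection, a single connection helper checks the Compute client out of the pool and the method bodies call it directly, while a debug_logger helper (gated on the DEBUG_FLAG environment variable) traces each call site. A minimal standalone sketch of that shape, using a hypothetical stand-in pool and client rather than vmpooler's real classes:

    require 'ostruct'

    # Hypothetical stand-in for vmpooler's GenericConnectionPool.
    class TinyPool
      def initialize(&factory)
        @factory = factory
      end

      # Yields a pooled object; the real pool also records checkout metrics.
      def with_metrics
        @obj ||= @factory.call
        yield @obj
      end
    end

    class SketchProvider
      def initialize(pool)
        @connection_pool = pool
      end

      # Same shape as the new helper: return the pooled client from inside the block.
      def connection
        @connection_pool.with_metrics do |pool_object|
          return pool_object
        end
      end

      # Same shape as the new debug_logger: only prints when DEBUG_FLAG is set.
      def debug_logger(message)
        puts message if ENV['DEBUG_FLAG']
      end
    end

    provider = SketchProvider.new(TinyPool.new { OpenStruct.new(name: 'fake-compute-client') })
    provider.debug_logger("checked out #{provider.connection.name}") # prints only with DEBUG_FLAG=true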
@@ -49,6 +49,12 @@ module Vmpooler
           'gce'
         end
 
+        def connection
+          @connection_pool.with_metrics do |pool_object|
+            return ensured_gce_connection(pool_object)
+          end
+        end
+
         # main configuration options
         def project
           provider_config['project']
@@ -81,21 +87,20 @@ module Vmpooler
         # [Hashtable]
         # [String] name : the name of the VM instance (unique for whole project)
         def vms_in_pool(pool_name)
+          debug_logger("vms_in_pool")
           vms = []
           pool = pool_config(pool_name)
           raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?
           zone = zone(pool_name)
-          @connection_pool.with_metrics do |pool_object|
-            connection = ensured_gce_connection(pool_object)
-            filter = "(labels.pool = #{pool_name})"
-            instance_list = connection.list_instances(project, zone, filter: filter)
+          filter = "(labels.pool = #{pool_name})"
+          instance_list = connection.list_instances(project, zone, filter: filter)
 
           return vms if instance_list.items.nil?
 
           instance_list.items.each do |vm|
             vms << { 'name' => vm.name }
           end
-          end
+          debug_logger(vms)
           vms
         end
@@ -116,21 +121,20 @@ module Vmpooler
         # [String] zone : URL of the zone where the instance resides.
         # [String] machine_type : Full or partial URL of the machine type resource to use for this instance, in the format: zones/zone/machineTypes/machine-type.
         def get_vm(pool_name, vm_name)
+          debug_logger("get_vm")
           vm_hash = nil
-          @connection_pool.with_metrics do |pool_object|
-            connection = ensured_gce_connection(pool_object)
-            begin
-              vm_object = connection.get_instance(project, zone(pool_name), vm_name)
-            rescue ::Google::Apis::ClientError => e
-              raise e unless e.status_code == 404
-              #swallow the ClientError error 404 and return nil when the VM was not found
-              return nil
-            end
-
-            return vm_hash if vm_object.nil?
-
-            vm_hash = generate_vm_hash(vm_object, pool_name)
+          begin
+            vm_object = connection.get_instance(project, zone(pool_name), vm_name)
+          rescue ::Google::Apis::ClientError => e
+            raise e unless e.status_code == 404
+            #swallow the ClientError error 404 and return nil when the VM was not found
+            return nil
           end
+
+          return vm_hash if vm_object.nil?
+
+          vm_hash = generate_vm_hash(vm_object, pool_name)
+          debug_logger("vm_hash #{vm_hash}")
           vm_hash
         end
@@ -146,9 +150,9 @@ module Vmpooler
         # returns
         # [Hashtable] of the VM as per get_vm(pool_name, vm_name)
         def create_vm(pool_name, new_vmname)
+          debug_logger("create_vm")
           pool = pool_config(pool_name)
           raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?
-
           vm_hash = nil
           # harcoded network info
           network_interfaces = Google::Apis::ComputeV1::NetworkInterface.new(
@@ -164,20 +168,19 @@ module Vmpooler
             :boot => true,
             :initialize_params => Google::Apis::ComputeV1::AttachedDiskInitializeParams.new(initParams)
           )
-          @connection_pool.with_metrics do |pool_object|
-            connection = ensured_gce_connection(pool_object)
           # Assume all pool config is valid i.e. not missing
           client = ::Google::Apis::ComputeV1::Instance.new(
             :name => new_vmname,
             :machine_type => pool['machine_type'],
             :disks => [disk],
             :network_interfaces => [network_interfaces],
             :labels => {'vm' => new_vmname, 'pool' => pool_name}
           )
+          debug_logger("trigger insert_instance")
           result = connection.insert_instance(project, zone(pool_name), client)
-          result = wait_for_operation(project, pool_name, result, connection)
+          result = wait_for_operation(project, pool_name, result)
           vm_hash = get_vm(pool_name, new_vmname)
-          end
           vm_hash
         end
@@ -197,40 +200,41 @@ module Vmpooler
         # returns
         # [boolean] true : once the operations are finished
         def create_disk(pool_name, vm_name, disk_size)
+          debug_logger("create_disk")
           pool = pool_config(pool_name)
           raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?
 
-          @connection_pool.with_metrics do |pool_object|
-            connection = ensured_gce_connection(pool_object)
-            begin
-              vm_object = connection.get_instance(project, zone(pool_name), vm_name)
-            rescue ::Google::Apis::ClientError => e
-              raise e unless e.status_code == 404
-              #if it does not exist
-              raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}")
-            end
-            # this number should start at 1 when there is only the boot disk,
-            # eg the new disk will be named spicy-proton-disk1
-            number_disk = vm_object.disks.length()
-
-            disk_name = "#{vm_name}-disk#{number_disk}"
-            disk = Google::Apis::ComputeV1::Disk.new(
-              :name => disk_name,
-              :size_gb => disk_size,
-              :labels => {"pool" => pool_name, "vm" => vm_name}
-            )
-            result = connection.insert_disk(project, zone(pool_name), disk)
-            wait_for_operation(project, pool_name, result, connection)
-            new_disk = connection.get_disk(project, zone(pool_name), disk_name)
-
-            attached_disk = Google::Apis::ComputeV1::AttachedDisk.new(
-              :auto_delete => true,
-              :boot => false,
-              :source => new_disk.self_link
-            )
-            result = connection.attach_disk(project, zone(pool_name), vm_object.name, attached_disk)
-            wait_for_operation(project, pool_name, result, connection)
+          begin
+            vm_object = connection.get_instance(project, zone(pool_name), vm_name)
+          rescue ::Google::Apis::ClientError => e
+            raise e unless e.status_code == 404
+            #if it does not exist
+            raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}")
           end
+          # this number should start at 1 when there is only the boot disk,
+          # eg the new disk will be named spicy-proton-disk1
+          number_disk = vm_object.disks.length()
+
+          disk_name = "#{vm_name}-disk#{number_disk}"
+          disk = Google::Apis::ComputeV1::Disk.new(
+            :name => disk_name,
+            :size_gb => disk_size,
+            :labels => {"pool" => pool_name, "vm" => vm_name}
+          )
+          debug_logger("trigger insert_disk #{disk_name} for #{vm_name}")
+          result = connection.insert_disk(project, zone(pool_name), disk)
+          wait_for_operation(project, pool_name, result)
+          debug_logger("trigger get_disk #{disk_name} for #{vm_name}")
+          new_disk = connection.get_disk(project, zone(pool_name), disk_name)
+
+          attached_disk = Google::Apis::ComputeV1::AttachedDisk.new(
+            :auto_delete => true,
+            :boot => false,
+            :source => new_disk.self_link
+          )
+          debug_logger("trigger attach_disk #{disk_name} for #{vm_name}")
+          result = connection.attach_disk(project, zone(pool_name), vm_object.name, attached_disk)
+          wait_for_operation(project, pool_name, result)
           true
         end
@@ -250,40 +254,39 @@ module Vmpooler
         # RuntimeError if the vm_name cannot be found
         # RuntimeError if the snapshot_name already exists for this VM
         def create_snapshot(pool_name, vm_name, new_snapshot_name)
-          @connection_pool.with_metrics do |pool_object|
-            connection = ensured_gce_connection(pool_object)
-            begin
-              vm_object = connection.get_instance(project, zone(pool_name), vm_name)
-            rescue ::Google::Apis::ClientError => e
-              raise e unless e.status_code == 404
-              #if it does not exist
-              raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}")
-            end
+          debug_logger("create_snapshot")
+          begin
+            vm_object = connection.get_instance(project, zone(pool_name), vm_name)
+          rescue ::Google::Apis::ClientError => e
+            raise e unless e.status_code == 404
+            #if it does not exist
+            raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}")
+          end
 
-            old_snap = find_snapshot(vm_name, new_snapshot_name, connection)
+          old_snap = find_snapshot(vm_name, new_snapshot_name)
           raise("Snapshot #{new_snapshot_name} for VM #{vm_name} in pool #{pool_name} already exists for the provider #{name}") unless old_snap.nil?
 
           result_list = []
           vm_object.disks.each do |attached_disk|
             disk_name = disk_name_from_source(attached_disk)
             snapshot_obj = ::Google::Apis::ComputeV1::Snapshot.new(
               name: "#{new_snapshot_name}-#{disk_name}",
               labels: {
                 "snapshot_name" => new_snapshot_name,
                 "vm" => vm_name,
                 "pool" => pool_name,
                 "diskname" => disk_name,
                 "boot" => attached_disk.boot.to_s
               }
             )
+            debug_logger("trigger async create_disk_snapshot #{vm_name}: #{new_snapshot_name}-#{disk_name}")
             result = connection.create_disk_snapshot(project, zone(pool_name), disk_name, snapshot_obj)
             # do them all async, keep a list, check later
             result_list << result
           end
           #now check they are done
           result_list.each do |result|
-            wait_for_operation(project, pool_name, result, connection)
+            wait_for_operation(project, pool_name, result)
           end
-          end
           true
         end
@@ -307,61 +310,65 @@ module Vmpooler
         # RuntimeError if the vm_name cannot be found
         # RuntimeError if the snapshot_name already exists for this VM
         def revert_snapshot(pool_name, vm_name, snapshot_name)
-          @connection_pool.with_metrics do |pool_object|
-            connection = ensured_gce_connection(pool_object)
-            begin
-              vm_object = connection.get_instance(project, zone(pool_name), vm_name)
-            rescue ::Google::Apis::ClientError => e
-              raise e unless e.status_code == 404
-              #if it does not exist
-              raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}")
-            end
-
-            snapshot_object = find_snapshot(vm_name, snapshot_name, connection)
-            raise("Snapshot #{snapshot_name} for VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if snapshot_object.nil?
-
-            # Shutdown instance
-            result = connection.stop_instance(project, zone(pool_name), vm_name)
-            wait_for_operation(project, pool_name, result, connection)
-
-            # Delete existing disks
-            if vm_object.disks
-              vm_object.disks.each do |attached_disk|
-                result = connection.detach_disk(project, zone(pool_name), vm_name, attached_disk.device_name)
-                wait_for_operation(project, pool_name, result, connection)
-                current_disk_name = disk_name_from_source(attached_disk)
-                result = connection.delete_disk(project, zone(pool_name), current_disk_name)
-                wait_for_operation(project, pool_name, result, connection)
-              end
-            end
-
-            # this block is sensitive to disruptions, for example if vmpooler is stopped while this is running
-            snapshot_object.each do |snapshot|
-              current_disk_name = snapshot.labels['diskname']
-              bootable = (snapshot.labels['boot'] == "true")
-              disk = Google::Apis::ComputeV1::Disk.new(
-                :name => current_disk_name,
-                :labels => {"pool" => pool_name, "vm" => vm_name},
-                :source_snapshot => snapshot.self_link
-              )
-              # create disk in GCE as a separate resource
-              result = connection.insert_disk(project, zone(pool_name), disk)
-              wait_for_operation(project, pool_name, result, connection)
-              # read the new disk info
-              new_disk_info = connection.get_disk(project, zone(pool_name), current_disk_name)
-              new_attached_disk = Google::Apis::ComputeV1::AttachedDisk.new(
-                :auto_delete => true,
-                :boot => bootable,
-                :source => new_disk_info.self_link
-              )
-              # attach the new disk to existing instance
-              result = connection.attach_disk(project, zone(pool_name), vm_name, new_attached_disk)
-              wait_for_operation(project, pool_name, result, connection)
-            end
-
-            result = connection.start_instance(project, zone(pool_name), vm_name)
-            wait_for_operation(project, pool_name, result, connection)
+          debug_logger("revert_snapshot")
+          begin
+            vm_object = connection.get_instance(project, zone(pool_name), vm_name)
+          rescue ::Google::Apis::ClientError => e
+            raise e unless e.status_code == 404
+            #if it does not exist
+            raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}")
           end
+
+          snapshot_object = find_snapshot(vm_name, snapshot_name)
+          raise("Snapshot #{snapshot_name} for VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if snapshot_object.nil?
+
+          # Shutdown instance
+          debug_logger("trigger stop_instance #{vm_name}")
+          result = connection.stop_instance(project, zone(pool_name), vm_name)
+          wait_for_operation(project, pool_name, result)
+
+          # Delete existing disks
+          if vm_object.disks
+            vm_object.disks.each do |attached_disk|
+              debug_logger("trigger detach_disk #{vm_name}: #{attached_disk.device_name}")
+              result = connection.detach_disk(project, zone(pool_name), vm_name, attached_disk.device_name)
+              wait_for_operation(project, pool_name, result)
+              current_disk_name = disk_name_from_source(attached_disk)
+              debug_logger("trigger delete_disk #{vm_name}: #{current_disk_name}")
+              result = connection.delete_disk(project, zone(pool_name), current_disk_name)
+              wait_for_operation(project, pool_name, result)
+            end
+          end
+
+          # this block is sensitive to disruptions, for example if vmpooler is stopped while this is running
+          snapshot_object.each do |snapshot|
+            current_disk_name = snapshot.labels['diskname']
+            bootable = (snapshot.labels['boot'] == "true")
+            disk = Google::Apis::ComputeV1::Disk.new(
+              :name => current_disk_name,
+              :labels => {"pool" => pool_name, "vm" => vm_name},
+              :source_snapshot => snapshot.self_link
+            )
+            # create disk in GCE as a separate resource
+            debug_logger("trigger insert_disk #{vm_name}: #{current_disk_name} based on #{snapshot.self_link}")
+            result = connection.insert_disk(project, zone(pool_name), disk)
+            wait_for_operation(project, pool_name, result)
+            # read the new disk info
+            new_disk_info = connection.get_disk(project, zone(pool_name), current_disk_name)
+            new_attached_disk = Google::Apis::ComputeV1::AttachedDisk.new(
+              :auto_delete => true,
+              :boot => bootable,
+              :source => new_disk_info.self_link
+            )
+            # attach the new disk to existing instance
+            debug_logger("trigger attach_disk #{vm_name}: #{current_disk_name}")
+            result = connection.attach_disk(project, zone(pool_name), vm_name, new_attached_disk)
+            wait_for_operation(project, pool_name, result)
+          end
+
+          debug_logger("trigger start_instance #{vm_name}")
+          result = connection.start_instance(project, zone(pool_name), vm_name)
+          wait_for_operation(project, pool_name, result)
           true
         end
@@ -373,54 +380,56 @@ module Vmpooler
         # returns
         # [boolean] true : once the operations are finished
         def destroy_vm(pool_name, vm_name)
-          @connection_pool.with_metrics do |pool_object|
-            connection = ensured_gce_connection(pool_object)
-            deleted = false
-            begin
-              vm_object = connection.get_instance(project, zone(pool_name), vm_name)
-            rescue ::Google::Apis::ClientError => e
-              raise e unless e.status_code == 404
-              # If a VM doesn't exist then it is effectively deleted
-              deleted = true
-            end
+          debug_logger("destroy_vm")
+          deleted = false
+          begin
+            vm_object = connection.get_instance(project, zone(pool_name), vm_name)
+          rescue ::Google::Apis::ClientError => e
+            raise e unless e.status_code == 404
+            # If a VM doesn't exist then it is effectively deleted
+            deleted = true
+            debug_logger("instance #{vm_name} already deleted")
+          end
 
           if(!deleted)
-            result = connection.delete_instance(project, zone(pool_name), vm_name)
-            wait_for_operation(project, pool_name, result, connection, 10)
+            debug_logger("trigger delete_instance #{vm_name}")
+            result = connection.delete_instance(project, zone(pool_name), vm_name)
+            wait_for_operation(project, pool_name, result, 10)
           end
 
           # list and delete any leftover disk, for instance if they were detached from the instance
           filter = "(labels.vm = #{vm_name})"
           disk_list = connection.list_disks(project, zone(pool_name), filter: filter)
           result_list = []
           unless disk_list.items.nil?
             disk_list.items.each do |disk|
+              debug_logger("trigger delete_disk #{disk.name}")
               result = connection.delete_disk(project, zone(pool_name), disk.name)
               # do them all async, keep a list, check later
               result_list << result
             end
           end
           #now check they are done
           result_list.each do |result|
-            wait_for_operation(project, pool_name, result, connection)
+            wait_for_operation(project, pool_name, result)
           end
 
           # list and delete leftover snapshots, this could happen if snapshots were taken,
           # as they are not removed when the original disk is deleted or the instance is detroyed
-          snapshot_list = find_all_snapshots(vm_name, connection)
+          snapshot_list = find_all_snapshots(vm_name)
           result_list = []
           unless snapshot_list.nil?
             snapshot_list.each do |snapshot|
+              debug_logger("trigger delete_snapshot #{snapshot.name}")
               result = connection.delete_snapshot(project, snapshot.name)
               # do them all async, keep a list, check later
               result_list << result
             end
           end
           #now check they are done
           result_list.each do |result|
-            wait_for_operation(project, pool_name, result, connection)
+            wait_for_operation(project, pool_name, result)
           end
-          end
           true
         end
@@ -437,52 +446,53 @@ module Vmpooler
         # Scans zones that are configured for list of resources (VM, disks, snapshots) that do not have the label.pool set
         # to one of the configured pools. If it is also not in the allowlist, the resource is destroyed
         def purge_unconfigured_resources(allowlist)
-          @connection_pool.with_metrics do |pool_object|
-            connection = ensured_gce_connection(pool_object)
-            pools_array = provided_pools
-            filter = {}
-            # we have to group things by zone, because the API search feature is done against a zone and not global
-            # so we will do the searches in each configured zone
-            pools_array.each do |pool|
-              filter[zone(pool)] = [] if filter[zone(pool)].nil?
-              filter[zone(pool)] << "(labels.pool != #{pool})"
-            end
-            filter.keys.each do |zone|
-              # this filter should return any item that have a labels.pool that is not in the config OR
-              # do not have a pool label at all
-              filter_string = filter[zone].join(" AND ") + " OR -labels.pool:*"
-              #VMs
-              instance_list = connection.list_instances(project, zone, filter: filter_string)
-
-              result_list = []
-              unless instance_list.items.nil?
-                instance_list.items.each do |vm|
-                  next if should_be_ignored(vm, allowlist)
-                  result = connection.delete_instance(project, zone, vm.name)
-                  result_list << result
-                end
-              end
-              #now check they are done
-              result_list.each do |result|
-                wait_for_zone_operation(project, zone, result, connection)
-              end
-
-              #Disks
-              disks_list = connection.list_disks(project, zone, filter: filter_string)
-              unless disks_list.items.nil?
-                disks_list.items.each do |disk|
-                  next if should_be_ignored(disk, allowlist)
-                  result = connection.delete_disk(project, zone, disk.name)
-                end
-              end
-
-              #Snapshots
-              snapshot_list = connection.list_snapshots(project, filter: filter_string)
-              unless snapshot_list.items.nil?
-                snapshot_list.items.each do |sn|
-                  next if should_be_ignored(sn, allowlist)
-                  result = connection.delete_snapshot(project, sn.name)
-                end
-              end
+          debug_logger("purge_unconfigured_resources")
+          pools_array = provided_pools
+          filter = {}
+          # we have to group things by zone, because the API search feature is done against a zone and not global
+          # so we will do the searches in each configured zone
+          pools_array.each do |pool|
+            filter[zone(pool)] = [] if filter[zone(pool)].nil?
+            filter[zone(pool)] << "(labels.pool != #{pool})"
+          end
+          filter.keys.each do |zone|
+            # this filter should return any item that have a labels.pool that is not in the config OR
+            # do not have a pool label at all
+            filter_string = filter[zone].join(" AND ") + " OR -labels.pool:*"
+            #VMs
+            instance_list = connection.list_instances(project, zone, filter: filter_string)
+
+            result_list = []
+            unless instance_list.items.nil?
+              instance_list.items.each do |vm|
+                next if should_be_ignored(vm, allowlist)
+                debug_logger("trigger async delete_instance #{vm.name}")
+                result = connection.delete_instance(project, zone, vm.name)
+                result_list << result
+              end
+            end
+            #now check they are done
+            result_list.each do |result|
+              wait_for_zone_operation(project, zone, result)
+            end
+
+            #Disks
+            disks_list = connection.list_disks(project, zone, filter: filter_string)
+            unless disks_list.items.nil?
+              disks_list.items.each do |disk|
+                next if should_be_ignored(disk, allowlist)
+                debug_logger("trigger async no wait delete_disk #{disk.name}")
+                result = connection.delete_disk(project, zone, disk.name)
+              end
+            end
+
+            #Snapshots
+            snapshot_list = connection.list_snapshots(project, filter: filter_string)
+            unless snapshot_list.items.nil?
+              snapshot_list.items.each do |sn|
+                next if should_be_ignored(sn, allowlist)
+                debug_logger("trigger async no wait delete_snapshot #{sn.name}")
+                result = connection.delete_snapshot(project, sn.name)
+              end
+            end
           end
-          end
         end
@@ -497,30 +507,29 @@ module Vmpooler
         # returns
         # [Boolean] : true if successful, false if an error occurred and it should retry
        def tag_vm_user(pool, vm)
-          @redis.with_metrics do |redis|
-            user = get_current_user(vm)
-            vm_hash = get_vm(pool, vm)
-            return false if vm_hash.nil?
-            new_labels = vm_hash['labels']
-            # bailing in this case since labels should exist, and continuing would mean losing them
-            return false if new_labels.nil?
-            # add new label called token-user, with value as user
-            new_labels['token-user'] = user
-            begin
-              instances_set_labels_request_object = Google::Apis::ComputeV1::InstancesSetLabelsRequest.new(label_fingerprint:vm_hash['label_fingerprint'], labels: new_labels)
-              result = connection.set_instance_labels(project, zone(pool), vm, instances_set_labels_request_object)
-              wait_for_zone_operation(project, zone(pool), result, connection)
-            rescue StandardError => _e
-              return false
-            end
-            return true
+          user = get_current_user(vm)
+          vm_hash = get_vm(pool, vm)
+          return false if vm_hash.nil?
+          new_labels = vm_hash['labels']
+          # bailing in this case since labels should exist, and continuing would mean losing them
+          return false if new_labels.nil?
+          # add new label called token-user, with value as user
+          new_labels['token-user'] = user
+          begin
+            instances_set_labels_request_object = Google::Apis::ComputeV1::InstancesSetLabelsRequest.new(label_fingerprint:vm_hash['label_fingerprint'], labels: new_labels)
+            result = connection.set_instance_labels(project, zone(pool), vm, instances_set_labels_request_object)
+            wait_for_zone_operation(project, zone(pool), result)
+          rescue StandardError => _e
+            return false
           end
+          return true
         end
 
         # END BASE METHODS
 
         def should_be_ignored(item, allowlist)
+          return false if allowlist.nil?
           allowlist.map!(&:downcase) # remove uppercase from configured values because its not valid as resource label
           array_flattened_labels = []
           unless item.labels.nil?
             item.labels.each do |k,v|
@@ -546,9 +555,10 @@ module Vmpooler
         end
 
         # Compute resource wait for operation to be DONE (synchronous operation)
-        def wait_for_zone_operation(project, zone, result, connection, retries=5)
+        def wait_for_zone_operation(project, zone, result, retries=5)
           while result.status != 'DONE'
             result = connection.wait_zone_operation(project, zone, result.name)
+            debug_logger(" -> wait_for_zone_operation status #{result.status} (#{result.name})")
           end
           if result.error # unsure what kind of error can be stored here
             error_message = ""
@@ -573,8 +583,8 @@ module Vmpooler
             puts "waited on #{result.name} but was not found, so skipping"
           end
 
-        def wait_for_operation(project, pool_name, result, connection, retries=5)
-          wait_for_zone_operation(project, zone(pool_name), result, connection, retries)
+        def wait_for_operation(project, pool_name, result, retries=5)
+          wait_for_zone_operation(project, zone(pool_name), result, retries)
         end
 
         # Return a hash of VM data
@@ -643,7 +653,7 @@ module Vmpooler
 
         # this is used because for one vm, with the same snapshot name there could be multiple snapshots,
         # one for each disk
-        def find_snapshot(vm, snapshotname, connection)
+        def find_snapshot(vm, snapshotname)
           filter = "(labels.vm = #{vm}) AND (labels.snapshot_name = #{snapshotname})"
           snapshot_list = connection.list_snapshots(project,filter: filter)
           return snapshot_list.items #array of snapshot objects
@@ -651,7 +661,7 @@ module Vmpooler
 
         # find all snapshots ever created for one vm,
         # regardless of snapshot name, for example when deleting it all
-        def find_all_snapshots(vm, connection)
+        def find_all_snapshots(vm)
           filter = "(labels.vm = #{vm})"
           snapshot_list = connection.list_snapshots(project,filter: filter)
           return snapshot_list.items #array of snapshot objects
@@ -660,6 +670,14 @@ module Vmpooler
         def disk_name_from_source(attached_disk)
           attached_disk.source.split('/')[-1] # disk name is after the last / of the full source URL
         end
+
+        # used in local dev environment, set DEBUG_FLAG=true
+        # this way the upstream vmpooler manager does not get polluted with logs
+        def debug_logger(message, send_to_upstream=false)
+          #the default logger is simple and does not enforce debug levels (the first argument)
+          puts message if ENV['DEBUG_FLAG']
+          logger.log("[g]", message) if send_to_upstream
+        end
       end
     end
   end
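Taken together, the debug_logger calls added above trace every GCE operation when DEBUG_FLAG is set; for a destroy, the local output would look roughly like this (illustrative only, the VM, disk and snapshot names are made up):

    destroy_vm
    trigger delete_instance vm16
    trigger delete_disk vm16-disk1
    trigger delete_snapshot sams-vm16

The remaining hunks apply to the second changed file, the provider spec, which now exercises the full create / snapshot / revert / destroy flow instead of leaving most of it commented out.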
@@ -9,7 +9,7 @@ end
 describe 'Vmpooler::PoolManager::Provider::Gce' do
   let(:logger) { MockLogger.new }
   let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
-  let(:poolname) { 'pool1' }
+  let(:poolname) { 'debian-9' }
   let(:provider_options) { { 'param' => 'value' } }
   let(:project) { 'dio-samuel-dev' }
   let(:zone){ 'us-west1-b' }
@@ -38,7 +38,7 @@ EOT
  )
  }
 
-  let(:vmname) { 'vm15' }
+  let(:vmname) { 'vm16' }
   let(:connection) { MockComputeServiceConnection.new }
   let(:redis_connection_pool) { Vmpooler::PoolManager::GenericConnectionPool.new(
     metrics: metrics,
@@ -62,7 +62,8 @@ EOT
     puts "creating"
     result = subject.create_vm(poolname, vmname)
     subject.get_vm(poolname, vmname)
-=begin
+    subject.vms_in_pool(poolname)
+
     puts "create snapshot w/ one disk"
     result = subject.create_snapshot(poolname, vmname, "sams")
     puts "create disk"
@@ -71,8 +72,7 @@ EOT
     result = subject.create_snapshot(poolname, vmname, "sams2")
     puts "revert snapshot"
     result = subject.revert_snapshot(poolname, vmname, "sams")
-=end
-    #result = subject.destroy_vm(poolname, vmname)
+    result = subject.destroy_vm(poolname, vmname)
   end
 
   skip 'runs existing' do