set up tags properly, and retrieve them in get_vm

Samuel Beaulieu 2022-07-04 13:18:48 -05:00
parent 85e0d14b10
commit 2783adc32e
No known key found for this signature in database
GPG key ID: 12030F74136D0F34
3 changed files with 115 additions and 253 deletions
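The change in one sentence: instances are now identified through EC2 tags — create_vm writes vm_name, pool, lifetime, created_by, and job_url tags via tag_specifications, and get_vm finds the instance again with a tag filter instead of looking it up by name. A minimal standalone sketch of that round trip with aws-sdk-ec2 (the region, AMI, instance type, and names below are placeholders, not values taken from this commit):

    require 'aws-sdk-ec2'

    ec2 = Aws::EC2::Resource.new(region: 'us-west-2') # placeholder region

    # Write the identifying tags at creation time, as create_vm now does.
    batch = ec2.create_instances(
      image_id: 'ami-0123456789abcdef0', # placeholder AMI
      instance_type: 't3.micro',         # placeholder size
      min_count: 1,
      max_count: 1,
      tag_specifications: [{
        resource_type: 'instance',
        tags: [
          { key: 'vm_name', value: 'example-vm' },
          { key: 'pool', value: 'example-pool' }
        ]
      }]
    )
    instance_id = batch.first.id
    ec2.client.wait_until(:instance_running, instance_ids: [instance_id])

    # Read the instance back by tag filter, as get_vm now does.
    instance = ec2.instances(filters: [{ name: 'tag:vm_name', values: ['example-vm'] }]).first
    puts instance.tags.detect { |t| t.key == 'pool' }&.value # => "example-pool"
    puts instance.state.name                                 # => "running"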


@@ -1,193 +0,0 @@
PATH
  remote: .
  specs:
    vmpooler-provider-aws (0.0.1)
      aws-sdk-ec2 (~> 1)

GEM
  remote: https://rubygems.org/
  specs:
    ast (2.4.2)
    aws-eventstream (1.2.0)
    aws-partitions (1.598.0)
    aws-sdk-core (3.131.1)
      aws-eventstream (~> 1, >= 1.0.2)
      aws-partitions (~> 1, >= 1.525.0)
      aws-sigv4 (~> 1.1)
      jmespath (~> 1, >= 1.6.1)
    aws-sdk-ec2 (1.317.0)
      aws-sdk-core (~> 3, >= 3.127.0)
      aws-sigv4 (~> 1.1)
    aws-sigv4 (1.5.0)
      aws-eventstream (~> 1, >= 1.0.2)
    bindata (2.4.10)
    builder (3.2.4)
    climate_control (1.1.1)
    coderay (1.1.3)
    concurrent-ruby (1.1.10)
    connection_pool (2.2.5)
    deep_merge (1.2.2)
    diff-lcs (1.5.0)
    docile (1.4.0)
    faraday (2.3.0)
      faraday-net_http (~> 2.0)
      ruby2_keywords (>= 0.0.4)
    faraday-net_http (2.0.3)
    google-cloud-env (1.6.0)
      faraday (>= 0.17.3, < 3.0)
    jmespath (1.6.1)
    method_source (1.0.0)
    mock_redis (0.32.0)
      ruby2_keywords
    mustermann (1.1.1)
      ruby2_keywords (~> 0.0.1)
    net-ldap (0.17.1)
    nio4r (2.5.8)
    nokogiri (1.13.6-x86_64-darwin)
      racc (~> 1.4)
    opentelemetry-api (1.0.2)
    opentelemetry-common (0.19.6)
      opentelemetry-api (~> 1.0)
    opentelemetry-exporter-jaeger (0.20.1)
      opentelemetry-api (~> 1.0)
      opentelemetry-common (~> 0.19.2)
      opentelemetry-sdk (~> 1.0)
      thrift
    opentelemetry-instrumentation-base (0.19.0)
      opentelemetry-api (~> 1.0)
    opentelemetry-instrumentation-concurrent_ruby (0.19.2)
      opentelemetry-api (~> 1.0)
      opentelemetry-instrumentation-base (~> 0.19.0)
    opentelemetry-instrumentation-http_client (0.19.3)
      opentelemetry-api (~> 1.0)
      opentelemetry-common (~> 0.19.3)
      opentelemetry-instrumentation-base (~> 0.19.0)
    opentelemetry-instrumentation-redis (0.21.2)
      opentelemetry-api (~> 1.0)
      opentelemetry-common (~> 0.19.3)
      opentelemetry-instrumentation-base (~> 0.19.0)
    opentelemetry-instrumentation-sinatra (0.19.3)
      opentelemetry-api (~> 1.0)
      opentelemetry-common (~> 0.19.3)
      opentelemetry-instrumentation-base (~> 0.19.0)
    opentelemetry-registry (0.1.0)
      opentelemetry-api (~> 1.0.1)
    opentelemetry-resource_detectors (0.19.1)
      google-cloud-env
      opentelemetry-sdk
    opentelemetry-sdk (1.1.0)
      opentelemetry-api (~> 1.0)
      opentelemetry-common (~> 0.19.3)
      opentelemetry-registry (~> 0.1)
      opentelemetry-semantic_conventions
    opentelemetry-semantic_conventions (1.8.0)
      opentelemetry-api (~> 1.0)
    parallel (1.22.1)
    parser (3.1.2.0)
      ast (~> 2.4.1)
    pickup (0.0.11)
    prometheus-client (2.1.0)
    pry (0.14.1)
      coderay (~> 1.1)
      method_source (~> 1.0)
    puma (5.6.4)
      nio4r (~> 2.0)
    racc (1.6.0)
    rack (2.2.3.1)
    rack-protection (2.2.0)
      rack
    rack-test (1.1.0)
      rack (>= 1.0, < 3)
    rainbow (3.1.1)
    rake (13.0.6)
    redis (4.6.0)
    regexp_parser (2.5.0)
    rexml (3.2.5)
    rspec (3.11.0)
      rspec-core (~> 3.11.0)
      rspec-expectations (~> 3.11.0)
      rspec-mocks (~> 3.11.0)
    rspec-core (3.11.0)
      rspec-support (~> 3.11.0)
    rspec-expectations (3.11.0)
      diff-lcs (>= 1.2.0, < 2.0)
      rspec-support (~> 3.11.0)
    rspec-mocks (3.11.1)
      diff-lcs (>= 1.2.0, < 2.0)
      rspec-support (~> 3.11.0)
    rspec-support (3.11.0)
    rubocop (1.1.0)
      parallel (~> 1.10)
      parser (>= 2.7.1.5)
      rainbow (>= 2.2.2, < 4.0)
      regexp_parser (>= 1.8)
      rexml
      rubocop-ast (>= 1.0.1)
      ruby-progressbar (~> 1.7)
      unicode-display_width (>= 1.4.0, < 2.0)
    rubocop-ast (1.18.0)
      parser (>= 3.1.1.0)
    ruby-progressbar (1.11.0)
    ruby2_keywords (0.0.5)
    simplecov (0.21.2)
      docile (~> 1.1)
      simplecov-html (~> 0.11)
      simplecov_json_formatter (~> 0.1)
    simplecov-html (0.12.3)
    simplecov_json_formatter (0.1.4)
    sinatra (2.2.0)
      mustermann (~> 1.0)
      rack (~> 2.2)
      rack-protection (= 2.2.0)
      tilt (~> 2.0)
    spicy-proton (2.1.14)
      bindata (~> 2.3)
    statsd-ruby (1.5.0)
    thor (1.2.1)
    thrift (0.14.2)
    tilt (2.0.10)
    unicode-display_width (1.8.0)
    vmpooler (2.3.0)
      concurrent-ruby (~> 1.1)
      connection_pool (~> 2.2)
      deep_merge (~> 1.2)
      net-ldap (~> 0.16)
      nokogiri (~> 1.10)
      opentelemetry-exporter-jaeger (= 0.20.1)
      opentelemetry-instrumentation-concurrent_ruby (= 0.19.2)
      opentelemetry-instrumentation-http_client (= 0.19.3)
      opentelemetry-instrumentation-redis (= 0.21.2)
      opentelemetry-instrumentation-sinatra (= 0.19.3)
      opentelemetry-resource_detectors (= 0.19.1)
      opentelemetry-sdk (~> 1.0, >= 1.0.2)
      pickup (~> 0.0.11)
      prometheus-client (~> 2.0)
      puma (~> 5.0, >= 5.0.4)
      rack (~> 2.2)
      rake (~> 13.0)
      redis (~> 4.1)
      sinatra (~> 2.0)
      spicy-proton (~> 2.1)
      statsd-ruby (~> 1.4)
    yarjuf (2.0.0)
      builder
      rspec (~> 3)

PLATFORMS
  x86_64-darwin-19

DEPENDENCIES
  climate_control (>= 0.2.0)
  mock_redis (>= 0.17.0)
  pry
  rack-test (>= 0.6)
  rspec (>= 3.2)
  rubocop (~> 1.1.0)
  simplecov (>= 0.11.2)
  thor (~> 1.0, >= 1.0.1)
  vmpooler (~> 2.3, >= 1.3.0)
  vmpooler-provider-aws!
  yarjuf (>= 2.0)

BUNDLED WITH
   2.3.6


@@ -65,16 +65,8 @@ module Vmpooler
         end

         # main configuration options
-        def project
-          provider_config['project']
-        end
-
-        def network_name
-          provider_config['network_name']
-        end
-
-        def subnetwork_name(pool_name)
-          return pool_config(pool_name)['subnetwork_name'] if pool_config(pool_name)['subnetwork_name']
+        def region
+          return provider_config['region'] if provider_config['region']
         end

         # main configuration options, overridable for each pool
@@ -83,15 +75,17 @@ module Vmpooler
           return provider_config['zone'] if provider_config['zone']
         end

-        def region
-          return provider_config['region'] if provider_config['region']
+        def amisize(pool_name)
+          return pool_config(pool_name)['amisize'] if pool_config(pool_name)['amisize']
+          return provider_config['amisize'] if provider_config['amisize']
         end

-        def machine_type(pool_name)
-          return pool_config(pool_name)['machine_type'] if pool_config(pool_name)['machine_type']
-          return provider_config['machine_type'] if provider_config['machine_type']
+        def volume_size(pool_name)
+          return pool_config(pool_name)['volume_size'] if pool_config(pool_name)['volume_size']
+          return provider_config['volume_size'] if provider_config['volume_size']
         end

+        #dns
         def domain
           provider_config['domain']
         end
@@ -100,10 +94,20 @@ module Vmpooler
           provider_config['dns_zone_resource_name']
         end

+        #subnets
+        def get_subnet_id(pool_name)
+          case zone(pool_name)
+          when 'us-west-2b'
+            return 'subnet-0fe90a688844f6f26'
+          when 'us-west-2a'
+            return 'subnet-091b436f'
+          end
+        end
+
         # Base methods that are implemented:

         # vms_in_pool lists all the VM names in a pool, which is based on the VMs
-        # having a label "pool" that match a pool config name.
+        # having a tag "pool" that match a pool config name.
         # inputs
         #   [String] pool_name : Name of the pool
         # returns
@@ -118,13 +122,16 @@ module Vmpooler
           raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?

           zone = zone(pool_name)
-          filter = "(labels.pool = #{pool_name})"
+          filters = [{
+            name: "tag:pool",
+            values: [pool_name],
+          }]
           instance_list = connection.list_instances(project, zone, filter: filter)
-          return vms if instance_list.items.nil?
+          return vms if instance_list.size.nil? || instance_list.size == 0

           instance_list.items.each do |vm|
-            vms << { 'name' => vm.name }
+            vms << { 'name' => vm.tags.detect {|f| f.key == 'vm_name' }&.value || "vm_name not found in tags" }
           end
           debug_logger(vms)
           vms
@@ -134,23 +141,24 @@ module Vmpooler
         #   [String] pool_name : Name of the pool
         #   [String] vm_name : Name of the VM to find
         # returns
-        #   nil if VM doesn't exist
+        #   nil if VM doesn't exist name, template, poolname, boottime, status, image_size, private_ip_address
         #   [Hastable] of the VM
         #     [String] name : The name of the resource, provided by the client when initially creating the resource
-        #     [String] hostname : Specifies the hostname of the instance. The specified hostname must be RFC1035 compliant. If hostname is not specified,
-        #                         the default hostname is [ INSTANCE_NAME].c.[PROJECT_ID].internal when using the global DNS, and
-        #                         [ INSTANCE_NAME].[ZONE].c.[PROJECT_ID].internal when using zonal DNS
         #     [String] template : This is the name of template
-        #     [String] poolname : Name of the pool the VM as per labels
+        #     [String] poolname : Name of the pool the VM
         #     [Time] boottime : Time when the VM was created/booted
-        #     [String] status : One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED
-        #     [String] zone : URL of the zone where the instance resides.
-        #     [String] machine_type : Full or partial URL of the machine type resource to use for this instance, in the format: zones/zone/machineTypes/machine-type.
+        #     [String] status : One of the following values: pending, running, shutting-down, terminated, stopping, stopped
+        #     [String] image_size : The EC2 image size eg a1.large
+        #     [String] private_ip_address: The private IPv4 address
         def get_vm(pool_name, vm_name)
           debug_logger('get_vm')
           vm_hash = nil
           begin
-            vm_object = connection.get_instance(project, zone(pool_name), vm_name)
+            filters = [{
+              name: "tag:vm_name",
+              values: [vm_name],
+            }]
+            instances = connection.instances(filters: filters)
           rescue ::Aws::EC2::ClientError => e
             raise e unless e.status_code == 404
@@ -158,9 +166,9 @@ module Vmpooler
             return nil
           end

-          return vm_hash if vm_object.nil?
+          return vm_hash if instances.size.nil? || instances.size == 0

-          vm_hash = generate_vm_hash(vm_object, pool_name)
+          vm_hash = generate_vm_hash(instances.first, pool_name)
           debug_logger("vm_hash #{vm_hash}")
           vm_hash
         end
@@ -180,27 +188,32 @@ module Vmpooler
           debug_logger('create_vm')
           pool = pool_config(pool_name)
           raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?
+          raise("Instance creation not attempted, #{new_vmname} already exists") if get_vm(pool_name, new_vmname)

-          if zone(pool_name) == 'us-west-2b'
-            subnet_id = 'subnet-0fe90a688844f6f26'
-          else
-            subnet_id = 'subnet-091b436f'
-          end
+          subnet_id = get_subnet_id(pool_name)
           tag = [
             {
               resource_type: "instance", # accepts capacity-reservation, client-vpn-endpoint, customer-gateway, carrier-gateway, dedicated-host, dhcp-options, egress-only-internet-gateway, elastic-ip, elastic-gpu, export-image-task, export-instance-task, fleet, fpga-image, host-reservation, image, import-image-task, import-snapshot-task, instance, instance-event-window, internet-gateway, ipam, ipam-pool, ipam-scope, ipv4pool-ec2, ipv6pool-ec2, key-pair, launch-template, local-gateway, local-gateway-route-table, local-gateway-virtual-interface, local-gateway-virtual-interface-group, local-gateway-route-table-vpc-association, local-gateway-route-table-virtual-interface-group-association, natgateway, network-acl, network-interface, network-insights-analysis, network-insights-path, network-insights-access-scope, network-insights-access-scope-analysis, placement-group, prefix-list, replace-root-volume-task, reserved-instances, route-table, security-group, security-group-rule, snapshot, spot-fleet-request, spot-instances-request, subnet, subnet-cidr-reservation, traffic-mirror-filter, traffic-mirror-session, traffic-mirror-target, transit-gateway, transit-gateway-attachment, transit-gateway-connect-peer, transit-gateway-multicast-domain, transit-gateway-route-table, volume, vpc, vpc-endpoint, vpc-endpoint-service, vpc-peering-connection, vpn-connection, vpn-gateway, vpc-flow-log
               tags: [
+                {
+                  key: "vm_name",
+                  value: new_vmname,
+                },
+                {
+                  key: "pool",
+                  value: pool_name,
+                },
                 {
                   key: "lifetime",
-                  value: "1d",
+                  value: get_current_lifetime(new_vmname),
                 },
                 {
                   key: "created_by",
-                  value: "Tanisha",
+                  value: get_current_user(new_vmname),
                 },
                 {
                   key: "job_url",
-                  value: "",
+                  value: get_current_job_url(new_vmname),
                 },
                 {
                   key: "organization",
@@ -221,21 +234,51 @@ module Vmpooler
             monitoring: {:enabled => true},
             key_name: 'always-be-scheduling',
             security_group_ids: ['sg-697fb015'],
-            instance_type: pool['amisize'],
+            instance_type: amisize(pool_name),
             disable_api_termination: false,
             instance_initiated_shutdown_behavior: 'terminate',
             tag_specifications: tag,
             subnet_id: subnet_id
           }
-          # if volume_size
-          #   config[:block_device_mappings] = get_block_device_mappings(image_id, volume_size)
-          # end
+          if volume_size(pool_name)
+            config[:block_device_mappings] = get_block_device_mappings(config['image_id'], volume_size(pool_name))
+          end

           debug_logger('trigger insert_instance')
-          result = connection.create_instances(config)
-          # created_instance = get_vm(pool_name, new_vmname
-          # created_instance
+          batch_instance = connection.create_instances(config)
+          instance_id = batch_instance.first.instance_id
+          connection.client.wait_until(:instance_running, {instance_ids: [instance_id]}, {max_attempts: 10})
+          created_instance = get_vm(pool_name, new_vmname)
+          created_instance
+        end
+
+        def get_block_device_mappings(image_id, volume_size)
+          ec2_client = connection.client
+          image = ec2_client.describe_images(:image_ids => [image_id]).images.first
+          raise RuntimeError, "Image not found: #{image_id}" if image.nil?
+
+          # Transform the images block_device_mappings output into a format
+          # ready for a create.
+          block_device_mappings = []
+          if image.root_device_type == "ebs"
+            orig_bdm = image.block_device_mappings
+            orig_bdm.each do |block_device|
+              block_device_mappings << {
+                :device_name => block_device.device_name,
+                :ebs => {
+                  # Change the default size of the root volume.
+                  :volume_size => volume_size,
+                  # This is required to override the images default for
+                  # delete_on_termination, forcing all volumes to be deleted once the
+                  # instance is terminated.
+                  :delete_on_termination => true
+                }
+              }
+            end
+          else
+            raise "#{image_id} does not have an ebs root device type"
+          end
+          block_device_mappings
         end

         # create_disk creates an additional disk for an existing VM. It will name the new
@@ -637,6 +680,20 @@ module Vmpooler
           end
         end

+        def get_current_lifetime(vm_name)
+          @redis.with_metrics do |redis|
+            lifetime = redis.hget("vmpooler__vm__#{vm_name}", 'lifetime') || '1h'
+            return lifetime
+          end
+        end
+
+        def get_current_job_url(vm_name)
+          @redis.with_metrics do |redis|
+            job = redis.hget("vmpooler__vm__#{vm_name}", 'tag:jenkins_build_url') || ''
+            return job
+          end
+        end
+
         # Compute resource wait for operation to be DONE (synchronous operation)
         def wait_for_zone_operation(project, zone, result, retries = 5)
           while result.status != 'DONE'
@@ -672,23 +729,21 @@ module Vmpooler
         end

         # Return a hash of VM data
-        # Provides vmname, hostname, template, poolname, boottime, status, zone, machine_type, labels, label_fingerprint, ip information
+        # Provides name, template, poolname, boottime, status, image_size, private_ip_address
         def generate_vm_hash(vm_object, pool_name)
           pool_configuration = pool_config(pool_name)
           return nil if pool_configuration.nil?

           {
-            'name' => vm_object.name,
-            'hostname' => vm_object.hostname,
+            'name' => vm_object.tags.detect {|f| f.key == 'vm_name' }&.value,
+            #'hostname' => vm_object.hostname,
             'template' => pool_configuration&.key?('template') ? pool_configuration['template'] : nil, # was expecting to get it from API, not from config, but this is what vSphere does too!
-            'poolname' => vm_object.labels&.key?('pool') ? vm_object.labels['pool'] : nil,
-            'boottime' => vm_object.creation_timestamp,
-            'status' => vm_object.status, # One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED
-            'zone' => vm_object.zone,
-            'machine_type' => vm_object.machine_type,
-            'labels' => vm_object.labels,
-            'label_fingerprint' => vm_object.label_fingerprint,
-            'ip' => vm_object.network_interfaces ? vm_object.network_interfaces.first.network_ip : nil
+            'poolname' => vm_object.tags.detect {|f| f.key == 'pool' }&.value,
+            'boottime' => vm_object.launch_time,
+            'status' => vm_object.state.name, # One of the following values: pending, running, shutting-down, terminated, stopping, stopped
+            #'zone' => vm_object.zone,
+            'image_size' => vm_object.instance_type,
+            'private_ip_address' => vm_object.private_ip_address
           }
         end
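The lifetime and job_url tag values used in create_vm above are no longer hard-coded: the new get_current_lifetime and get_current_job_url helpers read them from vmpooler's per-VM Redis hash. A minimal sketch of that lookup outside the provider (not part of this diff), assuming a reachable Redis with vmpooler's usual key layout and a placeholder VM name:

    require 'redis'

    redis = Redis.new(host: 'localhost') # assumes a local Redis
    vm_name = 'example-vm'               # placeholder

    # vmpooler keeps per-VM metadata in a hash keyed "vmpooler__vm__<name>";
    # the fallbacks mirror the ones the new helpers use.
    lifetime = redis.hget("vmpooler__vm__#{vm_name}", 'lifetime') || '1h'
    job_url  = redis.hget("vmpooler__vm__#{vm_name}", 'tag:jenkins_build_url') || ''

    puts "lifetime=#{lifetime} job_url=#{job_url}"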


@@ -54,12 +54,12 @@ EOT
   describe '#manual tests live' do
     context 'in itsysops' do
       before(:each) { allow(subject).to receive(:dns).and_call_original }
-      let(:vmname) { "instance-24" }
+      let(:vmname) { "instance-25" }
       let(:project) { 'vmpooler-test' }
-      it 'gets a vm' do
+      skip 'gets a vm' do
         result = subject.create_vm(poolname, vmname)
         # result = subject.destroy_vm(poolname, vmname)
-        # subject.get_vm(poolname, vmname)
+        subject.get_vm(poolname, vmname)
         # subject.dns_teardown({'name' => vmname})
         # subject.dns_setup({'name' => vmname, 'ip' => '1.2.3.5'})
       end