mirror of
https://github.com/puppetlabs/vmpooler-provider-ec2.git
synced 2026-01-26 02:28:40 -05:00
fix rubocop offenses
parent bd1b21736a
commit ee36ee868d
5 changed files with 109 additions and 113 deletions
@@ -51,3 +51,5 @@ Layout/LineLength:
   Enabled: false
 Metrics/BlockLength:
   Enabled: false
+Style/CaseLikeIf:
+  Enabled: false
@@ -1,15 +1,17 @@
+# frozen_string_literal: true
+
 require 'net/ssh'
+module Vmpooler
+  class PoolManager
    # This class connects to existing running VMs via NET:SSH
    # it uses a local key to do so and then setup SSHD on the hosts to enable
    # dev and CI users to connect.
-module Vmpooler
-  class PoolManager
    class AwsSetup
-      ROOT_KEYS_SCRIPT = ENV["ROOT_KEYS_SCRIPT"]
+      ROOT_KEYS_SCRIPT = ENV['ROOT_KEYS_SCRIPT']
       ROOT_KEYS_SYNC_CMD = "curl -k -o - -L #{ROOT_KEYS_SCRIPT} | %s"

       def self.setup_node_by_ssh(host, platform)
-        @key_file = ENV["KEY_FILE_LOCATION"] || '/app/abs/.ssh/abs-aws-ec2.rsa'
+        @key_file = ENV['KEY_FILE_LOCATION'] || '/app/abs/.ssh/abs-aws-ec2.rsa'
         conn = check_ssh_accepting_connections(host, platform)
         configure_host(host, platform, conn)
       end
@@ -30,15 +32,14 @@ module Vmpooler

       def self.get_user(platform)
         if platform =~ /centos/
-          user = 'centos'
+          'centos'
         elsif platform =~ /ubuntu/
-          user = 'ubuntu'
+          'ubuntu'
         elsif platform =~ /debian/
-          user = 'root'
+          'root'
         else
-          user = 'ec2-user'
+          'ec2-user'
         end
-        user
       end

       def self.check_ssh_accepting_connections(host, platform)
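
The get_user change above relies on `if`/`elsif` being an expression in Ruby: the selected branch's value is the method's return value, so the temporary `user` variable and the trailing `user` line are redundant. A standalone sketch of the equivalence (not the project's code):

def user_with_temp(platform)
  if platform =~ /centos/
    user = 'centos'
  else
    user = 'ec2-user'
  end
  user
end

def user_direct(platform)
  if platform =~ /centos/
    'centos'
  else
    'ec2-user'
  end
end

# Both forms return the same value for any input.
puts user_with_temp('centos-7') == user_direct('centos-7') # => true
puts user_with_temp('windows')  == user_direct('windows')  # => true
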
@@ -46,10 +47,9 @@ module Vmpooler
       begin
         user = get_user(platform)
         netssh_jruby_workaround
-        conn = Net::SSH.start(host, user, :keys => @key_file, :timeout => 10)
-        return conn
-      rescue Net::SSH::ConnectionTimeout, Errno::ECONNREFUSED => err
-        puts "Requested instances do not have sshd ready yet, try again: #{err}"
+        Net::SSH.start(host, user, keys: @key_file, timeout: 10)
+      rescue Net::SSH::ConnectionTimeout, Errno::ECONNREFUSED => e
+        puts "Requested instances do not have sshd ready yet, try again: #{e}"
         sleep 1
         retry if (retries += 1) < 300
       end
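
check_ssh_accepting_connections keeps its bounded retry loop: on a refused or timed-out connection it sleeps, bumps the counter, and re-enters the `begin` block until sshd answers or the limit is reached. A compact standalone sketch of that `begin`/`rescue`/`retry` shape, with the failing call simulated:

retries = 0
begin
  retries += 1
  # Stand-in for Net::SSH.start while sshd is still coming up.
  raise Errno::ECONNREFUSED if retries < 3

  puts "connected after #{retries} attempts"
rescue Errno::ECONNREFUSED
  sleep 0.1
  retry if retries < 300
end
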
@@ -73,6 +73,7 @@ module Vmpooler
         ssh.open_channel do |channel|
           channel.request_pty do |ch, success|
             raise "can't get pty request" unless success
+
             if platform =~ /centos|el-|redhat|fedora|eos|amazon/
               ch.exec('sudo -E /sbin/service sshd reload')
             elsif platform =~ /debian|ubuntu|cumulus/
@@ -87,13 +88,13 @@ module Vmpooler
         ssh.loop
       end

-      def self.sync_root_keys(host, platform)
-        unless ROOT_KEYS_SCRIPT.nil?
-          user = "root"
+      def self.sync_root_keys(host, _platform)
+        return if ROOT_KEYS_SCRIPT.nil?
+
+        user = 'root'
         netssh_jruby_workaround
-        Net::SSH.start(host, user, :keys => @key_file) do |ssh|
-          ssh.exec!(ROOT_KEYS_SYNC_CMD % "env PATH=\"/usr/gnu/bin:$PATH\" bash")
-          end
+        Net::SSH.start(host, user, keys: @key_file) do |ssh|
+          ssh.exec!(ROOT_KEYS_SYNC_CMD % 'env PATH="/usr/gnu/bin:$PATH" bash')
         end
       end

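
The sync_root_keys rewrite swaps the `unless ROOT_KEYS_SCRIPT.nil?` wrapper for an early `return`, so the happy path no longer nests. A small standalone sketch of that guard-clause pattern (method and variable names here are illustrative):

# Wrapper style: the whole body lives inside the condition.
def sync_keys_wrapped(script_url)
  unless script_url.nil?
    puts "syncing root keys from #{script_url}"
  end
end

# Guard-clause style: bail out first, then the body reads flat.
def sync_keys_guarded(script_url)
  return if script_url.nil?

  puts "syncing root keys from #{script_url}"
end

sync_keys_wrapped(nil)                            # prints nothing
sync_keys_guarded('https://example.test/keys.sh') # prints the sync message
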
@@ -101,7 +102,7 @@ module Vmpooler
       # https://github.com/jruby/jruby-openssl/issues/105
       # this will turn off some algos that match /^ecd(sa|h)-sha2/
       def self.netssh_jruby_workaround
-        Net::SSH::Transport::Algorithms::ALGORITHMS.values.each { |algs| algs.reject! { |a| a =~ /^ecd(sa|h)-sha2/ } }
+        Net::SSH::Transport::Algorithms::ALGORITHMS.each_value { |algs| algs.reject! { |a| a =~ /^ecd(sa|h)-sha2/ } }
         Net::SSH::KnownHosts::SUPPORTED_TYPE.reject! { |t| t =~ /^ecd(sa|h)-sha2/ }
       end
     end
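
The only change in netssh_jruby_workaround is `ALGORITHMS.values.each` becoming `ALGORITHMS.each_value`: the same per-value iteration without building an intermediate array of values first. A standalone sketch of the equivalence on a toy hash:

algorithms = {
  kex:      ['curve25519-sha256', 'ecdh-sha2-nistp256'],
  host_key: ['ssh-ed25519', 'ecdsa-sha2-nistp256']
}

# Both forms mutate the same underlying arrays in place.
algorithms.values.each { |algs| algs.reject! { |a| a =~ /^ecd(sa|h)-sha2/ } }
algorithms.each_value  { |algs| algs.reject! { |a| a =~ /^ecd(sa|h)-sha2/ } }

p algorithms # => {:kex=>["curve25519-sha256"], :host_key=>["ssh-ed25519"]}
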
@@ -43,7 +43,7 @@ module Vmpooler
         # the object reference for the connection, which means it cannot "reconnect" by creating an entirely new connection
         # object. Instead by wrapping it in a Hash, the Hash object reference itself never changes but the content of the
         # Hash can change, and is preserved across invocations.
-          new_conn = #connect to aws
+          new_conn = connect_to_aws
           { connection: new_conn }
         end
         @redis = redis_connection_pool
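
The comment around `new_conn` captures the provider's reconnection trick: the pooled object has to keep a stable identity, so the AWS connection is wrapped in a Hash and a reconnect only replaces the Hash's contents. A rough standalone sketch of why that preserves the reference (names are illustrative, not the provider's API):

# Whatever holds the pooled object keeps a reference to this Hash.
wrapper = { connection: 'initial-aws-connection' }
holder  = wrapper
original_id = wrapper.object_id

# "Reconnecting" swaps the value inside the wrapper, not the wrapper itself,
# so the holder sees the new connection without being handed a new object.
wrapper[:connection] = 'replacement-aws-connection'

puts holder.object_id == original_id # => true
puts holder[:connection]             # => replacement-aws-connection
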
@@ -60,9 +60,7 @@ module Vmpooler
        end
      end

-      def dns
-        @dns
-      end
+      attr_reader :dns

      # main configuration options
      def region
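
`attr_reader :dns` generates the same reader method that was previously written out by hand, so the change is behavior-preserving. A minimal standalone comparison:

class ManualReader
  def initialize(dns)
    @dns = dns
  end

  def dns
    @dns
  end
end

class GeneratedReader
  attr_reader :dns

  def initialize(dns)
    @dns = dns
  end
end

puts ManualReader.new('example.internal').dns ==
     GeneratedReader.new('example.internal').dns # => true
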
@@ -98,9 +96,9 @@ module Vmpooler
      def get_subnet_id(pool_name)
        case zone(pool_name)
        when 'us-west-2b'
-          return 'subnet-0fe90a688844f6f26'
+          'subnet-0fe90a688844f6f26'
        when 'us-west-2a'
-          return 'subnet-091b436f'
+          'subnet-091b436f'
        end
      end

@@ -125,17 +123,16 @@ module Vmpooler
        pool = pool_config(pool_name)
        raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?

-        zone = zone(pool_name)
        filters = [{
-          name: "tag:pool",
-          values: [pool_name],
+          name: 'tag:pool',
+          values: [pool_name]
        }]
        instance_list = connection.instances(filters: filters)

        return vms if instance_list.first.nil?

        instance_list.each do |vm|
-          vms << { 'name' => vm.tags.detect {|f| f.key == 'vm_name' }&.value || "vm_name not found in tags" }
+          vms << { 'name' => vm.tags.detect { |f| f.key == 'vm_name' }&.value || 'vm_name not found in tags' }
        end
        debug_logger(vms)
        vms
@@ -159,8 +156,8 @@ module Vmpooler
        vm_hash = nil

        filters = [{
-          name: "tag:vm_name",
-          values: [vm_name],
+          name: 'tag:vm_name',
+          values: [vm_name]
        }]
        instances = connection.instances(filters: filters).first
        return vm_hash if instances.nil?
@@ -190,45 +187,45 @@ module Vmpooler
        subnet_id = get_subnet_id(pool_name)
        tag = [
          {
-            resource_type: "instance", # accepts capacity-reservation, client-vpn-endpoint, customer-gateway, carrier-gateway, dedicated-host, dhcp-options, egress-only-internet-gateway, elastic-ip, elastic-gpu, export-image-task, export-instance-task, fleet, fpga-image, host-reservation, image, import-image-task, import-snapshot-task, instance, instance-event-window, internet-gateway, ipam, ipam-pool, ipam-scope, ipv4pool-ec2, ipv6pool-ec2, key-pair, launch-template, local-gateway, local-gateway-route-table, local-gateway-virtual-interface, local-gateway-virtual-interface-group, local-gateway-route-table-vpc-association, local-gateway-route-table-virtual-interface-group-association, natgateway, network-acl, network-interface, network-insights-analysis, network-insights-path, network-insights-access-scope, network-insights-access-scope-analysis, placement-group, prefix-list, replace-root-volume-task, reserved-instances, route-table, security-group, security-group-rule, snapshot, spot-fleet-request, spot-instances-request, subnet, subnet-cidr-reservation, traffic-mirror-filter, traffic-mirror-session, traffic-mirror-target, transit-gateway, transit-gateway-attachment, transit-gateway-connect-peer, transit-gateway-multicast-domain, transit-gateway-route-table, volume, vpc, vpc-endpoint, vpc-endpoint-service, vpc-peering-connection, vpn-connection, vpn-gateway, vpc-flow-log
+            resource_type: 'instance', # accepts capacity-reservation, client-vpn-endpoint, customer-gateway, carrier-gateway, dedicated-host, dhcp-options, egress-only-internet-gateway, elastic-ip, elastic-gpu, export-image-task, export-instance-task, fleet, fpga-image, host-reservation, image, import-image-task, import-snapshot-task, instance, instance-event-window, internet-gateway, ipam, ipam-pool, ipam-scope, ipv4pool-ec2, ipv6pool-ec2, key-pair, launch-template, local-gateway, local-gateway-route-table, local-gateway-virtual-interface, local-gateway-virtual-interface-group, local-gateway-route-table-vpc-association, local-gateway-route-table-virtual-interface-group-association, natgateway, network-acl, network-interface, network-insights-analysis, network-insights-path, network-insights-access-scope, network-insights-access-scope-analysis, placement-group, prefix-list, replace-root-volume-task, reserved-instances, route-table, security-group, security-group-rule, snapshot, spot-fleet-request, spot-instances-request, subnet, subnet-cidr-reservation, traffic-mirror-filter, traffic-mirror-session, traffic-mirror-target, transit-gateway, transit-gateway-attachment, transit-gateway-connect-peer, transit-gateway-multicast-domain, transit-gateway-route-table, volume, vpc, vpc-endpoint, vpc-endpoint-service, vpc-peering-connection, vpn-connection, vpn-gateway, vpc-flow-log
            tags: [
              {
-                key: "vm_name",
-                value: new_vmname,
+                key: 'vm_name',
+                value: new_vmname
              },
              {
-                key: "pool",
-                value: pool_name,
+                key: 'pool',
+                value: pool_name
              },
              {
-                key: "lifetime",
-                value: get_current_lifetime(new_vmname),
+                key: 'lifetime',
+                value: get_current_lifetime(new_vmname)
              },
              {
-                key: "created_by",
-                value: get_current_user(new_vmname),
+                key: 'created_by',
+                value: get_current_user(new_vmname)
              },
              {
-                key: "job_url",
-                value: get_current_job_url(new_vmname),
+                key: 'job_url',
+                value: get_current_job_url(new_vmname)
              },
              {
-                key: "organization",
-                value: "engineering",
+                key: 'organization',
+                value: 'engineering'
              },
              {
-                key: "portfolio",
-                value: "ds-ci",
-              },
+                key: 'portfolio',
+                value: 'ds-ci'
+              }

-            ],
-          },
+            ]
+          }
        ]
        config = {
          min_count: 1,
          max_count: 1,
          image_id: pool['template'],
-          monitoring: {:enabled => true},
+          monitoring: { enabled: true },
          key_name: 'always-be-scheduling',
          security_group_ids: ['sg-697fb015'],
          instance_type: amisize(pool_name),
@@ -238,9 +235,7 @@ module Vmpooler
          subnet_id: subnet_id
        }

-        if volume_size(pool_name)
-          config[:block_device_mappings] = get_block_device_mappings(config['image_id'], volume_size(pool_name))
-        end
+        config[:block_device_mappings] = get_block_device_mappings(config['image_id'], volume_size(pool_name)) if volume_size(pool_name)

        debug_logger('trigger insert_instance')
        batch_instance = connection.create_instances(config)
@@ -249,9 +244,7 @@ module Vmpooler
        created_instance = get_vm(pool_name, new_vmname)

        # extra setup steps
-        if to_provision(pool_name) == "true" || to_provision(pool_name) == true
-          provision_node_aws(created_instance['private_dns_name'], pool_name)
-        end
+        provision_node_aws(created_instance['private_dns_name'], pool_name) if to_provision(pool_name) == 'true' || to_provision(pool_name) == true

        created_instance
      end
@@ -262,29 +255,27 @@ module Vmpooler

      def get_block_device_mappings(image_id, volume_size)
        ec2_client = connection.client
-        image = ec2_client.describe_images(:image_ids => [image_id]).images.first
-        raise RuntimeError, "Image not found: #{image_id}" if image.nil?
+        image = ec2_client.describe_images(image_ids: [image_id]).images.first
+        raise "Image not found: #{image_id}" if image.nil?
+        raise "#{image_id} does not have an ebs root device type" unless image.root_device_type == 'ebs'

        # Transform the images block_device_mappings output into a format
        # ready for a create.
        block_device_mappings = []
-        if image.root_device_type == "ebs"
        orig_bdm = image.block_device_mappings
        orig_bdm.each do |block_device|
          block_device_mappings << {
-            :device_name => block_device.device_name,
-            :ebs => {
+            device_name: block_device.device_name,
+            ebs: {
              # Change the default size of the root volume.
-              :volume_size => volume_size,
+              volume_size: volume_size,
              # This is required to override the images default for
              # delete_on_termination, forcing all volumes to be deleted once the
              # instance is terminated.
-              :delete_on_termination => true
+              delete_on_termination: true
            }
          }
        end
-        else
-          raise "#{image_id} does not have an ebs root device type"
-        end
        block_device_mappings
      end
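
Most of the get_block_device_mappings hunk is the hash-rocket to shorthand conversion; for symbol keys the two literal forms build identical hashes, so the behavior is unchanged. A quick standalone check:

rocket = {
  :device_name => '/dev/sda1',
  :ebs => { :volume_size => 20, :delete_on_termination => true }
}

shorthand = {
  device_name: '/dev/sda1',
  ebs: { volume_size: 20, delete_on_termination: true }
}

puts rocket == shorthand # => true
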
@@ -346,13 +337,13 @@ module Vmpooler
      # [String] vm_name : Name of the existing VM
      # returns
      # [boolean] true : once the operations are finished
-      def destroy_vm(pool_name, vm_name)
+      def destroy_vm(_pool_name, vm_name)
        debug_logger('destroy_vm')
        deleted = false

        filters = [{
-          name: "tag:vm_name",
-          values: [vm_name],
+          name: 'tag:vm_name',
+          values: [vm_name]
        }]
        instances = connection.instances(filters: filters).first
        return true if instances.nil?
@@ -363,22 +354,22 @@ module Vmpooler
        begin
          connection.client.wait_until(:instance_terminated, { instance_ids: [instances.id] })
          deleted = true
-        rescue ::Aws::Waiters::Errors => error
-          debug_logger("failed waiting for instance terminated #{vm_name}: #{error}")
+        rescue ::Aws::Waiters::Errors => e
+          debug_logger("failed waiting for instance terminated #{vm_name}: #{e}")
        end

-        return deleted
+        deleted
      end

      # check if a vm is ready by opening a socket on port 22
      # if a domain is set, it will use vn_name.domain,
      # if not then it will use the ip directly (AWS workaround)
-      def vm_ready?(_pool_name, vm_name)
+      def vm_ready?(pool_name, vm_name)
        begin
          # TODO: we could use a healthcheck resource attached to instance
          domain_set = domain || global_config[:config]['domain']
          if domain_set.nil?
-            vm_ip = get_vm(_pool_name, vm_name)['private_ip_address']
+            vm_ip = get_vm(pool_name, vm_name)['private_ip_address']
            vm_name = vm_ip unless vm_ip.nil?
          end
          open_socket(vm_name, domain_set)
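
vm_ready? is documented above as a port-22 probe: it targets vm_name.domain when a domain is configured, otherwise the instance's private IP. This is not vmpooler's open_socket implementation, only a rough sketch of what such a readiness check typically looks like:

require 'socket'

# Returns true once something accepts a TCP connection on port 22.
def ssh_port_open?(host, domain = nil, timeout: 5)
  target = domain ? "#{host}.#{domain}" : host
  Socket.tcp(target, 22, connect_timeout: timeout) { true }
rescue SystemCallError, SocketError
  false
end

puts ssh_port_open?('127.0.0.1') # true only if a local sshd is listening
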
@@ -19,7 +19,7 @@ MockOperationErrorError = Struct.new(

MockInstance = Struct.new(
  # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/EC2/Instance.html
-  :instance_type, :launch_time, :private_ip_address, :state, :tags, :zone,
+  :instance_type, :launch_time, :private_ip_address, :state, :tags, :zone, :private_dns_name,
  keyword_init: true
)

@@ -33,7 +33,6 @@ describe 'Vmpooler::PoolManager::Provider::Aws' do
    timeout: 10
    ready_ttl: 1440
    provider: 'aws'
-    provision: true
EOT
  )
}
@@ -54,7 +53,10 @@ EOT

  describe '#manual tests live' do
    context 'in itsysops' do
-      before(:each) { allow(subject).to receive(:dns).and_call_original }
+      before(:each) {
+        config['provision'] = "true"
+        allow(subject).to receive(:dns).and_call_original
+      }
      let(:vmname) { "instance-46" }
      let(:poolname) { "ubuntu-2004-arm64" }
      skip 'gets a vm' do