Merge pull request #1 from puppetlabs/dio-3163

Added aws dependency and renamed directories
This commit is contained in:
Samuel 2022-07-07 09:08:52 -05:00 committed by GitHub
commit af05279b42
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
20 changed files with 1880 additions and 1 deletions

8
.github/dependabot.yml vendored Normal file
View file

@ -0,0 +1,8 @@
# Dependabot configuration (schema version 2).
version: 2
updates:
  # Keep gem dependencies (Gemfile/gemspec) up to date with a daily check.
  - package-ecosystem: bundler
    directory: "/"
    schedule:
      interval: daily
      time: "13:00"
    # Cap concurrent dependency PRs so the queue stays manageable.
    open-pull-requests-limit: 10

37
.github/workflows/release.yml vendored Normal file
View file

@ -0,0 +1,37 @@
name: Release
# Manually-triggered release: reads the gem version, tags a GitHub release,
# builds the gem, and pushes it to RubyGems.
on: workflow_dispatch
jobs:
  release:
    runs-on: ubuntu-latest
    # Guard so forks cannot publish releases.
    if: github.repository == 'puppetlabs/vmpooler-provider-ec2'
    steps:
      - uses: actions/checkout@v2
      - name: Get Version
        id: gv
        # Extract the VERSION string from version.rb.
        # FIX: the `::set-output` workflow command is deprecated and disabled by
        # GitHub; write the step output to the $GITHUB_OUTPUT file instead.
        run: |
          echo "ver=$(grep VERSION lib/vmpooler-provider-ec2/version.rb |rev |cut -d "'" -f2 |rev)" >> "$GITHUB_OUTPUT"
      - name: Tag Release
        uses: ncipollo/release-action@v1
        with:
          tag: ${{ steps.gv.outputs.ver }}
          token: ${{ secrets.GITHUB_TOKEN }}
          draft: false
          prerelease: false
          generateReleaseNotes: true
      - name: Install Ruby 2.5.8
        uses: ruby/setup-ruby@v1
        with:
          ruby-version: '2.5.8'
      - name: Build gem
        run: gem build *.gemspec
      - name: Publish gem
        # Write a rubygems credentials file, then push the freshly built gem.
        run: |
          mkdir -p $HOME/.gem
          touch $HOME/.gem/credentials
          chmod 0600 $HOME/.gem/credentials
          printf -- "---\n:rubygems_api_key: ${GEM_HOST_API_KEY}\n" > $HOME/.gem/credentials
          gem push *.gem
        env:
          GEM_HOST_API_KEY: '${{ secrets.RUBYGEMS_AUTH_TOKEN }}'

47
.github/workflows/testing.yml vendored Normal file
View file

@ -0,0 +1,47 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
# This workflow will download a prebuilt Ruby version, install dependencies and run tests with Rake
# For more information see: https://github.com/marketplace/actions/setup-ruby-jruby-and-truffleruby
name: Testing
# Run lint (rubocop) and spec tests on every pull request targeting main.
on:
  pull_request:
    branches:
      - main
jobs:
  rubocop:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        ruby-version:
          - '2.5.8'
    steps:
      - uses: actions/checkout@v2
      - name: Set up Ruby
        uses: ruby/setup-ruby@v1
        with:
          ruby-version: ${{ matrix.ruby-version }}
          bundler-cache: true # runs 'bundle install' and caches installed gems automatically
      - name: Run Rubocop
        run: bundle exec rake rubocop
  spec_tests:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # Exercise both MRI and JRuby.
        ruby-version:
          - '2.5.8'
          - 'jruby-9.2.12.0'
    steps:
      - uses: actions/checkout@v2
      - name: Set up Ruby
        uses: ruby/setup-ruby@v1
        with:
          ruby-version: ${{ matrix.ruby-version }}
          bundler-cache: true # runs 'bundle install' and caches installed gems automatically
      - name: Run spec tests
        run: bundle exec rake test

12
.gitignore vendored Normal file
View file

@ -0,0 +1,12 @@
# Bundler state and vendored gems
.bundle/
.vagrant/
# Test coverage output
coverage/
vendor/
.dccache
.ruby-version
# Developer-local gem overrides (evaluated by the Gemfile when present)
Gemfile.local
# JUnit output produced by `rake junit`
results.xml
# Local VMPooler configuration
/vmpooler.yaml
.idea
*.json
.secrets/

2
.jrubyrc Normal file
View file

@ -0,0 +1,2 @@
# for simplecov to work in jruby, without this we are getting errors when debugging spec tests
debug.fullTrace=true

55
.rubocop.yml Normal file
View file

@ -0,0 +1,55 @@
AllCops:
  # Lint only library code; generated/vendored/test code is excluded.
  Include:
    - 'lib/**/*.rb'
  Exclude:
    - 'scripts/**/*'
    - 'spec/**/*'
    - 'vendor/**/*'
    - Gemfile
    - Rakefile
# These short variable names make sense as exceptions to the rule, but generally I think short variable names do hurt readability
Naming/MethodParameterName:
  AllowedNames:
    - vm
    - dc
    - s
    - x
    - f
#new cops:
Lint/DuplicateRegexpCharacterClassElement: # (new in 1.1)
  Enabled: true
Lint/EmptyBlock: # (new in 1.1)
  Enabled: true
Lint/ToEnumArguments: # (new in 1.1)
  Enabled: true
Lint/UnmodifiedReduceAccumulator: # (new in 1.1)
  Enabled: true
Style/ArgumentsForwarding: # (new in 1.1)
  Enabled: false
Style/DocumentDynamicEvalDefinition: # (new in 1.1)
  Enabled: true
Style/SwapValues: # (new in 1.1)
  Enabled: false
#disabled
# Size/complexity metrics and line-length limits are disabled project-wide.
Metrics/AbcSize:
  Enabled: false
Metrics/ClassLength:
  Enabled: false
Metrics/CyclomaticComplexity:
  Enabled: false
Metrics/MethodLength:
  Enabled: false
Metrics/PerceivedComplexity:
  Enabled: false
Metrics/ParameterLists:
  Enabled: false
Layout/LineLength:
  Enabled: false
Metrics/BlockLength:
  Enabled: false
Style/CaseLikeIf:
  Enabled: false

13
Gemfile Normal file
View file

@ -0,0 +1,13 @@
# Gem source: allow an internal mirror via GEM_SOURCE, default to rubygems.org.
source ENV['GEM_SOURCE'] || 'https://rubygems.org'

# All dependencies are declared in the gemspec.
gemspec

# Evaluate Gemfile.local if it exists (developer-specific extra gems).
# FIX: File.exists? is deprecated and removed in Ruby 3.2; use File.exist?.
if File.exist? "#{__FILE__}.local"
  instance_eval(File.read("#{__FILE__}.local"))
end

# Evaluate ~/.gemfile if it exists (user-wide extra gems).
if File.exist?(File.join(Dir.home, '.gemfile'))
  instance_eval(File.read(File.join(Dir.home, '.gemfile')))
end

201
LICENSE Normal file
View file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1 +1,52 @@
# vmpooler-provider-aws
This is a provider for [VMPooler](https://github.com/puppetlabs/vmpooler) that allows using AWS to create instances, disks,
and snapshots, or to destroy instances for specific pools.
## Usage
Include this gem in the same Gemfile that you use to install VMPooler itself and then define one or more pools with the `provider` key set to `aws`. VMPooler will take care of the rest.
See what configuration is needed for this provider in the [example file](https://github.com/puppetlabs/vmpooler-provider-aws/blob/main/vmpooler.yaml.example).
Examples of deploying VMPooler with extra providers can be found in the [puppetlabs/vmpooler-deployment](https://github.com/puppetlabs/vmpooler-deployment) repository.
aws authorization is handled via two required ENV vars
1. ABS_AWS_ACCESS_KEY
2. ABS_AWS_SECRET_KEY
### Provisioning the new nodes
When you add the pool config `provision: true` to a pool, the new VMs will also get initialized with extra steps to setup the sshd config via NET:SSH
These steps expect two environment vars
1. ROOT_KEYS_SCRIPT: (optional) the URI location of a script (eg https in github) that will be run to setup keys. If not set, this will be skipped
2. KEY_FILE_LOCATION: (required) the location on local disk where the ssh key resides for VMPooler to connect via SSH to the EC2 node
### DNS
AWS will setup a private ip and private dns hostname for the VM once running. Optionally we can setup a human readable DNS entry to resolve the VMPooler provider `spicy-proton` fqdn
DNS is integrated via Google's CloudDNS service. To enable, a CloudDNS zone name must be provided in the config (see the example yaml file dns_zone_resource_name)
An A record is then created in that zone upon instance creation with the VM's internal IP, and deleted when the instance is destroyed.
### Labels
This provider adds tags to all resources that are managed
|resource|labels|note|
|---|---|---|
|instance|vm=$vm_name, pool=$pool_name|for example vm=foo-bar, pool=pool1|
|disk|vm=$vm_name, pool=$pool_name|for example vm=foo-bar and pool=pool1|
|snapshot|snapshot_name=$snapshot_name, vm=$vm_name, pool=$pool_name| for example snapshot_name=snap1, vm=foo-bar, pool=pool1|
Also see the usage of vmpooler's optional purge_unconfigured_resources, which is used to delete any resources found that
do not have the pool label, and can be configured to allow a specific list of unconfigured pool names.
### Pre-requisite
- An IAM user must exist in the target AWS account with permissions to create, delete vms etc
- if using DNS, a DNS zone needs to be created in CloudDNS, and configured in the provider's config section with the name of that zone (dns_zone_resource_name). When not specified, the DNS setup and teardown is skipped.
## License
vmpooler-provider-aws is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.html). See the [LICENSE](LICENSE) file for more details.

25
Rakefile Normal file
View file

@ -0,0 +1,25 @@
require 'rspec/core/rake_task'

# Only wire up RuboCop tasks when the gem is actually installed.
rubocop_available = Gem::Specification.find_all_by_name('rubocop').any?
require 'rubocop/rake_task' if rubocop_available

desc 'Run rspec tests with coloring.'
RSpec::Core::RakeTask.new(:test) do |task|
  task.rspec_opts = %w[--color --format documentation]
  task.pattern = 'spec/'
end

desc 'Run rspec tests and save JUnit output to results.xml.'
RSpec::Core::RakeTask.new(:junit) do |task|
  task.rspec_opts = %w[-r yarjuf -f JUnit -o results.xml]
  task.pattern = 'spec/'
end

if rubocop_available
  desc 'Run RuboCop'
  RuboCop::RakeTask.new(:rubocop) do |cop_task|
    cop_task.options << '--display-cop-names'
  end
end

# Running `rake` with no arguments runs the spec suite.
task default: [:test]

View file

@ -0,0 +1,5 @@
# frozen_string_literal: true

# Namespace holding the gem's version constant.
module VmpoolerProviderEc2
  # Gem version; the release workflow greps this file for VERSION.
  VERSION = '0.0.1'
end

110
lib/vmpooler/aws_setup.rb Normal file
View file

@ -0,0 +1,110 @@
# frozen_string_literal: true
require 'net/ssh'
module Vmpooler
class PoolManager
# This class connects to existing running VMs via NET:SSH
# it uses a local key to do so and then setup SSHD on the hosts to enable
# dev and CI users to connect.
class AwsSetup
  # Read once at class-load time; changing the env var later has no effect.
  ROOT_KEYS_SCRIPT = ENV['ROOT_KEYS_SCRIPT']
  # curl pipeline template; %s is substituted with the interpreter command.
  ROOT_KEYS_SYNC_CMD = "curl -k -o - -L #{ROOT_KEYS_SCRIPT} | %s"

  # Entry point: wait until sshd accepts connections, then configure the host.
  # host     - internal DNS name of the instance
  # platform - platform string used to pick the ssh user and service commands
  def self.setup_node_by_ssh(host, platform)
    # Private key used for the initial SSH connection.
    @key_file = ENV['KEY_FILE_LOCATION'] || '/app/abs/.ssh/abs-aws-ec2.rsa'
    conn = check_ssh_accepting_connections(host, platform)
    # NOTE(review): conn is nil when check_ssh_accepting_connections exhausts
    # its retries; configure_host would then fail with NoMethodError on nil —
    # confirm whether a timeout should raise explicitly instead.
    configure_host(host, platform, conn)
  end

  # For an Amazon Linux AMI, the user name is ec2-user.
  #
  # For a Centos AMI, the user name is centos.
  #
  # For a Debian AMI, the user name is admin or root.
  #
  # For a Fedora AMI, the user name is ec2-user or fedora.
  #
  # For a RHEL AMI, the user name is ec2-user or root.
  #
  # For a SUSE AMI, the user name is ec2-user or root.
  #
  # For an Ubuntu AMI, the user name is ubuntu.
  #
  # Maps a platform string to the default ssh login user for that AMI family.
  def self.get_user(platform)
    if platform =~ /centos/
      'centos'
    elsif platform =~ /ubuntu/
      'ubuntu'
    elsif platform =~ /debian/
      'root'
    else
      'ec2-user'
    end
  end

  # Polls the host (up to 300 attempts, 1s apart) until an SSH session can be
  # established; returns the Net::SSH connection, or nil when retries run out.
  def self.check_ssh_accepting_connections(host, platform)
    retries = 0
    begin
      user = get_user(platform)
      netssh_jruby_workaround
      Net::SSH.start(host, user, keys: @key_file, timeout: 10)
    rescue Net::SSH::ConnectionTimeout, Errno::ECONNREFUSED => e
      puts "Requested instances do not have sshd ready yet, try again: #{e}"
      sleep 1
      retry if (retries += 1) < 300
    end
  end

  # Configure the aws host by enabling root and setting the hostname
  # @param host [String] the internal dns name of the instance
  def self.configure_host(host, platform, ssh)
    # Copy the login user's authorized keys to root and allow root login.
    ssh.exec!('sudo cp -r .ssh /root/.')
    ssh.exec!("sudo sed -ri 's/^#?PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config")
    ssh.exec!("sudo hostname #{host}")
    if platform =~ /amazon/
      # Amazon Linux requires this to preserve host name changes across reboots.
      ssh.exec!("sudo sed -ie '/^HOSTNAME/ s/=.*/=#{host}/' /etc/sysconfig/network")
    end
    restart_sshd(host, platform, ssh)
    sync_root_keys(host, platform)
  end

  # Reloads/restarts sshd with the platform-appropriate service command so the
  # PermitRootLogin change above takes effect.
  def self.restart_sshd(host, platform, ssh)
    ssh.open_channel do |channel|
      # A pty is requested so the sudo invocations can run.
      channel.request_pty do |ch, success|
        raise "can't get pty request" unless success
        if platform =~ /centos|el-|redhat|fedora|eos|amazon/
          ch.exec('sudo -E /sbin/service sshd reload')
        elsif platform =~ /debian|ubuntu|cumulus/
          ch.exec('sudo su -c \"service sshd restart\"')
        elsif platform =~ /arch|centos-7|el-7|redhat-7|fedora-(1[4-9]|2[0-9])/
          ch.exec('sudo -E systemctl restart sshd.service')
        else
          # NOTE(review): `services` is not defined anywhere in this class;
          # reaching this branch would raise NameError — confirm the intended
          # logger object.
          services.logger.error("Attempting to update ssh on non-supported platform: #{host}: #{platform}")
        end
      end
    end
    ssh.loop
  end

  # Downloads and runs ROOT_KEYS_SCRIPT as root to install authorized keys;
  # no-op when the ROOT_KEYS_SCRIPT env var was unset at class-load time.
  def self.sync_root_keys(host, _platform)
    return if ROOT_KEYS_SCRIPT.nil?
    user = 'root'
    netssh_jruby_workaround
    Net::SSH.start(host, user, keys: @key_file) do |ssh|
      ssh.exec!(ROOT_KEYS_SYNC_CMD % 'env PATH="/usr/gnu/bin:$PATH" bash')
    end
  end

  # issue when using net ssh 6.1.0 with jruby
  # https://github.com/jruby/jruby-openssl/issues/105
  # this will turn off some algos that match /^ecd(sa|h)-sha2/
  def self.netssh_jruby_workaround
    Net::SSH::Transport::Algorithms::ALGORITHMS.each_value { |algs| algs.reject! { |a| a =~ /^ecd(sa|h)-sha2/ } }
    Net::SSH::KnownHosts::SUPPORTED_TYPE.reject! { |t| t =~ /^ecd(sa|h)-sha2/ }
  end
end
end
end

View file

@ -0,0 +1,513 @@
# frozen_string_literal: true
require 'bigdecimal'
require 'bigdecimal/util'
require 'vmpooler/providers/base'
require 'aws-sdk-ec2'
require 'vmpooler/aws_setup'
module Vmpooler
class PoolManager
class Provider
# This class represents an EC2 provider to CRUD resources in the AWS cloud.
class Ec2 < Vmpooler::PoolManager::Provider::Base
# The connection_pool method is normally used only for testing
attr_reader :connection_pool
# Builds the provider and its AWS connection pool; all arguments are passed
# through to the base provider unchanged.
def initialize(config, logger, metrics, redis_connection_pool, name, options)
  super(config, logger, metrics, redis_connection_pool, name, options)
  # AWS credentials come from the environment (see README).
  @aws_access_key = ENV['ABS_AWS_ACCESS_KEY']
  @aws_secret_key = ENV['ABS_AWS_SECRET_KEY']
  # Global task limit, defaulting to 10 when not configured.
  task_limit = global_config[:config].nil? || global_config[:config]['task_limit'].nil? ? 10 : global_config[:config]['task_limit'].to_i
  # The default connection pool size is:
  # Whatever is biggest from:
  #   - How many pools this provider services
  #   - Maximum number of cloning tasks allowed
  #   - Need at least 2 connections so that a pool can have inventory functions performed while cloning etc.
  default_connpool_size = [provided_pools.count, task_limit, 2].max
  connpool_size = provider_config['connection_pool_size'].nil? ? default_connpool_size : provider_config['connection_pool_size'].to_i
  # The default connection pool timeout should be quite large - 60 seconds
  connpool_timeout = provider_config['connection_pool_timeout'].nil? ? 60 : provider_config['connection_pool_timeout'].to_i
  logger.log('d', "[#{name}] ConnPool - Creating a connection pool of size #{connpool_size} with timeout #{connpool_timeout}")
  @connection_pool = Vmpooler::PoolManager::GenericConnectionPool.new(
    metrics: metrics,
    connpool_type: 'provider_connection_pool',
    connpool_provider: name,
    size: connpool_size,
    timeout: connpool_timeout
  ) do
    logger.log('d', "[#{name}] Connection Pool - Creating a connection object")
    # Need to wrap the vSphere connection object in another object. The generic connection pooler will preserve
    # the object reference for the connection, which means it cannot "reconnect" by creating an entirely new connection
    # object. Instead by wrapping it in a Hash, the Hash object reference itself never changes but the content of the
    # Hash can change, and is preserved across invocations.
    # (Comment inherited from the vSphere provider; the same wrapping is used
    # here for the AWS connection.)
    new_conn = connect_to_aws
    { connection: new_conn }
  end
  @redis = redis_connection_pool
end
# name of the provider class
# Identifier this provider reports to VMPooler.
def name
  'ec2'
end
# Checks an AWS connection out of the pool (with metrics) and returns it,
# re-establishing it first if needed via ensured_aws_connection.
def connection
  @connection_pool.with_metrics do |pool_object|
    return ensured_aws_connection(pool_object)
  end
end
# DNS helper handle.
# NOTE(review): no writer for @dns is visible in this chunk — confirm where
# it is assigned before relying on it.
attr_reader :dns

# main configuration options
# Region from the provider-level config; nil when unset.
def region
  return provider_config['region'] if provider_config['region']
end

# main configuration options, overridable for each pool
# Availability zone: pool-level value wins over the provider-level default.
def zone(pool_name)
  return pool_config(pool_name)['zone'] if pool_config(pool_name)['zone']
  return provider_config['zone'] if provider_config['zone']
end

# EC2 instance type: pool-level value wins over the provider-level default.
def amisize(pool_name)
  return pool_config(pool_name)['amisize'] if pool_config(pool_name)['amisize']
  return provider_config['amisize'] if provider_config['amisize']
end

# Root volume size: pool-level value wins over the provider-level default;
# nil means no custom block-device mappings (see create_vm).
def volume_size(pool_name)
  return pool_config(pool_name)['volume_size'] if pool_config(pool_name)['volume_size']
  return provider_config['volume_size'] if provider_config['volume_size']
end

# dns
# DNS domain from the provider config.
def domain
  provider_config['domain']
end

# DNS zone resource name; DNS setup/teardown is skipped when unset.
def dns_zone_resource_name
  provider_config['dns_zone_resource_name']
end

# subnets
# Hard-coded subnet IDs per availability zone; nil for any other zone.
# NOTE(review): consider moving these IDs into configuration.
def get_subnet_id(pool_name)
  case zone(pool_name)
  when 'us-west-2b'
    'subnet-0fe90a688844f6f26'
  when 'us-west-2a'
    'subnet-091b436f'
  end
end

# Pool-level 'provision' flag; nil when unset (consumed by create_vm).
def to_provision(pool_name)
  return pool_config(pool_name)['provision'] if pool_config(pool_name)['provision']
end
# Base methods that are implemented:
# vms_in_pool lists all the VM names in a pool, which is based on the VMs
# having a tag "pool" that match a pool config name.
# inputs
# [String] pool_name : Name of the pool
# returns
# empty array [] if no VMs found in the pool
# [Array]
# [Hashtable]
# [String] name : the name of the VM instance (unique for whole project)
# Lists the names of all instances tagged with pool=pool_name.
# Returns an array of { 'name' => vm_name } hashes; [] when none are found.
def vms_in_pool(pool_name)
  debug_logger('vms_in_pool')
  raise("Pool #{pool_name} does not exist for the provider #{name}") if pool_config(pool_name).nil?
  pool_filter = [{
    name: 'tag:pool',
    values: [pool_name]
  }]
  found = connection.instances(filters: pool_filter)
  return [] if found.first.nil?
  names = found.map do |instance|
    name_tag = instance.tags.detect { |tag| tag.key == 'vm_name' }
    { 'name' => name_tag&.value || 'vm_name not found in tags' }
  end
  debug_logger(names)
  names
end
# inputs
# [String] pool_name : Name of the pool
# [String] vm_name : Name of the VM to find
# returns
# nil if VM doesn't exist name, template, poolname, boottime, status, image_size, private_ip_address
# [Hastable] of the VM
# [String] name : The name of the resource, provided by the client when initially creating the resource
# [String] template : This is the name of template
# [String] poolname : Name of the pool the VM
# [Time] boottime : Time when the VM was created/booted
# [String] status : One of the following values: pending, running, shutting-down, terminated, stopping, stopped
# [String] image_size : The EC2 image size eg a1.large
# [String] private_ip_address: The private IPv4 address
# Finds a single instance by its vm_name tag.
# Returns the hash built by generate_vm_hash, or nil when no instance matches.
def get_vm(pool_name, vm_name)
  debug_logger('get_vm')
  name_filter = [{
    name: 'tag:vm_name',
    values: [vm_name]
  }]
  instance = connection.instances(filters: name_filter).first
  return nil if instance.nil?
  result = generate_vm_hash(instance, pool_name)
  debug_logger("vm_hash #{result}")
  result
end
# create_vm creates a new VM with a default network from the config,
# a initial disk named #{new_vmname}-disk0 that uses the 'template' as its source image
# and labels added for vm and pool
# and an instance configuration for machine_type from the config and
# labels vm and pool
# having a label "pool" that match a pool config name.
# inputs
# [String] pool : Name of the pool
# [String] new_vmname : Name to give the new VM
# returns
# [Hashtable] of the VM as per get_vm(pool_name, vm_name)
# Creates a single EC2 instance for the pool, waits until it is running, and
# optionally runs extra SSH provisioning steps.
# inputs
#   [String] pool_name  : Name of the pool (must exist in the provider config)
#   [String] new_vmname : Name to give the new VM (stored in the vm_name tag)
# returns
#   [Hashtable] of the VM as per get_vm(pool_name, vm_name)
def create_vm(pool_name, new_vmname)
  debug_logger('create_vm')
  pool = pool_config(pool_name)
  raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?
  raise("Instance creation not attempted, #{new_vmname} already exists") if get_vm(pool_name, new_vmname)
  subnet_id = get_subnet_id(pool_name)
  # Tags applied at creation time; vm_name and pool are what vms_in_pool and
  # get_vm use to locate instances later.
  tag = [
    {
      resource_type: 'instance',
      tags: [
        { key: 'vm_name', value: new_vmname },
        { key: 'pool', value: pool_name },
        { key: 'lifetime', value: get_current_lifetime(new_vmname) },
        { key: 'created_by', value: get_current_user(new_vmname) },
        { key: 'job_url', value: get_current_job_url(new_vmname) },
        { key: 'organization', value: 'engineering' },
        { key: 'portfolio', value: 'ds-ci' }
      ]
    }
  ]
  config = {
    min_count: 1,
    max_count: 1,
    image_id: pool['template'],
    monitoring: { enabled: true },
    key_name: 'always-be-scheduling',
    security_group_ids: ['sg-697fb015'],
    instance_type: amisize(pool_name),
    disable_api_termination: false,
    instance_initiated_shutdown_behavior: 'terminate',
    tag_specifications: tag,
    subnet_id: subnet_id
  }
  # BUG FIX: config uses symbol keys, so the original config['image_id'] was
  # always nil and get_block_device_mappings raised "Image not found: "
  # whenever volume_size was configured. Use the symbol key instead.
  config[:block_device_mappings] = get_block_device_mappings(config[:image_id], volume_size(pool_name)) if volume_size(pool_name)
  debug_logger('trigger insert_instance')
  batch_instance = connection.create_instances(config)
  instance_id = batch_instance.first.instance_id
  # Block until EC2 reports the instance as running.
  connection.client.wait_until(:instance_running, { instance_ids: [instance_id] })
  created_instance = get_vm(pool_name, new_vmname)
  # extra setup steps
  # NOTE(review): pool_name is forwarded as the "platform" argument of
  # AwsSetup.setup_node_by_ssh — confirm pool names encode the platform.
  provision_node_aws(created_instance['private_dns_name'], pool_name) if to_provision(pool_name) == 'true' || to_provision(pool_name) == true
  created_instance
end
# Runs the extra SSH-based setup (root login, hostname, keys) on a new node.
# vm        - the instance's private DNS name
# pool_name - forwarded as the platform string (see AwsSetup.setup_node_by_ssh)
def provision_node_aws(vm, pool_name)
  AwsSetup.setup_node_by_ssh(vm, pool_name)
end
# Build create-ready block device mappings from an AMI's existing mappings,
# overriding the root volume size and forcing delete_on_termination so no
# orphaned EBS volumes are left behind once the instance is terminated.
#
# inputs
#   [String] image_id : the AMI id to read mappings from
#   [Integer/String] volume_size : desired volume size in GB
# returns
#   [Array<Hash>] mappings suitable for passing to create_instances
# raises
#   RuntimeError if the image cannot be found or is not EBS-backed
def get_block_device_mappings(image_id, volume_size)
  ec2_client = connection.client
  image = ec2_client.describe_images(image_ids: [image_id]).images.first
  raise "Image not found: #{image_id}" if image.nil?
  raise "#{image_id} does not have an ebs root device type" unless image.root_device_type == 'ebs'

  # Transform the image's block_device_mappings into create-ready hashes.
  image.block_device_mappings.map do |device|
    {
      device_name: device.device_name,
      ebs: {
        # Override the AMI's default volume size.
        volume_size: volume_size,
        # Force every volume to be deleted when the instance terminates,
        # regardless of the AMI's own delete_on_termination default.
        delete_on_termination: true
      }
    }
  end
end
# create_disk creates an additional disk for an existing VM. It will name the new
# disk #{vm_name}-disk#{number_disk} where number_disk is the next logical disk number
# starting with 1 when adding an additional disk to a VM with only the boot disk:
# #{vm_name}-disk0 == boot disk
# #{vm_name}-disk1 == additional disk added via create_disk
# #{vm_name}-disk2 == additional disk added via create_disk if run a second time etc
# the new disk has labels added for vm and pool
# The AWS lifecycle is to create a new disk (lives independently of the instance) then to attach
# it to the existing instance.
# inputs
# [String] pool_name : Name of the pool
# [String] vm_name : Name of the existing VM
# [String] disk_size : The new disk size in GB
# returns
# [boolean] true : once the operations are finished
# create_snapshot creates new snapshots with the unique name {new_snapshot_name}-#{disk.name}
# for one VM; a single create_snapshot() call may create multiple snapshots, one for each drive.
# Since each snapshot resource needs a unique name within the AWS account/region,
# we create a unique name by concatenating {new_snapshot_name}-#{disk.name};
# the disk name is based on vm_name, which makes it unique.
# The snapshot is tagged with snapshot_name, vm, pool, diskname and boot
# inputs
# [String] pool_name : Name of the pool
# [String] vm_name : Name of the existing VM
# [String] new_snapshot_name : a unique name for this snapshot, which would be used to refer to it when reverting
# returns
# [boolean] true : once the operations are finished
# raises
# RuntimeError if the vm_name cannot be found
# RuntimeError if the snapshot_name already exists for this VM
# revert_snapshot reverts an existing VM's disks to an existing snapshot_name
# reverting in aws entails
# 1. shutting down the VM,
# 2. detaching and deleting the drives,
# 3. creating new disks with the same name from the snapshot for each disk
# 4. attach disks and start instance
# for one vm, there might be multiple snapshots in time. We select the ones referred to by the
# snapshot_name, but that may be multiple snapshots, one for each disks
# The new disk is added tags vm and pool
# inputs
# [String] pool_name : Name of the pool
# [String] vm_name : Name of the existing VM
# [String] snapshot_name : Name of an existing snapshot
# returns
# [boolean] true : once the operations are finished
# raises
# RuntimeError if the vm_name cannot be found
# RuntimeError if the snapshot_name already exists for this VM
# destroy_vm deletes an existing VM instance and any disks and snapshots via their tags.
# In AWS, instances, volumes and snapshots are resources that can exist independently of each other.
# inputs
# [String] pool_name : Name of the pool
# [String] vm_name : Name of the existing VM
# returns
# [boolean] true : once the operations are finished
# destroy_vm terminates the instance carrying the given vm_name tag.
#
# inputs
#   [String] _pool_name : Name of the pool (unused; lookup is tag-based)
#   [String] vm_name : Name of the VM to destroy
# returns
#   [Boolean] true when no matching instance exists, or once the
#   instance_terminated waiter confirms termination; false if the waiter fails
def destroy_vm(_pool_name, vm_name)
  debug_logger('destroy_vm')
  deleted = false
  filters = [{
    name: 'tag:vm_name',
    values: [vm_name]
  }]
  instance = connection.instances(filters: filters).first
  # Nothing to do when no instance carries the tag.
  return true if instance.nil?

  debug_logger("trigger delete_instance #{vm_name}")
  instance.terminate
  begin
    connection.client.wait_until(:instance_terminated, { instance_ids: [instance.id] })
    deleted = true
  rescue ::Aws::Waiters::Errors::WaiterFailed => e
    # BUG FIX: the original rescued the namespace module ::Aws::Waiters::Errors,
    # which no waiter exception includes, so waiter failures were never caught
    # here and propagated to the caller. WaiterFailed is the actual base class.
    debug_logger("failed waiting for instance terminated #{vm_name}: #{e}")
  end
  deleted
end
# check if a vm is ready by opening a socket on port 22
# if a domain is set, it will use vm_name.domain,
# if not then it will use the ip directly (AWS workaround)
# Probe whether a VM is reachable on its SSH port (22 by default in
# open_socket). When no domain is configured, falls back to connecting to the
# instance's private IP address directly.
#
# returns
#   [Boolean] true when the socket opens, false on any error (incl. timeout)
def vm_ready?(pool_name, vm_name)
  # TODO: we could use a healthcheck resource attached to instance
  domain_set = domain || global_config[:config]['domain']
  if domain_set.nil?
    vm_ip = get_vm(pool_name, vm_name)['private_ip_address']
    vm_name = vm_ip unless vm_ip.nil?
  end
  open_socket(vm_name, domain_set)
  true
rescue StandardError => _e
  false
end
# tag_vm_user This method is called once we know who is using the VM (it is running). This method enables seeing
# who is using what in the provider pools.
#
# inputs
#   [String] pool_name : Name of the pool
#   [String] vm_name : Name of the VM to tag with the current user
# returns
#   [Boolean] : true if successful, false if an error occurred and it should retry
# Tag the instance with the requesting user so usage can be attributed.
# BUG FIX: the original body was copy-pasted GCE code (Google::Apis::ComputeV1,
# 'labels'/'label_fingerprint') which generate_vm_hash never provides, so the
# method unconditionally returned false. EC2 instances are tagged via
# create_tags instead; tags are additive, no fingerprint handshake is needed.
#
# inputs
#   [String] pool : Name of the pool
#   [String] vm_name : Name of the VM to tag
# returns
#   [Boolean] true if the tag was applied, false if the VM is missing or an
#   error occurred (caller should retry)
def tag_vm_user(pool, vm_name)
  user = get_current_user(vm_name)
  vm_hash = get_vm(pool, vm_name)
  return false if vm_hash.nil?

  begin
    filters = [{
      name: 'tag:vm_name',
      values: [vm_name]
    }]
    instance = connection.instances(filters: filters).first
    return false if instance.nil?

    # add new tag called token-user, with value as user
    instance.create_tags(tags: [{ key: 'token-user', value: user }])
  rescue StandardError => _e
    return false
  end
  true
end
# END BASE METHODS
# Look up the user who checked out the VM and normalise the value so it is
# valid as a cloud label/tag: lowercase, with any character outside
# [0-9a-z_-] replaced by a dash. Returns '' when no user is recorded.
def get_current_user(vm_name)
  @redis.with_metrics do |redis|
    raw_user = redis.hget("vmpooler__vm__#{vm_name}", 'token:user')
    return '' if raw_user.nil?

    # lowercase + dash-substitution keeps the value label/tag safe
    return raw_user.downcase.gsub(/[^0-9a-z_-]/, '-')
  end
end
# Fetch the VM's requested lifetime from redis, defaulting to '1h' when the
# field is absent.
def get_current_lifetime(vm_name)
  @redis.with_metrics do |redis|
    return redis.hget("vmpooler__vm__#{vm_name}", 'lifetime') || '1h'
  end
end
# Fetch the Jenkins build URL tag recorded for the VM, or '' when unset.
def get_current_job_url(vm_name)
  @redis.with_metrics do |redis|
    return redis.hget("vmpooler__vm__#{vm_name}", 'tag:jenkins_build_url') || ''
  end
end
# Return a hash of VM data
# Provides name, template, poolname, boottime, status, image_size,
# private_ip_address and private_dns_name for a single EC2 instance object.
# Returns nil when the pool is not configured.
def generate_vm_hash(vm_object, pool_name)
  pool_configuration = pool_config(pool_name)
  return nil if pool_configuration.nil?

  # Helper to read a value out of the instance's tag list by key.
  tag_value = ->(key) { vm_object.tags.find { |tag| tag.key == key }&.value }

  {
    'name' => tag_value.call('vm_name'),
    # template comes from the pool config rather than the API — this mirrors
    # what the vSphere provider does too.
    'template' => pool_configuration&.key?('template') ? pool_configuration['template'] : nil,
    'poolname' => tag_value.call('pool'),
    'boottime' => vm_object.launch_time,
    # One of: pending, running, shutting-down, terminated, stopping, stopped
    'status' => vm_object.state&.name,
    'image_size' => vm_object.instance_type,
    'private_ip_address' => vm_object.private_ip_address,
    'private_dns_name' => vm_object.private_dns_name
  }
end
# Memoise the AWS connection on the pool object, creating it on first use and
# returning the (possibly pre-existing) connection.
def ensured_aws_connection(connection_pool_object)
  connection_pool_object[:connection] ||= connect_to_aws
end
# Build an Aws::EC2::Resource client with the configured credentials,
# retrying with linear backoff on failure.
# Retries up to config['max_tries'] (default 3) attempts, sleeping
# attempt * retry_factor (default 10) seconds between tries, and re-raises
# the last error once attempts are exhausted. Increments connect.open /
# connect.fail metrics accordingly.
def connect_to_aws
  max_tries = global_config[:config]['max_tries'] || 3
  retry_factor = global_config[:config]['retry_factor'] || 10
  attempt = 1
  begin
    resource = ::Aws::EC2::Resource.new(
      region: region,
      credentials: ::Aws::Credentials.new(@aws_access_key, @aws_secret_key),
      log_level: :debug
    )
    metrics.increment('connect.open')
    resource
  rescue StandardError => e
    metrics.increment('connect.fail')
    raise e if attempt >= max_tries

    # linear backoff before the next attempt
    sleep(attempt * retry_factor)
    attempt += 1
    retry
  end
end
# This should supercede the open_socket method in the Pool Manager
# Open (and always close) a TCP connection to host[.domain]:port, yielding the
# socket to an optional block. Raises Timeout::Error when the connection cannot
# be established within `timeout` seconds; propagates socket errors otherwise.
# FIX: dropped the `&_block` capture — the body uses `yield`/`block_given?`,
# so the explicit capture only allocated an unused Proc and its underscore
# name misleadingly suggested the block was ignored.
def open_socket(host, domain = nil, timeout = 5, port = 22)
  Timeout.timeout(timeout) do
    target_host = host
    target_host = "#{host}.#{domain}" if domain
    sock = TCPSocket.new target_host, port
    begin
      yield sock if block_given?
    ensure
      # guarantee the socket is released even if the block raises
      sock.close
    end
  end
end
# used in local dev environment, set DEBUG_FLAG=true
# this way the upstream vmpooler manager does not get polluted with logs
# inputs
#   [String] message : text to emit
#   [Boolean] send_to_upstream : when true, also forward to the shared logger
def debug_logger(message, send_to_upstream: false)
# the default logger is simple and does not enforce debug levels (the first argument)
# NOTE(review): any non-empty DEBUG_FLAG value (even "false") enables stdout output
puts message if ENV['DEBUG_FLAG']
logger.log('[g]', message) if send_to_upstream
end
end
end
end
end

110
spec/ec2_helper.rb Normal file
View file

@ -0,0 +1,110 @@
# frozen_string_literal: true
# this file is used to mock the AWS EC2 objects (plus some legacy GCE-shaped structs), for example the main connection object
# Generic operation-result mock.
# NOTE(review): modelled on the GCE Operation resource; appears to be a
# GCE-era leftover — confirm it is still referenced by the EC2 specs.
MockResult = Struct.new(
# https://googleapis.dev/ruby/google-api-client/latest/Google/Apis/ComputeV1/Operation.html
:client_operation_id, :creation_timestamp, :description, :end_time, :error, :http_error_message,
:http_error_status_code, :id, :insert_time, :kind, :name, :operation_type, :progress, :region,
:self_link, :start_time, :status, :status_message, :target_id, :target_link, :user, :warnings, :zone,
keyword_init: true
)
# Empty error list stand-in for operation errors.
MockOperationError = [].freeze
# Single operation-error entry mock (GCE-shaped).
MockOperationErrorError = Struct.new(
# https://googleapis.dev/ruby/google-api-client/latest/Google/Apis/ComputeV1/Operation/Error/Error.html
:code, :location, :message,
keyword_init: true
)
# EC2 instance mock — the fields the provider reads in generate_vm_hash.
MockInstance = Struct.new(
# https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/EC2/Instance.html
:instance_type, :launch_time, :private_ip_address, :state, :tags, :zone, :private_dns_name,
keyword_init: true
)
# EC2 tag mock: a simple key/value pair.
MockTag = Struct.new(
:key, :value,
keyword_init: true
)
# NOTE(review): the list/disk/snapshot structs below mirror GCE resources and
# look like leftovers from the GCE provider specs — confirm before relying on them.
MockInstanceList = Struct.new(
# https://googleapis.dev/ruby/google-api-client/latest/Google/Apis/ComputeV1/InstanceList.html
:id, :items, :kind, :next_page_token, :self_link, :warning,
keyword_init: true
)
MockDiskList = Struct.new(
# https://googleapis.dev/ruby/google-api-client/latest/Google/Apis/ComputeV1/DiskList.html
:id, :items, :kind, :next_page_token, :self_link, :warning,
keyword_init: true
)
MockDisk = Struct.new(
# https://googleapis.dev/ruby/google-api-client/latest/Google/Apis/ComputeV1/Disk.html
:creation_timestamp, :description, :disk_encryption_key, :guest_os_features, :id, :kind, :label_fingerprint, :labels,
:last_attach_timestamp, :last_detach_timestamp, :license_codes, :licenses, :name, :options,
:physical_block_size_bytes, :region, :replica_zones, :resource_policies, :self_link, :size_gb, :source_disk,
:source_disk_id, :source_image, :source_image_encryption_key, :source_image_id, :source_snapshot,
:source_snapshot_encryption_key, :source_snapshot_id, :status, :type, :users, :zone,
keyword_init: true
)
MockSnapshotList = Struct.new(
# https://googleapis.dev/ruby/google-api-client/latest/Google/Apis/ComputeV1/DiskList.html
:id, :items, :kind, :next_page_token, :self_link, :warning,
keyword_init: true
)
MockSnapshot = Struct.new(
# https://googleapis.dev/ruby/google-api-client/latest/Google/Apis/ComputeV1/Snapshot.html
:auto_created, :chain_name, :creation_timestamp, :description, :disk_size_gb, :download_bytes, :id, :kind,
:label_fingerprint, :labels, :license_codes, :licenses, :name, :self_link, :snapshot_encryption_key, :source_disk,
:source_disk_encryption_key, :source_disk_id, :status, :storage_bytes, :storage_bytes_status, :storage_locations,
keyword_init: true
)
MockAttachedDisk = Struct.new(
# https://googleapis.dev/ruby/google-api-client/latest/Google/Apis/ComputeV1/AttachedDisk.html
:auto_delete, :boot, :device_name, :disk_encryption_key, :disk_size_gb, :guest_os_features, :index,
:initialize_params, :interface, :kind, :licenses, :mode, :shielded_instance_initial_state, :source, :type,
keyword_init: true
)
# --------------------
# Main ComputeService Object
# --------------------
# Mock of the provider's connection object. In the EC2 specs the interesting
# methods (#instances, #client, #create_instances) are stubbed per-example
# with allow/expect, so only a couple of defaults are defined here.
MockComputeServiceConnection = Struct.new(
# https://googleapis.dev/ruby/google-api-client/latest/Google/Apis/ComputeV1/ComputeService.html
:key, :quota_user, :user_ip
) do
# Only methods we use are listed here
# NOTE(review): GCE-era default; EC2 specs stub #instances instead — confirm still needed
def get_instance
MockInstance.new
end
# NOTE(review): GCE-era default returning an operation-style result — confirm still needed
def insert_instance
MockResult.new
end
end
# -------------------------------------------------------------------------------------------------------------
# Mocking Methods
# -------------------------------------------------------------------------------------------------------------
# def mock_RbVmomi_VIM_ClusterComputeResource(options = {})
# options[:name] = 'Cluster' + rand(65536).to_s if options[:name].nil?
#
# mock = MockClusterComputeResource.new()
#
# mock.name = options[:name]
# # All cluster compute resources have a root Resource Pool
# mock.resourcePool = mock_RbVmomi_VIM_ResourcePool({:name => options[:name]})
#
# allow(mock).to receive(:is_a?) do |expected_type|
# expected_type == RbVmomi::VIM::ClusterComputeResource
# end
#
# mock
# end

153
spec/helpers.rb Normal file
View file

@ -0,0 +1,153 @@
# frozen_string_literal: true
require 'mock_redis'
# Memoised MockRedis instance shared across the spec helper methods.
def redis
  @redis ||= MockRedis.new
end
# Mock an object which represents a Logger. This stops the proliferation
# of allow(logger).to .... expectations in tests.
class MockLogger
# Swallow every log call, whatever the level or message.
def log(_level, string); end
end
# Assert the last Rack response is JSON, carries the expected 'ok' flag
# (true only when ok == true, matching the original branch behaviour), and
# has the expected HTTP status.
def expect_json(ok = true, http = 200)
  expect(last_response.header['Content-Type']).to eq('application/json')
  body_ok = JSON.parse(last_response.body)['ok']
  expect(body_ok).to eq(ok == true)
  expect(last_response.status).to eq(http)
end
# Store a token hash in redis with its owning user and creation timestamp.
def create_token(token, user, timestamp)
redis.hset("vmpooler__token__#{token}", 'user', user)
redis.hset("vmpooler__token__#{token}", 'created', timestamp)
end
# Fetch every stored field for a token as a hash.
def get_token_data(token)
redis.hgetall("vmpooler__token__#{token}")
end
# Whether any data is stored for the given token.
# BUG FIX: the original ignored its `_token` parameter and called
# get_token_data with no argument, which raises ArgumentError (the helper
# requires one argument). Pass the token through.
def token_exists?(token)
  result = get_token_data(token)
  result && !result.empty?
end
# Create a VM record and place it in the pool's 'ready' set.
def create_ready_vm(template, name, redis, token = nil)
create_vm(name, redis, token)
redis.sadd("vmpooler__ready__#{template}", name)
redis.hset("vmpooler__vm__#{name}", 'template', template)
end
# Create a VM record and place it in the pool's 'running' set with a host.
def create_running_vm(template, name, redis, token = nil, user = nil)
create_vm(name, redis, token, user)
redis.sadd("vmpooler__running__#{template}", name)
redis.hset("vmpooler__vm__#{name}", 'template', template)
redis.hset("vmpooler__vm__#{name}", 'checkout', Time.now)
redis.hset("vmpooler__vm__#{name}", 'host', 'host1')
end
# Create a VM record and place it in the pool's 'pending' set.
def create_pending_vm(template, name, redis, token = nil)
create_vm(name, redis, token)
redis.sadd("vmpooler__pending__#{template}", name)
redis.hset("vmpooler__vm__#{name}", 'template', template)
end
# Create the base VM hash with checkout/clone timestamps and optional token data.
def create_vm(name, redis, token = nil, user = nil)
redis.hset("vmpooler__vm__#{name}", 'checkout', Time.now)
redis.hset("vmpooler__vm__#{name}", 'clone', Time.now)
redis.hset("vmpooler__vm__#{name}", 'token:token', token) if token
redis.hset("vmpooler__vm__#{name}", 'token:user', user) if user
end
# Mark a VM as completed; optionally also record it in the pool's active hash.
def create_completed_vm(name, pool, redis, active = false)
redis.sadd("vmpooler__completed__#{pool}", name)
redis.hset("vmpooler__vm__#{name}", 'checkout', Time.now)
redis.hset("vmpooler__active__#{pool}", name, Time.now) if active
end
# Add a VM to the pool's 'discovered' set.
def create_discovered_vm(name, pool, redis)
redis.sadd("vmpooler__discovered__#{pool}", name)
end
# Record a VM as checked out and mark it as migrating.
def create_migrating_vm(name, pool, redis)
redis.hset("vmpooler__vm__#{name}", 'checkout', Time.now)
redis.sadd("vmpooler__migrating__#{pool}", name)
end
# Attach an arbitrary tag key/value to a VM record.
def create_tag(vm, tag_name, tag_value, redis)
redis.hset("vmpooler__vm__#{vm}", "tag:#{tag_name}", tag_value)
end
# Add a VM to the global migration set.
def add_vm_to_migration_set(name, redis)
redis.sadd('vmpooler__migration', name)
end
# Fetch the full redis hash for a VM.
def fetch_vm(vm)
redis.hgetall("vmpooler__vm__#{vm}")
end
# Set a single field on a VM's redis hash.
def set_vm_data(vm, key, value, redis)
redis.hset("vmpooler__vm__#{vm}", key, value)
end
# Queue a snapshot-revert task for a VM and record the snapshot on the VM hash.
# NOTE(review): the optional `snapshot` parameter precedes the required
# `redis` one — legal Ruby, but callers must pass redis positionally.
def snapshot_revert_vm(vm, snapshot = '12345678901234567890123456789012', redis)
redis.sadd('vmpooler__tasks__snapshot-revert', "#{vm}:#{snapshot}")
redis.hset("vmpooler__vm__#{vm}", "snapshot:#{snapshot}", '1')
end
# Queue a snapshot-creation task for a VM and record the snapshot on the VM hash.
def snapshot_vm(vm, snapshot = '12345678901234567890123456789012', redis)
redis.sadd('vmpooler__tasks__snapshot', "#{vm}:#{snapshot}")
redis.hset("vmpooler__vm__#{vm}", "snapshot:#{snapshot}", '1')
end
# Queue a disk-creation task for a VM with the given size (GB, as a string).
def disk_task_vm(vm, disk_size = '10', redis)
redis.sadd('vmpooler__tasks__disk', "#{vm}:#{disk_size}")
end
# Whether any queued snapshot task belongs to the given VM.
def has_vm_snapshot?(vm, redis)
redis.smembers('vmpooler__tasks__snapshot').any? do |snapshot|
instance, _sha = snapshot.split(':')
vm == instance
end
end
# Whether a snapshot-revert task exists for the VM; when `snapshot` is given,
# the task's snapshot id must match it too.
def vm_reverted_to_snapshot?(vm, redis, snapshot = nil)
redis.smembers('vmpooler__tasks__snapshot-revert').any? do |action|
instance, sha = action.split(':')
instance == vm and (snapshot ? (sha == snapshot) : true)
end
end
# Whether the VM is a member of the pool's 'ready' set (coerced to boolean).
def pool_has_ready_vm?(pool, vm, redis)
!!redis.sismember("vmpooler__ready__#{pool}", vm)
end
# Record an on-demand provisioning request with its score and requested
# platforms, plus optional token/user attribution.
def create_ondemand_request_for_test(request_id, score, platforms_string, redis, user = nil, token = nil)
redis.zadd('vmpooler__provisioning__request', score, request_id)
redis.hset("vmpooler__odrequest__#{request_id}", 'requested', platforms_string)
redis.hset("vmpooler__odrequest__#{request_id}", 'token:token', token) if token
redis.hset("vmpooler__odrequest__#{request_id}", 'token:user', user) if user
end
# Set the status field on an on-demand request.
def set_ondemand_request_status(request_id, status, redis)
redis.hset("vmpooler__odrequest__#{request_id}", 'status', status)
end
# Register a VM against an on-demand request's pool/alias set.
def create_ondemand_vm(vmname, request_id, pool, pool_alias, redis)
redis.sadd("vmpooler__#{request_id}__#{pool_alias}__#{pool}", vmname)
end
# Queue an on-demand creation task.
def create_ondemand_creationtask(request_string, score, redis)
redis.zadd('vmpooler__odcreate__task', score, request_string)
end
# Mark an on-demand request as being processed.
def create_ondemand_processing(request_id, score, redis)
redis.zadd('vmpooler__provisioning__processing', score, request_id)
end

19
spec/spec_helper.rb Normal file
View file

@ -0,0 +1,19 @@
# frozen_string_literal: true
require 'simplecov'
SimpleCov.start do
add_filter '/spec/'
end
require 'helpers'
require 'rspec'
require 'vmpooler'
require 'redis'
require 'vmpooler/metrics'
# Path to the repository root (one directory above spec/).
def project_root_dir
File.dirname(File.dirname(__FILE__))
end
# Path to the spec fixtures directory.
def fixtures_dir
File.join(project_root_dir, 'spec', 'fixtures')
end

View file

@ -0,0 +1,311 @@
require 'spec_helper'
require 'mock_redis'
require 'ec2_helper'
require 'vmpooler/providers/ec2'
RSpec::Matchers.define :relocation_spec_with_host do |value|
match { |actual| actual[:spec].host == value }
end
describe 'Vmpooler::PoolManager::Provider::Ec2' do
# Shared fixtures: mock logger/metrics, a minimal provider+pool config, and a
# redis connection pool backed by MockRedis.
let(:logger) { MockLogger.new }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:poolname) { 'debian-9' }
let(:provider_options) { { 'param' => 'value' } }
let(:zone) { 'us-west-2b' }
let(:region) { 'us-west-2'}
# Config is built from YAML so the structure matches what vmpooler loads.
let(:config) { YAML.load(<<~EOT
---
:config:
max_tries: 3
retry_factor: 10
:providers:
:ec2:
connection_pool_timeout: 1
zone: '#{zone}'
region: '#{region}'
:pools:
- name: '#{poolname}'
alias: [ 'mockpool' ]
amisize: 'a1.large'
template: 'ami-03c1b544a7566b3e5'
size: 5
timeout: 10
ready_ttl: 1440
provider: 'ec2'
EOT
)
}
let(:vmname) { 'vm17' }
let(:connection) { MockComputeServiceConnection.new }
let(:redis_connection_pool) do
Vmpooler::PoolManager::GenericConnectionPool.new(
metrics: metrics,
connpool_type: 'redis_connection_pool',
connpool_provider: 'testprovider',
size: 1,
timeout: 5
) { MockRedis.new }
end
# System under test: the EC2 provider wired up with the mocks above.
subject { Vmpooler::PoolManager::Provider::Ec2.new(config, logger, metrics, redis_connection_pool, 'ec2', provider_options) }
# Manual/live harness against a real AWS environment; kept `skip`ped so it
# never runs in CI. NOTE(review): contains an environment-specific hostname —
# confirm this block should ship at all.
describe '#manual tests live' do
context 'in itsysops' do
before(:each) {
config['provision'] = "true"
allow(subject).to receive(:dns).and_call_original
}
let(:vmname) { "instance-46" }
let(:poolname) { "ubuntu-2004-arm64" }
skip 'gets a vm' do
# result = subject.create_vm(poolname, vmname)
subject.provision_node_aws("ip-10-227-4-27.amz-dev.puppet.net", poolname)
# subject.create_snapshot(poolname, vmname, "foo")
#subject.create_disk(poolname, vmname, 10)
# a = subject.destroy_vm(poolname, vmname)
# b = subject.get_vm(poolname, vmname)
puts "done"
# subject.dns_teardown({'name' => vmname})
# subject.dns_setup({'name' => vmname, 'ip' => '1.2.3.5'})
end
end
end
describe '#vms_in_pool' do
let(:pool_config) { config[:pools][0] }
before(:each) do
allow(subject).to receive(:connect_to_aws).and_return(connection)
end
context 'Given an empty pool folder' do
it 'should return an empty array' do
# connection.instances yields [nil] — provider must skip nil entries
allow(connection).to receive(:instances).and_return([nil])
result = subject.vms_in_pool(poolname)
expect(result).to eq([])
end
end
context 'Given a pool with many VMs' do
let(:expected_vm_list) do
[
{ 'name' => 'vm1' },
{ 'name' => 'vm2' },
{ 'name' => 'vm3' }
]
end
before(:each) do
# Build one MockInstance per expected VM, carrying only the vm_name tag.
instance_list = []
expected_vm_list.each do |vm_hash|
tags = [MockTag.new(key: "vm_name", value: vm_hash['name'])]
mock_vm = MockInstance.new(tags: tags)
instance_list << mock_vm
end
expect(connection).to receive(:instances).and_return(instance_list)
end
it 'should list all VMs in the VM folder for the pool' do
result = subject.vms_in_pool(poolname)
expect(result).to eq(expected_vm_list)
end
end
end
describe '#get_vm' do
before(:each) do
allow(subject).to receive(:connect_to_aws).and_return(connection)
end
context 'when VM does not exist' do
it 'should return nil' do
allow(connection).to receive(:instances).and_return([nil])
expect(subject.get_vm(poolname, vmname)).to be_nil
end
end
context 'when VM exists but is missing information' do
before(:each) do
# Instance carries only the vm_name tag — all other fields stay nil.
tags = [MockTag.new(key: "vm_name", value: vmname)]
allow(connection).to receive(:instances).and_return([MockInstance.new(tags: tags)])
end
it 'should return a hash' do
expect(subject.get_vm(poolname, vmname)).to be_kind_of(Hash)
end
it 'should return the VM name' do
result = subject.get_vm(poolname, vmname)
expect(result['name']).to eq(vmname)
end
%w[boottime image_size status private_ip_address].each do |testcase|
it "should return nil for #{testcase}" do
result = subject.get_vm(poolname, vmname)
expect(result[testcase]).to be_nil
end
end
end
context 'when VM exists and contains all information' do
let(:vm_hostname) { "#{vmname}.demo.local" }
let(:boot_time) { Time.now }
let(:vm_object) do
# Minimal running instance with the tags generate_vm_hash reads.
state = Struct.new(:name)
runningstate = state.new "running"
MockInstance.new(
launch_time: boot_time,
state: runningstate,
instance_type: "a1.large",
private_ip_address: "1.1.1.1",
tags: [
MockTag.new(key: "vm_name", value: vmname),
MockTag.new(key: "pool", value: poolname)
]
)
end
let(:pool_info) { config[:pools][0] }
before(:each) do
allow(connection).to receive(:instances).and_return([vm_object])
end
it 'should return a hash' do
expect(subject.get_vm(poolname, vmname)).to be_kind_of(Hash)
end
it 'should return the VM name' do
result = subject.get_vm(poolname, vmname)
expect(result['name']).to eq(vmname)
end
it 'should return the template name' do
result = subject.get_vm(poolname, vmname)
expect(result['template']).to eq(pool_info['template'])
end
it 'should return the pool name' do
result = subject.get_vm(poolname, vmname)
expect(result['poolname']).to eq(pool_info['name'])
end
it 'should return the boot time' do
result = subject.get_vm(poolname, vmname)
expect(result['boottime']).to eq(boot_time)
end
it 'should return the status' do
result = subject.get_vm(poolname, vmname)
expect(result['status']).to eq("running")
end
# NOTE(review): duplicate description — this example checks image_size
it 'should return the status' do
result = subject.get_vm(poolname, vmname)
expect(result['image_size']).to eq("a1.large")
end
end
end
describe '#create_vm' do
before(:each) do
allow(subject).to receive(:connect_to_aws).and_return(connection)
end
context 'Given an invalid pool name' do
it 'should raise an error' do
expect { subject.create_vm('missing_pool', vmname) }.to raise_error(/missing_pool does not exist/)
end
end
context 'Given a vmname that already exists' do
before(:each) do
allow(subject).to receive(:get_vm).and_return({
'name' => "foobar",
'template' => "abc",
'status' => "running"
})
end
it 'should raise an error' do
expect { subject.create_vm(poolname, vmname) }.to raise_error(/Instance creation not attempted, .* already exists/)
end
end
context 'Given a successful creation' do
let(:client) { double }
before(:each) do
# get_vm returns nil first (pre-create check), then the created VM hash.
allow(subject).to receive(:get_vm).and_return(nil,{
'name' => vmname,
'template' => "abc",
'status' => "running"
})
# NOTE(review): Struct.new(:instance_id) is positional, so instance_id is
# set to the whole hash here; harmless only because wait_until is stubbed.
result = Struct.new(:instance_id)
batch_instance = result.new(instance_id: "abcfoo")
allow(connection).to receive(:create_instances).and_return([batch_instance])
allow(connection).to receive(:client).and_return(client)
allow(client).to receive(:wait_until)
end
it 'should return a hash' do
result = subject.create_vm(poolname, vmname)
expect(result.is_a?(Hash)).to be true
end
it 'should have the new VM name' do
result = subject.create_vm(poolname, vmname)
expect(result['name']).to eq(vmname)
end
end
end
describe '#destroy_vm' do
before(:each) do
allow(subject).to receive(:connect_to_aws).and_return(connection)
end
context 'Given a missing VM name' do
let(:client) { double }
before(:each) do
# No instance matches the tag filter — destroy_vm should short-circuit.
allow(connection).to receive(:instances).and_return([nil])
allow(connection).to receive(:client).and_return(client)
allow(client).to receive(:wait_until)
end
it 'should return true' do
expect(subject.destroy_vm(poolname, 'missing_vm')).to be true
end
end
context 'Given a running VM' do
let(:instance) { double("instance") }
let(:client) { double }
before(:each) do
allow(connection).to receive(:instances).and_return([instance])
expect(instance).to receive(:terminate)
allow(connection).to receive(:client).and_return(client)
allow(client).to receive(:wait_until)
allow(instance).to receive(:id)
end
it 'should return true' do
expect(subject.destroy_vm(poolname, vmname)).to be true
end
end
end
end

View file

@ -0,0 +1,9 @@
require 'rspec'
# Smoke test: the gem's VERSION constant is defined and non-nil.
describe 'VmpoolerProviderEc2' do
context 'when creating class ' do
it 'sets a version' do
expect(VmpoolerProviderEc2::VERSION).not_to be_nil
end
end
end

View file

@ -0,0 +1,33 @@
# Make lib/ loadable so the version file can be required below.
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'vmpooler-provider-ec2/version'
Gem::Specification.new do |s|
s.name = 'vmpooler-provider-ec2'
s.version = VmpoolerProviderEc2::VERSION
s.authors = ['Puppet']
s.email = ['support@puppet.com']
s.summary = 'EC2 provider for VMPooler'
s.homepage = 'https://github.com/puppetlabs/vmpooler-provider-ec2'
s.license = 'Apache-2.0'
# NOTE(review): declares >= 2.3.0 while the release workflow builds on 2.5.8 — confirm intended floor
s.required_ruby_version = Gem::Requirement.new('>= 2.3.0')
# Ship only lib/ in the packaged gem.
s.files = Dir[ "lib/**/*" ]
s.require_paths = ["lib"]
s.add_dependency 'aws-sdk-ec2', '~> 1'
# NOTE(review): pins a release candidate (6.2.0.rc2); confirm whether a stable net-ssh release should be used
s.add_dependency 'net-ssh', '~> 6.2.0.rc2'
s.add_development_dependency 'vmpooler', '>= 1.3.0', '~> 2.3'
# Testing dependencies
s.add_development_dependency 'climate_control', '>= 0.2.0'
s.add_development_dependency 'mock_redis', '>= 0.17.0'
s.add_development_dependency 'pry'
s.add_development_dependency 'rack-test', '>= 0.6'
s.add_development_dependency 'rspec', '>= 3.2'
s.add_development_dependency 'rubocop', '~> 1.1.0'
s.add_development_dependency 'simplecov', '>= 0.11.2'
s.add_development_dependency 'thor', '~> 1.0', '>= 1.0.1'
s.add_development_dependency 'yarjuf', '>= 2.0'
end

165
vmpooler.yaml.example Normal file
View file

@ -0,0 +1,165 @@
---
:providers:
# :providers:
#
# This section contains the VM providers for VMs and Pools
# The currently supported backing services are:
# - vsphere
# - dummy
# - gce
# - aws
#
# - provider_class
# For multiple providers, specify one of the supported backing services (vsphere or dummy or gce or aws)
# (optional: will default to its parent :key: name eg. 'aws')
#
# - purge_unconfigured_resources
# Enable purging of VMs, disks and snapshots
# By default will purge resources in the project without a "pool" label, or a "pool" label with the value for an unconfigured pool
# An optional allowlist can be provided to ignore purging certain VMs based on pool labels
# Setting this on the provider will enable purging for the provider
# Expects a boolean value
# (optional; default: false)
#
# - resources_allowlist
# Specify pool tag values that should be ignored when purging VMs. For example if a VM's 'pool' tag is
# set to 'donotdelete' and there is no pool with that name configured, it would normally be purged,
# unless you add a resources_allowlist entry "donotdelete", in which case it is ignored and not purged.
# Additionally the "" (empty string) has a special meaning whereby VMs that do not have the "pool" tag are not purged.
# Additionally, if you want to ignore VMs with an arbitrary tag, include it in the allowlist as a string with the separator "="
# between the tag name and value, eg. user=bob would ignore VMs that include the tag "user" with the value "bob"
# If any one of the above condition is met, the resource is ignored and not purged
# This option is only evaluated when 'purge_unconfigured_resources' is enabled
# Expects an array of strings specifying the allowlisted labels by name. The strings should be all lower case, since
# no uppercase char is allowed in a label
# (optional; default: nil)
#
# If you want to support more than one provider with different parameters you have to specify the
# backing service in the provider_class configuration parameter for example 'vsphere' or 'dummy'. Each pool can specify
# the provider to use.
#
# Multiple providers example:
# Example of two named AWS providers sharing the same backing service.
# FIX: region must be a real AWS region identifier; 'us-west' is invalid and
# contradicted the 'us-west-2b' zone — corrected to 'us-west-2'.
:aws1:
  provider_class: 'aws'
  zone: 'us-west-2b'
  region: 'us-west-2'
:aws2:
  provider_class: 'aws'
  zone: 'us-west-2b'
  region: 'us-west-2'
  resources_allowlist:
    - "user=bob"
    - ""
    - "custom-pool"
# :aws:
#
# This section contains the global variables for the aws provider
# some of them can be overwritten at the pool level
#
# Available configuration parameters:
#
# - region
# The AWS region name to use when creating/deleting resources
# (required)
# - zone
# The AWS zone name to use when creating/deleting resources (vms, disks etc)
# Can be overwritten at the pool level
# (required)
# - amisize
# The AMI machine type to use eg a1.large
# Can be overwritten at the pool level
# (required)
# - volume_size
# A custom root volume size to use in GB, the default is whatever the default for the AMI used is
# (optional)
# - dns_zone_resource_name
# The name given to the DNS zone ressource. This is not the domain, but the name identifier of a zone eg example-com
# (optional) when not set, the dns setup / teardown is skipped
# - domain
# Overwrites the global domain parameter. This should match the dns zone domain set for the dns_zone_resource_name.
# It is used to infer the domain part of the FQDN ie $vm_name.$domain
# When setting multiple providers at the same time, this value should be set for each AWS pool.
# (optional) If not explicitly set, the FQDN is inferred using the global 'domain' config parameter
# Example:
# Example AWS provider configuration.
# FIX: 'us-west' is not a valid AWS region and did not contain the
# 'us-west-2b' zone — corrected to 'us-west-2'.
:aws:
  region: 'us-west-2'
  zone: 'us-west-2b'
  amisize: 'a1.small'
  volume_size: '10'
  dns_zone_resource_name: 'subdomain-example-com'
  domain: 'subdomain.example.com'
# :pools:
#
# This section contains a list of virtual machine 'pools' for vmpooler to
# create and maintain.
#
# Available configuration parameters (per-pool):
#
# - name
# The name of the pool.
# (required)
#
# - alias
# Other names this pool can be requested as.
# (optional)
#
# - template
# The template or virtual machine target to spawn clones from, in AWS this means an AMI id.
# (required)
#
# - size
# The number of waiting VMs to keep in a pool.
# (required)
#
# - provider
# The name of the VM provider which manage this pool. This should match
# a name in the :providers: section above e.g. vsphere
# (required; will default to vsphere for backwards compatibility)
# If you have more than one provider, this is where you would choose which
# one to use for this pool
#
# - timeout
# How long (in minutes) before marking a clone in 'pending' queues as 'failed' and retrying.
# This setting overrides any globally-configured timeout setting.
# (optional; default: '15')
#
# - ready_ttl
# How long (in minutes) to keep VMs in 'ready' queues before destroying.
# (optional; default: no limit)
#
# - check_loop_delay_min (optional; default: 5) seconds
# - check_loop_delay_max (optional; default: same as check_loop_delay_min) seconds
# - check_loop_delay_decay (optional; default: 2.0) Must be greater than 1.0
# See the :config: section for information about these settings
#
# Provider specific pool settings
#
# EC2 provider
# - zone
# The zone to create the VMs in
# (optional: default is global provider zone value)
# - amisize
# The AMI machine type to use eg a1.large
# (optional: default is global provider amisize value)
# - volume_size
# (optional: default is global provider volume_size value)
# - provision
# Set to true to run the extra aws setup steps (SSH, keys etc) once the VM is available
# (optional: default to false)
# Example:
:pools:
- name: 'almalinux-x86_64'
alias: [ 'almalinux-64', 'almalinux-amd64' ]
template: 'ami-foobar1234'
size: 5
timeout: 15
ready_ttl: 1440
provider: aws
zone: 'us-new-zone'
amisize: 'a1.large'
volume_size: '20'