From c3c1a8e1e50b17de7d1ca62d27be51f51d31c77f Mon Sep 17 00:00:00 2001 From: Tanisha Payne Date: Thu, 9 Jun 2022 15:29:54 -0400 Subject: [PATCH 1/4] Added aws dependency and renamed directories rename gce to aws rename gce to aws rename gce to aws create_vm method implimented create_vm method connected to ec2 setup tags properly, and retireve then in get_vm begin disk and snapshop, but we dont have perms add specs for get_vm and vms_in_pool add spec for create_vm add spec for destroy_vm update readme add a new class lib/vmpooler/aws_setup.rb to do the provisioning of nodes adding spec tests adding net:ssh lib for the provisioning of nodes adding option to provision vm once ready fix aws_setup setup of nodes once running --- .github/dependabot.yml | 8 + .github/workflows/release.yml | 37 + .github/workflows/testing.yml | 47 ++ .gitignore | 11 + .jrubyrc | 2 + .rubocop.yml | 53 ++ Gemfile | 13 + LICENSE | 201 +++++ README.md | 53 +- Rakefile | 25 + lib/vmpooler-provider-aws/version.rb | 5 + lib/vmpooler/aws_setup.rb | 109 +++ lib/vmpooler/providers/aws.rb | 522 ++++++++++++ spec/ec2_helper.rb | 110 +++ spec/helpers.rb | 153 ++++ spec/spec_helper.rb | 19 + spec/unit/providers/aws_spec.rb | 309 +++++++ spec/unit/providers/gce_spec.rb | 767 ++++++++++++++++++ .../vmpooler_provider_aws_spec.rb | 9 + vmpooler-provider-aws.gemspec | 33 + vmpooler.yaml.example | 165 ++++ 21 files changed, 2650 insertions(+), 1 deletion(-) create mode 100644 .github/dependabot.yml create mode 100644 .github/workflows/release.yml create mode 100644 .github/workflows/testing.yml create mode 100644 .gitignore create mode 100644 .jrubyrc create mode 100644 .rubocop.yml create mode 100644 Gemfile create mode 100644 LICENSE create mode 100644 Rakefile create mode 100644 lib/vmpooler-provider-aws/version.rb create mode 100644 lib/vmpooler/aws_setup.rb create mode 100644 lib/vmpooler/providers/aws.rb create mode 100644 spec/ec2_helper.rb create mode 100644 spec/helpers.rb create mode 100644 spec/spec_helper.rb create mode 100644 spec/unit/providers/aws_spec.rb create mode 100644 spec/unit/providers/gce_spec.rb create mode 100644 spec/vmpooler-provider-aws/vmpooler_provider_aws_spec.rb create mode 100644 vmpooler-provider-aws.gemspec create mode 100644 vmpooler.yaml.example diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..c8f8016 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,8 @@ +version: 2 +updates: +- package-ecosystem: bundler + directory: "/" + schedule: + interval: daily + time: "13:00" + open-pull-requests-limit: 10 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..e71ec28 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,37 @@ +name: Release + +on: workflow_dispatch + +jobs: + release: + runs-on: ubuntu-latest + if: github.repository == 'puppetlabs/vmpooler-provider-gce' + steps: + - uses: actions/checkout@v2 + - name: Get Version + id: gv + run: | + echo "::set-output name=ver::$(grep VERSION lib/vmpooler-provider-gce/version.rb |rev |cut -d "'" -f2 |rev)" + - name: Tag Release + uses: ncipollo/release-action@v1 + with: + tag: ${{ steps.gv.outputs.ver }} + token: ${{ secrets.GITHUB_TOKEN }} + draft: false + prerelease: false + generateReleaseNotes: true + - name: Install Ruby 2.5.8 + uses: ruby/setup-ruby@v1 + with: + ruby-version: '2.5.8' + - name: Build gem + run: gem build *.gemspec + - name: Publish gem + run: | + mkdir -p $HOME/.gem + touch $HOME/.gem/credentials + chmod 0600 
$HOME/.gem/credentials + printf -- "---\n:rubygems_api_key: ${GEM_HOST_API_KEY}\n" > $HOME/.gem/credentials + gem push *.gem + env: + GEM_HOST_API_KEY: '${{ secrets.RUBYGEMS_AUTH_TOKEN }}' diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml new file mode 100644 index 0000000..501403f --- /dev/null +++ b/.github/workflows/testing.yml @@ -0,0 +1,47 @@ +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. +# This workflow will download a prebuilt Ruby version, install dependencies and run tests with Rake +# For more information see: https://github.com/marketplace/actions/setup-ruby-jruby-and-truffleruby + +name: Testing + +on: + pull_request: + branches: + - main + +jobs: + rubocop: + runs-on: ubuntu-latest + strategy: + matrix: + ruby-version: + - '2.5.8' + steps: + - uses: actions/checkout@v2 + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: ${{ matrix.ruby-version }} + bundler-cache: true # runs 'bundle install' and caches installed gems automatically + - name: Run Rubocop + run: bundle exec rake rubocop + + spec_tests: + runs-on: ubuntu-latest + strategy: + matrix: + ruby-version: + - '2.5.8' + - 'jruby-9.2.12.0' + steps: + - uses: actions/checkout@v2 + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: ${{ matrix.ruby-version }} + bundler-cache: true # runs 'bundle install' and caches installed gems automatically + - name: Run spec tests + run: bundle exec rake test diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..95e94de --- /dev/null +++ b/.gitignore @@ -0,0 +1,11 @@ +.bundle/ +.vagrant/ +coverage/ +vendor/ +.dccache +.ruby-version +Gemfile.local +results.xml +/vmpooler.yaml +.idea +*.json diff --git a/.jrubyrc b/.jrubyrc new file mode 100644 index 0000000..d875079 --- /dev/null +++ b/.jrubyrc @@ -0,0 +1,2 @@ +# for simplecov to work in jruby, without this we are getting errors when debugging spec tests +debug.fullTrace=true \ No newline at end of file diff --git a/.rubocop.yml b/.rubocop.yml new file mode 100644 index 0000000..3333234 --- /dev/null +++ b/.rubocop.yml @@ -0,0 +1,53 @@ +AllCops: + Include: + - 'lib/**/*.rb' + Exclude: + - 'scripts/**/*' + - 'spec/**/*' + - 'vendor/**/*' + - Gemfile + - Rakefile + +# These short variable names make sense as exceptions to the rule, but generally I think short variable names do hurt readability +Naming/MethodParameterName: + AllowedNames: + - vm + - dc + - s + - x + - f + +#new cops: +Lint/DuplicateRegexpCharacterClassElement: # (new in 1.1) + Enabled: true +Lint/EmptyBlock: # (new in 1.1) + Enabled: true +Lint/ToEnumArguments: # (new in 1.1) + Enabled: true +Lint/UnmodifiedReduceAccumulator: # (new in 1.1) + Enabled: true +Style/ArgumentsForwarding: # (new in 1.1) + Enabled: false +Style/DocumentDynamicEvalDefinition: # (new in 1.1) + Enabled: true +Style/SwapValues: # (new in 1.1) + Enabled: false + +#disabled + +Metrics/AbcSize: + Enabled: false +Metrics/ClassLength: + Enabled: false +Metrics/CyclomaticComplexity: + Enabled: false +Metrics/MethodLength: + Enabled: false +Metrics/PerceivedComplexity: + Enabled: false +Metrics/ParameterLists: + Enabled: false +Layout/LineLength: + Enabled: false +Metrics/BlockLength: + Enabled: false \ No newline at end of file diff --git a/Gemfile b/Gemfile new file mode 100644 index 0000000..122d6b5 --- /dev/null +++ b/Gemfile @@ -0,0 +1,13 @@ +source ENV['GEM_SOURCE'] || 
'https://rubygems.org' + +gemspec + +# Evaluate Gemfile.local if it exists +if File.exists? "#{__FILE__}.local" + instance_eval(File.read("#{__FILE__}.local")) +end + +# Evaluate ~/.gemfile if it exists +if File.exists?(File.join(Dir.home, '.gemfile')) + instance_eval(File.read(File.join(Dir.home, '.gemfile'))) +end diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/README.md b/README.md
index 47c0dfb..786f8eb 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,52 @@
-# vmpooler-provider-aws
\ No newline at end of file
+# vmpooler-provider-aws
+
+This is a provider for [VMPooler](https://github.com/puppetlabs/vmpooler) that allows using AWS to create instances, disks,
+and snapshots, or to destroy instances for specific pools.
+
+## Usage
+
+Include this gem in the same Gemfile that you use to install VMPooler itself and then define one or more pools with the `provider` key set to `aws`. VMPooler will take care of the rest (a minimal example Gemfile is sketched below).
+See what configuration is needed for this provider in the [example file](https://github.com/puppetlabs/vmpooler-provider-aws/blob/main/vmpooler.yaml.example).
+
+Examples of deploying VMPooler with extra providers can be found in the [puppetlabs/vmpooler-deployment](https://github.com/puppetlabs/vmpooler-deployment) repository.
+
+AWS authorization is handled via two required environment variables:
+
+1. ABS_AWS_ACCESS_KEY
+2. ABS_AWS_SECRET_KEY
+
+### Provisioning the new nodes
+
+When you add `provision: true` to a pool's config, the new VMs are also initialized with extra steps that set up the sshd config via Net::SSH.
+These steps expect two environment variables:
+1. ROOT_KEYS_SCRIPT: (optional) the URI of a script (e.g. an HTTPS URL on GitHub) that will be run to set up keys. If not set, this step is skipped
+2. KEY_FILE_LOCATION: (required) the location on local disk of the SSH key VMPooler uses to connect via SSH to the AWS node
+
+### DNS
+AWS will set up a private IP and a private DNS hostname for the VM once it is running. Optionally, a human-readable DNS entry can be created to resolve the VMPooler-generated (`spicy-proton`) FQDN.
+
+DNS is integrated via Google's CloudDNS service. To enable it, a CloudDNS zone name must be provided in the config (see `dns_zone_resource_name` in the example YAML file).
+
+An A record is then created in that zone upon instance creation with the VM's internal IP, and deleted when the instance is destroyed.
+
+### Tags
+This provider adds tags to all resources that it manages:
+
+|resource|tags|note|
+|---|---|---|
+|instance|vm=$vm_name, pool=$pool_name|for example vm=foo-bar, pool=pool1|
+|disk|vm=$vm_name, pool=$pool_name|for example vm=foo-bar and pool=pool1|
+|snapshot|snapshot_name=$snapshot_name, vm=$vm_name, pool=$pool_name| for example snapshot_name=snap1, vm=foo-bar, pool=pool1|
+
+Also see VMPooler's optional `purge_unconfigured_resources` setting, which is used to delete any resources found that
+do not have the pool tag, and can be configured to allow a specific list of unconfigured pool names.
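+
+As a sketch of the Usage section above, a minimal Gemfile pulling in this provider might look like the following
+(the exact gem list is illustrative, not prescriptive; adapt it to your VMPooler deployment):
+
+```ruby
+# Gemfile for a VMPooler deployment that loads the AWS provider.
+# VMPooler picks the provider up once a pool sets `provider: 'aws'`.
+source 'https://rubygems.org'
+
+gem 'vmpooler'
+gem 'vmpooler-provider-aws'
+```
+
+The `ABS_AWS_ACCESS_KEY` and `ABS_AWS_SECRET_KEY` environment variables described above must be set in the
+environment of the VMPooler process so the provider can authenticate.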
+ +### Pre-requisite + +- An IAM user must exist in the target AWS account with permissions to create, delete vms etc +- if using DNS, a DNS zone needs to be created in CloudDNS, and configured in the provider's config section with the name of that zone (dns_zone_resource_name). When not specified, the DNS setup and teardown is skipped. + + +## License + +vmpooler-provider-aws is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.html). See the [LICENSE](LICENSE) file for more details. \ No newline at end of file diff --git a/Rakefile b/Rakefile new file mode 100644 index 0000000..76d6a80 --- /dev/null +++ b/Rakefile @@ -0,0 +1,25 @@ +require 'rspec/core/rake_task' + +rubocop_available = Gem::Specification::find_all_by_name('rubocop').any? +require 'rubocop/rake_task' if rubocop_available + +desc 'Run rspec tests with coloring.' +RSpec::Core::RakeTask.new(:test) do |t| + t.rspec_opts = %w[--color --format documentation] + t.pattern = 'spec/' +end + +desc 'Run rspec tests and save JUnit output to results.xml.' +RSpec::Core::RakeTask.new(:junit) do |t| + t.rspec_opts = %w[-r yarjuf -f JUnit -o results.xml] + t.pattern = 'spec/' +end + +if rubocop_available + desc 'Run RuboCop' + RuboCop::RakeTask.new(:rubocop) do |task| + task.options << '--display-cop-names' + end +end + +task :default => [:test] diff --git a/lib/vmpooler-provider-aws/version.rb b/lib/vmpooler-provider-aws/version.rb new file mode 100644 index 0000000..a8e2ab1 --- /dev/null +++ b/lib/vmpooler-provider-aws/version.rb @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +module VmpoolerProviderAws + VERSION = '0.0.1' +end diff --git a/lib/vmpooler/aws_setup.rb b/lib/vmpooler/aws_setup.rb new file mode 100644 index 0000000..bd8f7f4 --- /dev/null +++ b/lib/vmpooler/aws_setup.rb @@ -0,0 +1,109 @@ +require 'net/ssh' +# This class connects to existing running VMs via NET:SSH +# it uses a local key to do so and then setup SSHD on the hosts to enable +# dev and CI users to connect. +module Vmpooler + class PoolManager + class AwsSetup + ROOT_KEYS_SCRIPT = ENV["ROOT_KEYS_SCRIPT"] + ROOT_KEYS_SYNC_CMD = "curl -k -o - -L #{ROOT_KEYS_SCRIPT} | %s" + + def self.setup_node_by_ssh(host, platform) + @key_file = ENV["KEY_FILE_LOCATION"] || '/app/abs/.ssh/abs-aws-ec2.rsa' + conn = check_ssh_accepting_connections(host, platform) + configure_host(host, platform, conn) + end + + # For an Amazon Linux AMI, the user name is ec2-user. + # + # For a Centos AMI, the user name is centos. + # + # For a Debian AMI, the user name is admin or root. + # + # For a Fedora AMI, the user name is ec2-user or fedora. + # + # For a RHEL AMI, the user name is ec2-user or root. + # + # For a SUSE AMI, the user name is ec2-user or root. + # + # For an Ubuntu AMI, the user name is ubuntu. 
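+      #
+      # Illustrative only (hypothetical platform names; matching is by substring):
+      #   get_user('centos-7-x86_64')   #=> 'centos'
+      #   get_user('ubuntu-2004-arm64') #=> 'ubuntu'
+      #   get_user('amazon-linux-2')    #=> 'ec2-user'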
+ + def self.get_user(platform) + if platform =~ /centos/ + user = 'centos' + elsif platform =~ /ubuntu/ + user = 'ubuntu' + elsif platform =~ /debian/ + user = 'root' + else + user = 'ec2-user' + end + user + end + + def self.check_ssh_accepting_connections(host, platform) + retries = 0 + begin + user = get_user(platform) + netssh_jruby_workaround + conn = Net::SSH.start(host, user, :keys => @key_file, :timeout => 10) + return conn + rescue Net::SSH::ConnectionTimeout, Errno::ECONNREFUSED => err + puts "Requested instances do not have sshd ready yet, try again: #{err}" + sleep 1 + retry if (retries += 1) < 300 + end + end + + # Configure the aws host by enabling root and setting the hostname + # @param host [String] the internal dns name of the instance + def self.configure_host(host, platform, ssh) + ssh.exec!('sudo cp -r .ssh /root/.') + ssh.exec!("sudo sed -ri 's/^#?PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config") + ssh.exec!("sudo hostname #{host}") + if platform =~ /amazon/ + # Amazon Linux requires this to preserve host name changes across reboots. + ssh.exec!("sudo sed -ie '/^HOSTNAME/ s/=.*/=#{host}/' /etc/sysconfig/network") + end + restart_sshd(host, platform, ssh) + sync_root_keys(host, platform) + end + + def self.restart_sshd(host, platform, ssh) + ssh.open_channel do |channel| + channel.request_pty do |ch, success| + raise "can't get pty request" unless success + if platform =~ /centos|el-|redhat|fedora|eos|amazon/ + ch.exec('sudo -E /sbin/service sshd reload') + elsif platform =~ /debian|ubuntu|cumulus/ + ch.exec('sudo su -c \"service sshd restart\"') + elsif platform =~ /arch|centos-7|el-7|redhat-7|fedora-(1[4-9]|2[0-9])/ + ch.exec('sudo -E systemctl restart sshd.service') + else + services.logger.error("Attempting to update ssh on non-supported platform: #{host}: #{platform}") + end + end + end + ssh.loop + end + + def self.sync_root_keys(host, platform) + unless ROOT_KEYS_SCRIPT.nil? + user = "root" + netssh_jruby_workaround + Net::SSH.start(host, user, :keys => @key_file) do |ssh| + ssh.exec!(ROOT_KEYS_SYNC_CMD % "env PATH=\"/usr/gnu/bin:$PATH\" bash") + end + end + end + + # issue when using net ssh 6.1.0 with jruby + # https://github.com/jruby/jruby-openssl/issues/105 + # this will turn off some algos that match /^ecd(sa|h)-sha2/ + def self.netssh_jruby_workaround + Net::SSH::Transport::Algorithms::ALGORITHMS.values.each { |algs| algs.reject! { |a| a =~ /^ecd(sa|h)-sha2/ } } + Net::SSH::KnownHosts::SUPPORTED_TYPE.reject! { |t| t =~ /^ecd(sa|h)-sha2/ } + end + end + end +end diff --git a/lib/vmpooler/providers/aws.rb b/lib/vmpooler/providers/aws.rb new file mode 100644 index 0000000..4d491e8 --- /dev/null +++ b/lib/vmpooler/providers/aws.rb @@ -0,0 +1,522 @@ +# frozen_string_literal: true + +require 'bigdecimal' +require 'bigdecimal/util' +require 'vmpooler/providers/base' +require 'aws-sdk-ec2' +require 'vmpooler/aws_setup' + +module Vmpooler + class PoolManager + class Provider + # This class represent a GCE provider to CRUD resources in a gce cloud. + class Aws < Vmpooler::PoolManager::Provider::Base + # The connection_pool method is normally used only for testing + attr_reader :connection_pool + + def initialize(config, logger, metrics, redis_connection_pool, name, options) + super(config, logger, metrics, redis_connection_pool, name, options) + + @aws_access_key = ENV['ABS_AWS_ACCESS_KEY'] + @aws_secret_key = ENV['ABS_AWS_SECRET_KEY'] + + task_limit = global_config[:config].nil? || global_config[:config]['task_limit'].nil? ? 
10 : global_config[:config]['task_limit'].to_i + # The default connection pool size is: + # Whatever is biggest from: + # - How many pools this provider services + # - Maximum number of cloning tasks allowed + # - Need at least 2 connections so that a pool can have inventory functions performed while cloning etc. + default_connpool_size = [provided_pools.count, task_limit, 2].max + connpool_size = provider_config['connection_pool_size'].nil? ? default_connpool_size : provider_config['connection_pool_size'].to_i + # The default connection pool timeout should be quite large - 60 seconds + connpool_timeout = provider_config['connection_pool_timeout'].nil? ? 60 : provider_config['connection_pool_timeout'].to_i + logger.log('d', "[#{name}] ConnPool - Creating a connection pool of size #{connpool_size} with timeout #{connpool_timeout}") + @connection_pool = Vmpooler::PoolManager::GenericConnectionPool.new( + metrics: metrics, + connpool_type: 'provider_connection_pool', + connpool_provider: name, + size: connpool_size, + timeout: connpool_timeout + ) do + logger.log('d', "[#{name}] Connection Pool - Creating a connection object") + # Need to wrap the vSphere connection object in another object. The generic connection pooler will preserve + # the object reference for the connection, which means it cannot "reconnect" by creating an entirely new connection + # object. Instead by wrapping it in a Hash, the Hash object reference itself never changes but the content of the + # Hash can change, and is preserved across invocations. + new_conn = #connect to aws + { connection: new_conn } + end + @redis = redis_connection_pool + end + + # name of the provider class + def name + 'aws' + end + + def connection + @connection_pool.with_metrics do |pool_object| + return ensured_aws_connection(pool_object) + end + end + + def dns + @dns + end + + # main configuration options + def region + return provider_config['region'] if provider_config['region'] + end + + # main configuration options, overridable for each pool + def zone(pool_name) + return pool_config(pool_name)['zone'] if pool_config(pool_name)['zone'] + return provider_config['zone'] if provider_config['zone'] + end + + def amisize(pool_name) + return pool_config(pool_name)['amisize'] if pool_config(pool_name)['amisize'] + return provider_config['amisize'] if provider_config['amisize'] + end + + def volume_size(pool_name) + return pool_config(pool_name)['volume_size'] if pool_config(pool_name)['volume_size'] + return provider_config['volume_size'] if provider_config['volume_size'] + end + + #dns + def domain + provider_config['domain'] + end + + def dns_zone_resource_name + provider_config['dns_zone_resource_name'] + end + + #subnets + def get_subnet_id(pool_name) + case zone(pool_name) + when 'us-west-2b' + return 'subnet-0fe90a688844f6f26' + when 'us-west-2a' + return 'subnet-091b436f' + end + end + + def to_provision(pool_name) + return pool_config(pool_name)['provision'] if pool_config(pool_name)['provision'] + end + + # Base methods that are implemented: + + # vms_in_pool lists all the VM names in a pool, which is based on the VMs + # having a tag "pool" that match a pool config name. 
+ # inputs + # [String] pool_name : Name of the pool + # returns + # empty array [] if no VMs found in the pool + # [Array] + # [Hashtable] + # [String] name : the name of the VM instance (unique for whole project) + def vms_in_pool(pool_name) + debug_logger('vms_in_pool') + vms = [] + pool = pool_config(pool_name) + raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil? + + zone = zone(pool_name) + filters = [{ + name: "tag:pool", + values: [pool_name], + }] + instance_list = connection.instances(filters: filters) + + return vms if instance_list.first.nil? + + instance_list.each do |vm| + vms << { 'name' => vm.tags.detect {|f| f.key == 'vm_name' }&.value || "vm_name not found in tags" } + end + debug_logger(vms) + vms + end + + # inputs + # [String] pool_name : Name of the pool + # [String] vm_name : Name of the VM to find + # returns + # nil if VM doesn't exist name, template, poolname, boottime, status, image_size, private_ip_address + # [Hastable] of the VM + # [String] name : The name of the resource, provided by the client when initially creating the resource + # [String] template : This is the name of template + # [String] poolname : Name of the pool the VM + # [Time] boottime : Time when the VM was created/booted + # [String] status : One of the following values: pending, running, shutting-down, terminated, stopping, stopped + # [String] image_size : The EC2 image size eg a1.large + # [String] private_ip_address: The private IPv4 address + def get_vm(pool_name, vm_name) + debug_logger('get_vm') + vm_hash = nil + + filters = [{ + name: "tag:vm_name", + values: [vm_name], + }] + instances = connection.instances(filters: filters).first + return vm_hash if instances.nil? + + vm_hash = generate_vm_hash(instances, pool_name) + debug_logger("vm_hash #{vm_hash}") + vm_hash + end + + # create_vm creates a new VM with a default network from the config, + # a initial disk named #{new_vmname}-disk0 that uses the 'template' as its source image + # and labels added for vm and pool + # and an instance configuration for machine_type from the config and + # labels vm and pool + # having a label "pool" that match a pool config name. + # inputs + # [String] pool : Name of the pool + # [String] new_vmname : Name to give the new VM + # returns + # [Hashtable] of the VM as per get_vm(pool_name, vm_name) + def create_vm(pool_name, new_vmname) + debug_logger('create_vm') + pool = pool_config(pool_name) + raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil? 
+ raise("Instance creation not attempted, #{new_vmname} already exists") if get_vm(pool_name, new_vmname) + + subnet_id = get_subnet_id(pool_name) + tag = [ + { + resource_type: "instance", # accepts capacity-reservation, client-vpn-endpoint, customer-gateway, carrier-gateway, dedicated-host, dhcp-options, egress-only-internet-gateway, elastic-ip, elastic-gpu, export-image-task, export-instance-task, fleet, fpga-image, host-reservation, image, import-image-task, import-snapshot-task, instance, instance-event-window, internet-gateway, ipam, ipam-pool, ipam-scope, ipv4pool-ec2, ipv6pool-ec2, key-pair, launch-template, local-gateway, local-gateway-route-table, local-gateway-virtual-interface, local-gateway-virtual-interface-group, local-gateway-route-table-vpc-association, local-gateway-route-table-virtual-interface-group-association, natgateway, network-acl, network-interface, network-insights-analysis, network-insights-path, network-insights-access-scope, network-insights-access-scope-analysis, placement-group, prefix-list, replace-root-volume-task, reserved-instances, route-table, security-group, security-group-rule, snapshot, spot-fleet-request, spot-instances-request, subnet, subnet-cidr-reservation, traffic-mirror-filter, traffic-mirror-session, traffic-mirror-target, transit-gateway, transit-gateway-attachment, transit-gateway-connect-peer, transit-gateway-multicast-domain, transit-gateway-route-table, volume, vpc, vpc-endpoint, vpc-endpoint-service, vpc-peering-connection, vpn-connection, vpn-gateway, vpc-flow-log + tags: [ + { + key: "vm_name", + value: new_vmname, + }, + { + key: "pool", + value: pool_name, + }, + { + key: "lifetime", + value: get_current_lifetime(new_vmname), + }, + { + key: "created_by", + value: get_current_user(new_vmname), + }, + { + key: "job_url", + value: get_current_job_url(new_vmname), + }, + { + key: "organization", + value: "engineering", + }, + { + key: "portfolio", + value: "ds-ci", + }, + + ], + }, + ] + config = { + min_count: 1, + max_count: 1, + image_id: pool['template'], + monitoring: {:enabled => true}, + key_name: 'always-be-scheduling', + security_group_ids: ['sg-697fb015'], + instance_type: amisize(pool_name), + disable_api_termination: false, + instance_initiated_shutdown_behavior: 'terminate', + tag_specifications: tag, + subnet_id: subnet_id + } + + if volume_size(pool_name) + config[:block_device_mappings] = get_block_device_mappings(config['image_id'], volume_size(pool_name)) + end + + debug_logger('trigger insert_instance') + batch_instance = connection.create_instances(config) + instance_id = batch_instance.first.instance_id + connection.client.wait_until(:instance_running, {instance_ids: [instance_id]}) + created_instance = get_vm(pool_name, new_vmname) + + # extra setup steps + if to_provision(pool_name) == "true" || to_provision(pool_name) == true + provision_node_aws(created_instance['private_dns_name'], pool_name) + end + + created_instance + end + + def provision_node_aws(vm, pool_name) + AwsSetup.setup_node_by_ssh(vm, pool_name) + end + + def get_block_device_mappings(image_id, volume_size) + ec2_client = connection.client + image = ec2_client.describe_images(:image_ids => [image_id]).images.first + raise RuntimeError, "Image not found: #{image_id}" if image.nil? + # Transform the images block_device_mappings output into a format + # ready for a create. 
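+          # Illustrative only (device name and sizes are hypothetical): an image whose
+          # root mapping looks like
+          #   { device_name: '/dev/xvda', ebs: { volume_size: 8, delete_on_termination: false } }
+          # is rewritten for the new instance as
+          #   { device_name: '/dev/xvda', ebs: { volume_size: volume_size, delete_on_termination: true } }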
+ block_device_mappings = [] + if image.root_device_type == "ebs" + orig_bdm = image.block_device_mappings + orig_bdm.each do |block_device| + block_device_mappings << { + :device_name => block_device.device_name, + :ebs => { + # Change the default size of the root volume. + :volume_size => volume_size, + # This is required to override the images default for + # delete_on_termination, forcing all volumes to be deleted once the + # instance is terminated. + :delete_on_termination => true + } + } + end + else + raise "#{image_id} does not have an ebs root device type" + end + block_device_mappings + end + + # create_disk creates an additional disk for an existing VM. It will name the new + # disk #{vm_name}-disk#{number_disk} where number_disk is the next logical disk number + # starting with 1 when adding an additional disk to a VM with only the boot disk: + # #{vm_name}-disk0 == boot disk + # #{vm_name}-disk1 == additional disk added via create_disk + # #{vm_name}-disk2 == additional disk added via create_disk if run a second time etc + # the new disk has labels added for vm and pool + # The AWS lifecycle is to create a new disk (lives independently of the instance) then to attach + # it to the existing instance. + # inputs + # [String] pool_name : Name of the pool + # [String] vm_name : Name of the existing VM + # [String] disk_size : The new disk size in GB + # returns + # [boolean] true : once the operations are finished + + # create_snapshot creates new snapshots with the unique name {new_snapshot_name}-#{disk.name} + # for one vm, and one create_snapshot() there could be multiple snapshots created, one for each drive. + # since the snapshot resource needs a unique name in the gce project, + # we create a unique name by concatenating {new_snapshot_name}-#{disk.name} + # the disk name is based on vm_name which makes it unique. + # The snapshot is added tags snapshot_name, vm, pool, diskname and boot + # inputs + # [String] pool_name : Name of the pool + # [String] vm_name : Name of the existing VM + # [String] new_snapshot_name : a unique name for this snapshot, which would be used to refer to it when reverting + # returns + # [boolean] true : once the operations are finished + # raises + # RuntimeError if the vm_name cannot be found + # RuntimeError if the snapshot_name already exists for this VM + + # revert_snapshot reverts an existing VM's disks to an existing snapshot_name + # reverting in aws entails + # 1. shutting down the VM, + # 2. detaching and deleting the drives, + # 3. creating new disks with the same name from the snapshot for each disk + # 4. attach disks and start instance + # for one vm, there might be multiple snapshots in time. 
We select the ones referred to by the + # snapshot_name, but that may be multiple snapshots, one for each disks + # The new disk is added tags vm and pool + # inputs + # [String] pool_name : Name of the pool + # [String] vm_name : Name of the existing VM + # [String] snapshot_name : Name of an existing snapshot + # returns + # [boolean] true : once the operations are finished + # raises + # RuntimeError if the vm_name cannot be found + # RuntimeError if the snapshot_name already exists for this VM + + # destroy_vm deletes an existing VM instance and any disks and snapshots via the labels + # in gce instances, disks and snapshots are resources that can exist independent of each other + # inputs + # [String] pool_name : Name of the pool + # [String] vm_name : Name of the existing VM + # returns + # [boolean] true : once the operations are finished + def destroy_vm(pool_name, vm_name) + debug_logger('destroy_vm') + deleted = false + + filters = [{ + name: "tag:vm_name", + values: [vm_name], + }] + instances = connection.instances(filters: filters).first + return true if instances.nil? + + debug_logger("trigger delete_instance #{vm_name}") + # vm_hash = get_vm(pool_name, vm_name) + instances.terminate + begin + connection.client.wait_until(:instance_terminated, {instance_ids: [instances.id]}) + deleted = true + rescue ::Aws::Waiters::Errors => error + debug_logger("failed waiting for instance terminated #{vm_name}: #{error}") + end + + return deleted + end + + # check if a vm is ready by opening a socket on port 22 + # if a domain is set, it will use vn_name.domain, + # if not then it will use the ip directly (AWS workaround) + def vm_ready?(_pool_name, vm_name) + begin + # TODO: we could use a healthcheck resource attached to instance + domain_set = domain || global_config[:config]['domain'] + if domain_set.nil? + vm_ip = get_vm(_pool_name, vm_name)['private_ip_address'] + vm_name = vm_ip unless vm_ip.nil? + end + open_socket(vm_name, domain_set) + rescue StandardError => _e + return false + end + true + end + + # tag_vm_user This method is called once we know who is using the VM (it is running). This method enables seeing + # who is using what in the provider pools. + # + # inputs + # [String] pool_name : Name of the pool + # [String] vm_name : Name of the VM to check if ready + # returns + # [Boolean] : true if successful, false if an error occurred and it should retry + def tag_vm_user(pool, vm_name) + user = get_current_user(vm_name) + vm_hash = get_vm(pool, vm_name) + return false if vm_hash.nil? + + new_labels = vm_hash['labels'] + # bailing in this case since labels should exist, and continuing would mean losing them + return false if new_labels.nil? + + # add new label called token-user, with value as user + new_labels['token-user'] = user + begin + instances_set_labels_request_object = Google::Apis::ComputeV1::InstancesSetLabelsRequest.new(label_fingerprint: vm_hash['label_fingerprint'], labels: new_labels) + result = connection.set_instance_labels(project, zone(pool), vm_name, instances_set_labels_request_object) + wait_for_zone_operation(project, zone(pool), result) + rescue StandardError => _e + return false + end + true + end + + # END BASE METHODS + + def get_current_user(vm_name) + @redis.with_metrics do |redis| + user = redis.hget("vmpooler__vm__#{vm_name}", 'token:user') + return '' if user.nil? 
+ + # cleanup so it's a valid label value + # can't have upercase + user = user.downcase + # replace invalid chars with dash + user = user.gsub(/[^0-9a-z_-]/, '-') + return user + end + end + + def get_current_lifetime(vm_name) + @redis.with_metrics do |redis| + lifetime = redis.hget("vmpooler__vm__#{vm_name}", 'lifetime') || '1h' + return lifetime + end + end + + def get_current_job_url(vm_name) + @redis.with_metrics do |redis| + job = redis.hget("vmpooler__vm__#{vm_name}", 'tag:jenkins_build_url') || '' + return job + end + end + + # Return a hash of VM data + # Provides name, template, poolname, boottime, status, image_size, private_ip_address + def generate_vm_hash(vm_object, pool_name) + pool_configuration = pool_config(pool_name) + return nil if pool_configuration.nil? + + { + 'name' => vm_object.tags.detect {|f| f.key == 'vm_name' }&.value, + #'hostname' => vm_object.hostname, + 'template' => pool_configuration&.key?('template') ? pool_configuration['template'] : nil, # was expecting to get it from API, not from config, but this is what vSphere does too! + 'poolname' => vm_object.tags.detect {|f| f.key == 'pool' }&.value, + 'boottime' => vm_object.launch_time, + 'status' => vm_object.state&.name, # One of the following values: pending, running, shutting-down, terminated, stopping, stopped + #'zone' => vm_object.zone, + 'image_size' => vm_object.instance_type, + 'private_ip_address' => vm_object.private_ip_address, + 'private_dns_name' => vm_object.private_dns_name + } + end + + def ensured_aws_connection(connection_pool_object) + connection_pool_object[:connection] = connect_to_aws unless connection_pool_object[:connection] + connection_pool_object[:connection] + end + + def connect_to_aws + max_tries = global_config[:config]['max_tries'] || 3 + retry_factor = global_config[:config]['retry_factor'] || 10 + try = 1 + begin + compute = ::Aws::EC2::Resource.new( + region: region, + credentials: ::Aws::Credentials.new(@aws_access_key, @aws_secret_key), + log_level: :debug + ) + + metrics.increment('connect.open') + compute + rescue StandardError => e # is that even a thing? + metrics.increment('connect.fail') + raise e if try >= max_tries + + sleep(try * retry_factor) + try += 1 + retry + end + end + + # This should supercede the open_socket method in the Pool Manager + def open_socket(host, domain = nil, timeout = 5, port = 22, &_block) + Timeout.timeout(timeout) do + target_host = host + target_host = "#{host}.#{domain}" if domain + sock = TCPSocket.new target_host, port + begin + yield sock if block_given? 
+ ensure + sock.close + end + end + end + + # used in local dev environment, set DEBUG_FLAG=true + # this way the upstream vmpooler manager does not get polluted with logs + def debug_logger(message, send_to_upstream: false) + # the default logger is simple and does not enforce debug levels (the first argument) + puts message if ENV['DEBUG_FLAG'] + logger.log('[g]', message) if send_to_upstream + end + end + end + end +end diff --git a/spec/ec2_helper.rb b/spec/ec2_helper.rb new file mode 100644 index 0000000..562eded --- /dev/null +++ b/spec/ec2_helper.rb @@ -0,0 +1,110 @@ +# frozen_string_literal: true + +# this file is used to Mock the GCE objects, for example the main ComputeService object +MockResult = Struct.new( + # https://googleapis.dev/ruby/google-api-client/latest/Google/Apis/ComputeV1/Operation.html + :client_operation_id, :creation_timestamp, :description, :end_time, :error, :http_error_message, + :http_error_status_code, :id, :insert_time, :kind, :name, :operation_type, :progress, :region, + :self_link, :start_time, :status, :status_message, :target_id, :target_link, :user, :warnings, :zone, + keyword_init: true +) + +MockOperationError = [].freeze + +MockOperationErrorError = Struct.new( + # https://googleapis.dev/ruby/google-api-client/latest/Google/Apis/ComputeV1/Operation/Error/Error.html + :code, :location, :message, + keyword_init: true +) + +MockInstance = Struct.new( + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/EC2/Instance.html + :instance_type, :launch_time, :private_ip_address, :state, :tags, :zone, + keyword_init: true +) + +MockTag = Struct.new( + :key, :value, + keyword_init: true +) + +MockInstanceList = Struct.new( + # https://googleapis.dev/ruby/google-api-client/latest/Google/Apis/ComputeV1/InstanceList.html + :id, :items, :kind, :next_page_token, :self_link, :warning, + keyword_init: true +) + +MockDiskList = Struct.new( + # https://googleapis.dev/ruby/google-api-client/latest/Google/Apis/ComputeV1/DiskList.html + :id, :items, :kind, :next_page_token, :self_link, :warning, + keyword_init: true +) + +MockDisk = Struct.new( + # https://googleapis.dev/ruby/google-api-client/latest/Google/Apis/ComputeV1/Disk.html + :creation_timestamp, :description, :disk_encryption_key, :guest_os_features, :id, :kind, :label_fingerprint, :labels, + :last_attach_timestamp, :last_detach_timestamp, :license_codes, :licenses, :name, :options, + :physical_block_size_bytes, :region, :replica_zones, :resource_policies, :self_link, :size_gb, :source_disk, + :source_disk_id, :source_image, :source_image_encryption_key, :source_image_id, :source_snapshot, + :source_snapshot_encryption_key, :source_snapshot_id, :status, :type, :users, :zone, + keyword_init: true +) + +MockSnapshotList = Struct.new( + # https://googleapis.dev/ruby/google-api-client/latest/Google/Apis/ComputeV1/DiskList.html + :id, :items, :kind, :next_page_token, :self_link, :warning, + keyword_init: true +) + +MockSnapshot = Struct.new( + # https://googleapis.dev/ruby/google-api-client/latest/Google/Apis/ComputeV1/Snapshot.html + :auto_created, :chain_name, :creation_timestamp, :description, :disk_size_gb, :download_bytes, :id, :kind, + :label_fingerprint, :labels, :license_codes, :licenses, :name, :self_link, :snapshot_encryption_key, :source_disk, + :source_disk_encryption_key, :source_disk_id, :status, :storage_bytes, :storage_bytes_status, :storage_locations, + keyword_init: true +) + +MockAttachedDisk = Struct.new( + # 
https://googleapis.dev/ruby/google-api-client/latest/Google/Apis/ComputeV1/AttachedDisk.html + :auto_delete, :boot, :device_name, :disk_encryption_key, :disk_size_gb, :guest_os_features, :index, + :initialize_params, :interface, :kind, :licenses, :mode, :shielded_instance_initial_state, :source, :type, + keyword_init: true +) + +# -------------------- +# Main ComputeService Object +# -------------------- +MockComputeServiceConnection = Struct.new( + # https://googleapis.dev/ruby/google-api-client/latest/Google/Apis/ComputeV1/ComputeService.html + :key, :quota_user, :user_ip +) do + # Onlly methods we use are listed here + def get_instance + MockInstance.new + end + + # Alias to serviceContent.propertyCollector + def insert_instance + MockResult.new + end +end + +# ------------------------------------------------------------------------------------------------------------- +# Mocking Methods +# ------------------------------------------------------------------------------------------------------------- + +# def mock_RbVmomi_VIM_ClusterComputeResource(options = {}) +# options[:name] = 'Cluster' + rand(65536).to_s if options[:name].nil? +# +# mock = MockClusterComputeResource.new() +# +# mock.name = options[:name] +# # All cluster compute resources have a root Resource Pool +# mock.resourcePool = mock_RbVmomi_VIM_ResourcePool({:name => options[:name]}) +# +# allow(mock).to receive(:is_a?) do |expected_type| +# expected_type == RbVmomi::VIM::ClusterComputeResource +# end +# +# mock +# end diff --git a/spec/helpers.rb b/spec/helpers.rb new file mode 100644 index 0000000..4b2dff6 --- /dev/null +++ b/spec/helpers.rb @@ -0,0 +1,153 @@ +# frozen_string_literal: true + +require 'mock_redis' + +def redis + @redis ||= MockRedis.new + @redis +end + +# Mock an object which represents a Logger. This stops the proliferation +# of allow(logger).to .... expectations in tests. +class MockLogger + def log(_level, string); end +end + +def expect_json(ok = true, http = 200) + expect(last_response.header['Content-Type']).to eq('application/json') + + if ok == true + expect(JSON.parse(last_response.body)['ok']).to eq(true) + else + expect(JSON.parse(last_response.body)['ok']).to eq(false) + end + + expect(last_response.status).to eq(http) +end + +def create_token(token, user, timestamp) + redis.hset("vmpooler__token__#{token}", 'user', user) + redis.hset("vmpooler__token__#{token}", 'created', timestamp) +end + +def get_token_data(token) + redis.hgetall("vmpooler__token__#{token}") +end + +def token_exists?(_token) + result = get_token_data + result && !result.empty? 
+end + +def create_ready_vm(template, name, redis, token = nil) + create_vm(name, redis, token) + redis.sadd("vmpooler__ready__#{template}", name) + redis.hset("vmpooler__vm__#{name}", 'template', template) +end + +def create_running_vm(template, name, redis, token = nil, user = nil) + create_vm(name, redis, token, user) + redis.sadd("vmpooler__running__#{template}", name) + redis.hset("vmpooler__vm__#{name}", 'template', template) + redis.hset("vmpooler__vm__#{name}", 'checkout', Time.now) + redis.hset("vmpooler__vm__#{name}", 'host', 'host1') +end + +def create_pending_vm(template, name, redis, token = nil) + create_vm(name, redis, token) + redis.sadd("vmpooler__pending__#{template}", name) + redis.hset("vmpooler__vm__#{name}", 'template', template) +end + +def create_vm(name, redis, token = nil, user = nil) + redis.hset("vmpooler__vm__#{name}", 'checkout', Time.now) + redis.hset("vmpooler__vm__#{name}", 'clone', Time.now) + redis.hset("vmpooler__vm__#{name}", 'token:token', token) if token + redis.hset("vmpooler__vm__#{name}", 'token:user', user) if user +end + +def create_completed_vm(name, pool, redis, active = false) + redis.sadd("vmpooler__completed__#{pool}", name) + redis.hset("vmpooler__vm__#{name}", 'checkout', Time.now) + redis.hset("vmpooler__active__#{pool}", name, Time.now) if active +end + +def create_discovered_vm(name, pool, redis) + redis.sadd("vmpooler__discovered__#{pool}", name) +end + +def create_migrating_vm(name, pool, redis) + redis.hset("vmpooler__vm__#{name}", 'checkout', Time.now) + redis.sadd("vmpooler__migrating__#{pool}", name) +end + +def create_tag(vm, tag_name, tag_value, redis) + redis.hset("vmpooler__vm__#{vm}", "tag:#{tag_name}", tag_value) +end + +def add_vm_to_migration_set(name, redis) + redis.sadd('vmpooler__migration', name) +end + +def fetch_vm(vm) + redis.hgetall("vmpooler__vm__#{vm}") +end + +def set_vm_data(vm, key, value, redis) + redis.hset("vmpooler__vm__#{vm}", key, value) +end + +def snapshot_revert_vm(vm, snapshot = '12345678901234567890123456789012', redis) + redis.sadd('vmpooler__tasks__snapshot-revert', "#{vm}:#{snapshot}") + redis.hset("vmpooler__vm__#{vm}", "snapshot:#{snapshot}", '1') +end + +def snapshot_vm(vm, snapshot = '12345678901234567890123456789012', redis) + redis.sadd('vmpooler__tasks__snapshot', "#{vm}:#{snapshot}") + redis.hset("vmpooler__vm__#{vm}", "snapshot:#{snapshot}", '1') +end + +def disk_task_vm(vm, disk_size = '10', redis) + redis.sadd('vmpooler__tasks__disk', "#{vm}:#{disk_size}") +end + +def has_vm_snapshot?(vm, redis) + redis.smembers('vmpooler__tasks__snapshot').any? do |snapshot| + instance, _sha = snapshot.split(':') + vm == instance + end +end + +def vm_reverted_to_snapshot?(vm, redis, snapshot = nil) + redis.smembers('vmpooler__tasks__snapshot-revert').any? do |action| + instance, sha = action.split(':') + instance == vm and (snapshot ? 
(sha == snapshot) : true) + end +end + +def pool_has_ready_vm?(pool, vm, redis) + !!redis.sismember("vmpooler__ready__#{pool}", vm) +end + +def create_ondemand_request_for_test(request_id, score, platforms_string, redis, user = nil, token = nil) + redis.zadd('vmpooler__provisioning__request', score, request_id) + redis.hset("vmpooler__odrequest__#{request_id}", 'requested', platforms_string) + redis.hset("vmpooler__odrequest__#{request_id}", 'token:token', token) if token + redis.hset("vmpooler__odrequest__#{request_id}", 'token:user', user) if user +end + +def set_ondemand_request_status(request_id, status, redis) + redis.hset("vmpooler__odrequest__#{request_id}", 'status', status) +end + +def create_ondemand_vm(vmname, request_id, pool, pool_alias, redis) + redis.sadd("vmpooler__#{request_id}__#{pool_alias}__#{pool}", vmname) +end + +def create_ondemand_creationtask(request_string, score, redis) + redis.zadd('vmpooler__odcreate__task', score, request_string) +end + +def create_ondemand_processing(request_id, score, redis) + redis.zadd('vmpooler__provisioning__processing', score, request_id) +end diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb new file mode 100644 index 0000000..9961050 --- /dev/null +++ b/spec/spec_helper.rb @@ -0,0 +1,19 @@ +# frozen_string_literal: true + +require 'simplecov' +SimpleCov.start do + add_filter '/spec/' +end +require 'helpers' +require 'rspec' +require 'vmpooler' +require 'redis' +require 'vmpooler/metrics' + +def project_root_dir + File.dirname(File.dirname(__FILE__)) +end + +def fixtures_dir + File.join(project_root_dir, 'spec', 'fixtures') +end diff --git a/spec/unit/providers/aws_spec.rb b/spec/unit/providers/aws_spec.rb new file mode 100644 index 0000000..1a44190 --- /dev/null +++ b/spec/unit/providers/aws_spec.rb @@ -0,0 +1,309 @@ +require 'spec_helper' +require 'mock_redis' +require 'ec2_helper' +require 'vmpooler/providers/aws' + +RSpec::Matchers.define :relocation_spec_with_host do |value| + match { |actual| actual[:spec].host == value } +end + +describe 'Vmpooler::PoolManager::Provider::Aws' do + let(:logger) { MockLogger.new } + let(:metrics) { Vmpooler::Metrics::DummyStatsd.new } + let(:poolname) { 'debian-9' } + let(:provider_options) { { 'param' => 'value' } } + let(:zone) { 'us-west-2b' } + let(:region) { 'us-west-2'} + let(:config) { YAML.load(<<~EOT + --- + :config: + max_tries: 3 + retry_factor: 10 + :providers: + :aws: + connection_pool_timeout: 1 + zone: '#{zone}' + region: '#{region}' + :pools: + - name: '#{poolname}' + alias: [ 'mockpool' ] + amisize: 'a1.large' + template: 'ami-03c1b544a7566b3e5' + size: 5 + timeout: 10 + ready_ttl: 1440 + provider: 'aws' + provision: true +EOT + ) + } + + let(:vmname) { 'vm17' } + let(:connection) { MockComputeServiceConnection.new } + let(:redis_connection_pool) do + Vmpooler::PoolManager::GenericConnectionPool.new( + metrics: metrics, + connpool_type: 'redis_connection_pool', + connpool_provider: 'testprovider', + size: 1, + timeout: 5 + ) { MockRedis.new } + end + + subject { Vmpooler::PoolManager::Provider::Aws.new(config, logger, metrics, redis_connection_pool, 'aws', provider_options) } + + describe '#manual tests live' do + context 'in itsysops' do + before(:each) { allow(subject).to receive(:dns).and_call_original } + let(:vmname) { "instance-46" } + let(:poolname) { "ubuntu-2004-arm64" } + skip 'gets a vm' do + + # result = subject.create_vm(poolname, vmname) + subject.provision_node_aws("ip-10-227-4-27.amz-dev.puppet.net", poolname) + # subject.create_snapshot(poolname, vmname, 
"foo") + #subject.create_disk(poolname, vmname, 10) + # a = subject.destroy_vm(poolname, vmname) + # b = subject.get_vm(poolname, vmname) + puts "done" + # subject.dns_teardown({'name' => vmname}) + # subject.dns_setup({'name' => vmname, 'ip' => '1.2.3.5'}) + end + end + end + + describe '#vms_in_pool' do + let(:pool_config) { config[:pools][0] } + + before(:each) do + allow(subject).to receive(:connect_to_aws).and_return(connection) + end + + context 'Given an empty pool folder' do + it 'should return an empty array' do + allow(connection).to receive(:instances).and_return([nil]) + result = subject.vms_in_pool(poolname) + + expect(result).to eq([]) + end + end + + context 'Given a pool with many VMs' do + let(:expected_vm_list) do + [ + { 'name' => 'vm1' }, + { 'name' => 'vm2' }, + { 'name' => 'vm3' } + ] + end + before(:each) do + instance_list = [] + expected_vm_list.each do |vm_hash| + tags = [MockTag.new(key: "vm_name", value: vm_hash['name'])] + mock_vm = MockInstance.new(tags: tags) + instance_list << mock_vm + end + + expect(connection).to receive(:instances).and_return(instance_list) + end + + it 'should list all VMs in the VM folder for the pool' do + result = subject.vms_in_pool(poolname) + + expect(result).to eq(expected_vm_list) + end + end + end + + describe '#get_vm' do + before(:each) do + allow(subject).to receive(:connect_to_aws).and_return(connection) + end + + context 'when VM does not exist' do + it 'should return nil' do + allow(connection).to receive(:instances).and_return([nil]) + expect(subject.get_vm(poolname, vmname)).to be_nil + end + end + + context 'when VM exists but is missing information' do + before(:each) do + tags = [MockTag.new(key: "vm_name", value: vmname)] + allow(connection).to receive(:instances).and_return([MockInstance.new(tags: tags)]) + end + + it 'should return a hash' do + expect(subject.get_vm(poolname, vmname)).to be_kind_of(Hash) + end + + it 'should return the VM name' do + result = subject.get_vm(poolname, vmname) + + expect(result['name']).to eq(vmname) + end + + %w[boottime image_size status private_ip_address].each do |testcase| + it "should return nil for #{testcase}" do + result = subject.get_vm(poolname, vmname) + + expect(result[testcase]).to be_nil + end + end + end + + context 'when VM exists and contains all information' do + let(:vm_hostname) { "#{vmname}.demo.local" } + let(:boot_time) { Time.now } + let(:vm_object) do + state = Struct.new(:name) + runningstate = state.new "running" + MockInstance.new( + launch_time: boot_time, + state: runningstate, + instance_type: "a1.large", + private_ip_address: "1.1.1.1", + tags: [ + MockTag.new(key: "vm_name", value: vmname), + MockTag.new(key: "pool", value: poolname) + ] + ) + end + let(:pool_info) { config[:pools][0] } + + before(:each) do + allow(connection).to receive(:instances).and_return([vm_object]) + end + + it 'should return a hash' do + expect(subject.get_vm(poolname, vmname)).to be_kind_of(Hash) + end + + it 'should return the VM name' do + result = subject.get_vm(poolname, vmname) + + expect(result['name']).to eq(vmname) + end + + it 'should return the template name' do + result = subject.get_vm(poolname, vmname) + + expect(result['template']).to eq(pool_info['template']) + end + + it 'should return the pool name' do + result = subject.get_vm(poolname, vmname) + + expect(result['poolname']).to eq(pool_info['name']) + end + + it 'should return the boot time' do + result = subject.get_vm(poolname, vmname) + + expect(result['boottime']).to eq(boot_time) + end + + it 'should 
return the status' do + result = subject.get_vm(poolname, vmname) + + expect(result['status']).to eq("running") + end + + it 'should return the status' do + result = subject.get_vm(poolname, vmname) + + expect(result['image_size']).to eq("a1.large") + end + end + end + + describe '#create_vm' do + before(:each) do + allow(subject).to receive(:connect_to_aws).and_return(connection) + end + + context 'Given an invalid pool name' do + it 'should raise an error' do + expect { subject.create_vm('missing_pool', vmname) }.to raise_error(/missing_pool does not exist/) + end + end + + context 'Given a vmname that already exists' do + before(:each) do + allow(subject).to receive(:get_vm).and_return({ + 'name' => "foobar", + 'template' => "abc", + 'status' => "running" + }) + end + + it 'should raise an error' do + expect { subject.create_vm(poolname, vmname) }.to raise_error(/Instance creation not attempted, .* already exists/) + end + end + + context 'Given a successful creation' do + let(:client) { double } + before(:each) do + allow(subject).to receive(:get_vm).and_return(nil,{ + 'name' => vmname, + 'template' => "abc", + 'status' => "running" + }) + result = Struct.new(:instance_id) + batch_instance = result.new(instance_id: "abcfoo") + allow(connection).to receive(:create_instances).and_return([batch_instance]) + allow(connection).to receive(:client).and_return(client) + allow(client).to receive(:wait_until) + end + + it 'should return a hash' do + result = subject.create_vm(poolname, vmname) + + expect(result.is_a?(Hash)).to be true + end + + it 'should have the new VM name' do + result = subject.create_vm(poolname, vmname) + + expect(result['name']).to eq(vmname) + end + end + end + + describe '#destroy_vm' do + before(:each) do + allow(subject).to receive(:connect_to_aws).and_return(connection) + end + + context 'Given a missing VM name' do + let(:client) { double } + before(:each) do + allow(connection).to receive(:instances).and_return([nil]) + allow(connection).to receive(:client).and_return(client) + allow(client).to receive(:wait_until) + end + + it 'should return true' do + expect(subject.destroy_vm(poolname, 'missing_vm')).to be true + end + end + + context 'Given a running VM' do + let(:instance) { double("instance") } + let(:client) { double } + before(:each) do + allow(connection).to receive(:instances).and_return([instance]) + expect(instance).to receive(:terminate) + allow(connection).to receive(:client).and_return(client) + allow(client).to receive(:wait_until) + allow(instance).to receive(:id) + end + + it 'should return true' do + expect(subject.destroy_vm(poolname, vmname)).to be true + end + end + end +end + diff --git a/spec/unit/providers/gce_spec.rb b/spec/unit/providers/gce_spec.rb new file mode 100644 index 0000000..cc43c0c --- /dev/null +++ b/spec/unit/providers/gce_spec.rb @@ -0,0 +1,767 @@ +require 'spec_helper' +require 'mock_redis' +require 'vmpooler/providers/gce' + +RSpec::Matchers.define :relocation_spec_with_host do |value| + match { |actual| actual[:spec].host == value } +end + +describe 'Vmpooler::PoolManager::Provider::Gce' do + let(:logger) { MockLogger.new } + let(:metrics) { Vmpooler::Metrics::DummyStatsd.new } + let(:poolname) { 'debian-9' } + let(:provider_options) { { 'param' => 'value' } } + let(:project) { 'vmpooler-test' } + let(:zone) { 'us-west1-b' } + let(:config) { YAML.load(<<~EOT + --- + :config: + max_tries: 3 + retry_factor: 10 + :providers: + :gce: + connection_pool_timeout: 1 + project: '#{project}' + zone: '#{zone}' + network_name: 
global/networks/default + :pools: + - name: '#{poolname}' + alias: [ 'mockpool' ] + template: 'projects/debian-cloud/global/images/family/debian-9' + size: 5 + timeout: 10 + ready_ttl: 1440 + provider: 'gce' + machine_type: 'zones/#{zone}/machineTypes/e2-micro' +EOT + ) + } + + let(:vmname) { 'vm17' } + let(:connection) { MockComputeServiceConnection.new } + let(:redis_connection_pool) do + Vmpooler::PoolManager::GenericConnectionPool.new( + metrics: metrics, + connpool_type: 'redis_connection_pool', + connpool_provider: 'testprovider', + size: 1, + timeout: 5 + ) { MockRedis.new } + end + + subject { Vmpooler::PoolManager::Provider::Gce.new(config, logger, metrics, redis_connection_pool, 'gce', provider_options) } + + before(:each) { allow(subject).to receive(:dns).and_return(MockDNS.new()) } + + describe '#name' do + it 'should be gce' do + expect(subject.name).to eq('gce') + end + end + + describe '#manual tests live' do + context 'in itsysops' do + before(:each) { allow(subject).to receive(:dns).and_call_original } + let(:vmname) { "instance-24" } + let(:project) { 'vmpooler-test' } + let(:config) { YAML.load(<<~EOT + --- + :config: + max_tries: 3 + retry_factor: 10 + :providers: + :gce: + connection_pool_timeout: 1 + project: '#{project}' + zone: '#{zone}' + network_name: 'projects/itsysopsnetworking/global/networks/shared1' + dns_zone_resource_name: 'test-vmpooler-puppet-net' + domain: 'test.vmpooler.puppet.net' + :pools: + - name: '#{poolname}' + alias: [ 'mockpool' ] + template: 'projects/debian-cloud/global/images/family/debian-9' + size: 5 + timeout: 10 + ready_ttl: 1440 + provider: 'gce' + subnetwork_name: 'projects/itsysopsnetworking/regions/us-west1/subnetworks/vmpooler-test' + machine_type: 'zones/#{zone}/machineTypes/e2-micro' +EOT + ) } + skip 'gets a vm' do + result = subject.create_vm(poolname, vmname) + #result = subject.destroy_vm(poolname, vmname) + subject.get_vm(poolname, vmname) + #subject.dns_teardown({'name' => vmname}) + # subject.dns_setup({'name' => vmname, 'ip' => '1.2.3.5'}) + end + end + end + + describe '#vms_in_pool' do + let(:pool_config) { config[:pools][0] } + + before(:each) do + allow(subject).to receive(:connect_to_gce).and_return(connection) + end + + context 'Given an empty pool folder' do + it 'should return an empty array' do + instance_list = MockInstanceList.new(items: nil) + allow(connection).to receive(:list_instances).and_return(instance_list) + result = subject.vms_in_pool(poolname) + + expect(result).to eq([]) + end + end + + context 'Given a pool folder with many VMs' do + let(:expected_vm_list) do + [ + { 'name' => 'vm1' }, + { 'name' => 'vm2' }, + { 'name' => 'vm3' } + ] + end + before(:each) do + instance_list = MockInstanceList.new(items: []) + expected_vm_list.each do |vm_hash| + mock_vm = MockInstance.new(name: vm_hash['name']) + instance_list.items << mock_vm + end + + expect(connection).to receive(:list_instances).and_return(instance_list) + end + + it 'should list all VMs in the VM folder for the pool' do + result = subject.vms_in_pool(poolname) + + expect(result).to eq(expected_vm_list) + end + end + end + + describe '#get_vm' do + before(:each) do + allow(subject).to receive(:connect_to_gce).and_return(connection) + end + + context 'when VM does not exist' do + it 'should return nil' do + allow(connection).to receive(:get_instance).and_raise(create_google_client_error(404, "The resource 'projects/#{project}/zones/#{zone}/instances/#{vmname}' was not found")) + expect(subject.get_vm(poolname, vmname)).to be_nil + end + end + + 
context 'when VM exists but is missing information' do + before(:each) do + allow(connection).to receive(:get_instance).and_return(MockInstance.new(name: vmname)) + end + + it 'should return a hash' do + expect(subject.get_vm(poolname, vmname)).to be_kind_of(Hash) + end + + it 'should return the VM name' do + result = subject.get_vm(poolname, vmname) + + expect(result['name']).to eq(vmname) + end + + %w[hostname boottime zone status].each do |testcase| + it "should return nil for #{testcase}" do + result = subject.get_vm(poolname, vmname) + + expect(result[testcase]).to be_nil + end + end + end + + context 'when VM exists and contains all information' do + let(:vm_hostname) { "#{vmname}.demo.local" } + let(:boot_time) { Time.now } + let(:vm_object) do + MockInstance.new( + name: vmname, + hostname: vm_hostname, + labels: { 'pool' => poolname }, + creation_timestamp: boot_time, + status: 'RUNNING', + zone: zone, + machine_type: "zones/#{zone}/machineTypes/e2-micro" + ) + end + let(:pool_info) { config[:pools][0] } + + before(:each) do + allow(connection).to receive(:get_instance).and_return(vm_object) + end + + it 'should return a hash' do + expect(subject.get_vm(poolname, vmname)).to be_kind_of(Hash) + end + + it 'should return the VM name' do + result = subject.get_vm(poolname, vmname) + + expect(result['name']).to eq(vmname) + end + + it 'should return the VM hostname' do + result = subject.get_vm(poolname, vmname) + + expect(result['hostname']).to eq(vm_hostname) + end + + it 'should return the template name' do + result = subject.get_vm(poolname, vmname) + + expect(result['template']).to eq(pool_info['template']) + end + + it 'should return the pool name' do + result = subject.get_vm(poolname, vmname) + + expect(result['poolname']).to eq(pool_info['name']) + end + + it 'should return the boot time' do + result = subject.get_vm(poolname, vmname) + + expect(result['boottime']).to eq(boot_time) + end + end + end + + describe '#create_vm' do + before(:each) do + allow(subject).to receive(:connect_to_gce).and_return(connection) + end + + context 'Given an invalid pool name' do + it 'should raise an error' do + expect { subject.create_vm('missing_pool', vmname) }.to raise_error(/missing_pool does not exist/) + end + end + + context 'Given a template VM that does not exist' do + before(:each) do + config[:pools][0]['template'] = 'Templates/missing_template' + # result = MockResult.new + # result.status = "PENDING" + # errors = MockOperationError + # errors << MockOperationErrorError.new(code: "foo", message: "it's missing") + # result.error = errors + allow(connection).to receive(:insert_instance).and_raise(create_google_client_error(404, 'The resource \'Templates/missing_template\' was not found')) + end + + it 'should raise an error' do + expect { subject.create_vm(poolname, vmname) }.to raise_error(Google::Apis::ClientError) + end + end + + context 'Given a successful creation' do + before(:each) do + result = MockResult.new + result.status = 'DONE' + allow(connection).to receive(:insert_instance).and_return(result) + end + + it 'should return a hash' do + allow(connection).to receive(:get_instance).and_return(MockInstance.new) + result = subject.create_vm(poolname, vmname) + + expect(result.is_a?(Hash)).to be true + end + + it 'should have the new VM name' do + instance = MockInstance.new(name: vmname) + allow(connection).to receive(:get_instance).and_return(instance) + result = subject.create_vm(poolname, vmname) + + expect(result['name']).to eq(vmname) + end + end + end + + describe 
'#destroy_vm' do + before(:each) do + allow(subject).to receive(:connect_to_gce).and_return(connection) + end + + context 'Given a missing VM name' do + before(:each) do + allow(connection).to receive(:get_instance).and_raise(create_google_client_error(404, "The resource 'projects/#{project}/zones/#{zone}/instances/#{vmname}' was not found")) + disk_list = MockDiskList.new(items: nil) + allow(connection).to receive(:list_disks).and_return(disk_list) + allow(subject).to receive(:find_all_snapshots).and_return(nil) + end + + it 'should return true' do + expect(connection.should_receive(:delete_instance).never) + expect(subject.destroy_vm(poolname, 'missing_vm')).to be true + end + end + + context 'Given a running VM' do + before(:each) do + instance = MockInstance.new(name: vmname) + allow(connection).to receive(:get_instance).and_return(instance) + result = MockResult.new + result.status = 'DONE' + allow(subject).to receive(:wait_for_operation).and_return(result) + allow(connection).to receive(:delete_instance).and_return(result) + end + + it 'should return true' do + # no dangling disks + disk_list = MockDiskList.new(items: nil) + allow(connection).to receive(:list_disks).and_return(disk_list) + # no dangling snapshots + allow(subject).to receive(:find_all_snapshots).and_return(nil) + expect(subject.destroy_vm(poolname, vmname)).to be true + end + + it 'should delete any dangling disks' do + disk = MockDisk.new(name: vmname) + disk_list = MockDiskList.new(items: [disk]) + allow(connection).to receive(:list_disks).and_return(disk_list) + # no dangling snapshots + allow(subject).to receive(:find_all_snapshots).and_return(nil) + expect(connection).to receive(:delete_disk).with(project, zone, disk.name) + subject.destroy_vm(poolname, vmname) + end + + it 'should delete any dangling snapshots' do + # no dangling disks + disk_list = MockDiskList.new(items: nil) + allow(connection).to receive(:list_disks).and_return(disk_list) + snapshot = MockSnapshot.new(name: "snapshotname-#{vmname}") + allow(subject).to receive(:find_all_snapshots).and_return([snapshot]) + expect(connection).to receive(:delete_snapshot).with(project, snapshot.name) + subject.destroy_vm(poolname, vmname) + end + end + end + + describe '#vm_ready?' 
do + let(:domain) { nil } + context 'When a VM is ready' do + before(:each) do + expect(subject).to receive(:open_socket).with(vmname, domain) + end + + it 'should return true' do + expect(subject.vm_ready?(poolname, vmname)).to be true + end + end + + context 'When an error occurs connecting to the VM' do + before(:each) do + expect(subject).to receive(:open_socket).and_raise(RuntimeError, 'MockError') + end + + it 'should return false' do + expect(subject.vm_ready?(poolname, vmname)).to be false + end + end + end + + describe '#create_disk' do + let(:disk_size) { 10 } + before(:each) do + allow(subject).to receive(:connect_to_gce).and_return(connection) + end + + context 'Given an invalid pool name' do + it 'should raise an error' do + expect { subject.create_disk('missing_pool', vmname, disk_size) }.to raise_error(/missing_pool does not exist/) + end + end + + context 'when VM does not exist' do + before(:each) do + expect(connection).to receive(:get_instance).and_raise(create_google_client_error(404, "The resource 'projects/#{project}/zones/#{zone}/instances/#{vmname}' was not found")) + end + + it 'should raise an error' do + expect { subject.create_disk(poolname, vmname, disk_size) }.to raise_error(/VM #{vmname} .+ does not exist/) + end + end + + context 'when adding the disk raises an error' do + before(:each) do + disk = MockDisk.new(name: vmname) + instance = MockInstance.new(name: vmname, disks: [disk]) + allow(connection).to receive(:get_instance).and_return(instance) + expect(connection).to receive(:insert_disk).and_raise(RuntimeError, 'Mock Disk Error') + end + + it 'should raise an error' do + expect { subject.create_disk(poolname, vmname, disk_size) }.to raise_error(/Mock Disk Error/) + end + end + + context 'when adding the disk succeeds' do + before(:each) do + disk = MockDisk.new(name: vmname) + instance = MockInstance.new(name: vmname, disks: [disk]) + allow(connection).to receive(:get_instance).and_return(instance) + result = MockResult.new + result.status = 'DONE' + allow(connection).to receive(:insert_disk).and_return(result) + allow(subject).to receive(:wait_for_operation).and_return(result) + new_disk = MockDisk.new(name: "#{vmname}-disk1", self_link: "/foo/bar/baz/#{vmname}-disk1") + allow(connection).to receive(:get_disk).and_return(new_disk) + allow(connection).to receive(:attach_disk).and_return(result) + end + + it 'should return true' do + expect(subject.create_disk(poolname, vmname, disk_size)).to be true + end + end + end + + describe '#create_snapshot' do + let(:snapshot_name) { 'snapshot' } + + before(:each) do + allow(subject).to receive(:connect_to_gce).and_return(connection) + end + + context 'when VM does not exist' do + before(:each) do + allow(connection).to receive(:get_instance).and_raise(create_google_client_error(404, "The resource 'projects/#{project}/zones/#{zone}/instances/#{vmname}' was not found")) + end + + it 'should raise an error' do + expect { subject.create_snapshot(poolname, vmname, snapshot_name) }.to raise_error(/VM #{vmname} .+ does not exist/) + end + end + + context 'when snapshot already exists' do + it 'should raise an error' do + disk = MockDisk.new(name: vmname) + instance = MockInstance.new(name: vmname, disks: [disk]) + allow(connection).to receive(:get_instance).and_return(instance) + snapshots = [MockSnapshot.new(name: snapshot_name)] + allow(subject).to receive(:find_snapshot).and_return(snapshots) + expect { subject.create_snapshot(poolname, vmname, snapshot_name) }.to raise_error(/Snapshot #{snapshot_name} .+ already 
exists /) + end + end + + context 'when snapshot raises an error' do + before(:each) do + attached_disk = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}") + instance = MockInstance.new(name: vmname, disks: [attached_disk]) + allow(connection).to receive(:get_instance).and_return(instance) + snapshots = nil + allow(subject).to receive(:find_snapshot).and_return(snapshots) + allow(connection).to receive(:create_disk_snapshot).and_raise(RuntimeError, 'Mock Snapshot Error') + end + + it 'should raise an error' do + expect { subject.create_snapshot(poolname, vmname, snapshot_name) }.to raise_error(/Mock Snapshot Error/) + end + end + + context 'when snapshot succeeds' do + before(:each) do + attached_disk = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}") + instance = MockInstance.new(name: vmname, disks: [attached_disk]) + allow(connection).to receive(:get_instance).and_return(instance) + snapshots = nil + allow(subject).to receive(:find_snapshot).and_return(snapshots) + result = MockResult.new + result.status = 'DONE' + allow(connection).to receive(:create_disk_snapshot).and_return(result) + end + + it 'should return true' do + expect(subject.create_snapshot(poolname, vmname, snapshot_name)).to be true + end + + it 'should snapshot each attached disk' do + attached_disk = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}") + attached_disk2 = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}-disk1") + instance = MockInstance.new(name: vmname, disks: [attached_disk, attached_disk2]) + allow(connection).to receive(:get_instance).and_return(instance) + + expect(connection.should_receive(:create_disk_snapshot).twice) + subject.create_snapshot(poolname, vmname, snapshot_name) + end + end + end + + describe '#revert_snapshot' do + let(:snapshot_name) { 'snapshot' } + + before(:each) do + allow(subject).to receive(:connect_to_gce).and_return(connection) + end + + context 'when VM does not exist' do + before(:each) do + allow(connection).to receive(:get_instance).and_raise(create_google_client_error(404, "The resource 'projects/#{project}/zones/#{zone}/instances/#{vmname}' was not found")) + end + + it 'should raise an error' do + expect { subject.revert_snapshot(poolname, vmname, snapshot_name) }.to raise_error(/VM #{vmname} .+ does not exist/) + end + end + + context 'when snapshot does not exist' do + it 'should raise an error' do + attached_disk = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}") + instance = MockInstance.new(name: vmname, disks: [attached_disk]) + allow(connection).to receive(:get_instance).and_return(instance) + snapshots = nil + allow(subject).to receive(:find_snapshot).and_return(snapshots) + expect { subject.revert_snapshot(poolname, vmname, snapshot_name) }.to raise_error(/Snapshot #{snapshot_name} .+ does not exist /) + end + end + + context 'when instance does not have attached disks' do + it 'should skip detaching/deleting disk' do + instance = MockInstance.new(name: vmname, disks: nil) + allow(connection).to receive(:get_instance).and_return(instance) + snapshots = [] + allow(subject).to receive(:find_snapshot).and_return(snapshots) + allow(connection).to receive(:stop_instance) + allow(subject).to receive(:wait_for_operation) + allow(connection).to receive(:start_instance) + expect(subject).not_to receive(:detach_disk) + expect(subject).not_to receive(:delete_disk) + subject.revert_snapshot(poolname, vmname, snapshot_name) + end + end + + context 
'when revert to snapshot raises an error' do + before(:each) do + attached_disk = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}") + instance = MockInstance.new(name: vmname, disks: [attached_disk]) + allow(connection).to receive(:get_instance).and_return(instance) + snapshots = [MockSnapshot.new(name: snapshot_name)] + allow(subject).to receive(:find_snapshot).and_return(snapshots) + allow(connection).to receive(:stop_instance) + allow(subject).to receive(:wait_for_operation) + expect(connection).to receive(:detach_disk).and_raise(RuntimeError, 'Mock Snapshot Error') + end + + it 'should raise an error' do + expect { subject.revert_snapshot(poolname, vmname, snapshot_name) }.to raise_error(/Mock Snapshot Error/) + end + end + + context 'when revert to snapshot succeeds' do + before(:each) do + attached_disk = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}") + instance = MockInstance.new(name: vmname, disks: [attached_disk]) + allow(connection).to receive(:get_instance).and_return(instance) + snapshots = [MockSnapshot.new(name: snapshot_name, self_link: "foo/bar/baz/snapshot/#{snapshot_name}", labels: { 'diskname' => vmname })] + allow(subject).to receive(:find_snapshot).and_return(snapshots) + allow(connection).to receive(:stop_instance) + allow(subject).to receive(:wait_for_operation) + allow(connection).to receive(:detach_disk) + allow(connection).to receive(:delete_disk) + new_disk = MockDisk.new(name: vmname, self_link: "foo/bar/baz/disk/#{vmname}") + allow(connection).to receive(:insert_disk) + allow(connection).to receive(:get_disk).and_return(new_disk) + allow(connection).to receive(:attach_disk) + allow(connection).to receive(:start_instance) + end + + it 'should return true' do + expect(subject.revert_snapshot(poolname, vmname, snapshot_name)).to be true + end + end + end + + describe '#purge_unconfigured_resources' do + let(:empty_list) { [] } + + before(:each) do + allow(subject).to receive(:connect_to_gce).and_return(connection) + end + + context 'with empty allowlist' do + before(:each) do + allow(subject).to receive(:wait_for_zone_operation) + end + it 'should attempt to delete unconfigured instances when they dont have a label' do + instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo')]) + disk_list = MockDiskList.new(items: nil) + snapshot_list = MockSnapshotList.new(items: nil) + # the instance_list is filtered in the real code, and should only return non-configured VMs based on labels + # that do not match a real pool name + allow(connection).to receive(:list_instances).and_return(instance_list) + allow(connection).to receive(:list_disks).and_return(disk_list) + allow(connection).to receive(:list_snapshots).and_return(snapshot_list) + expect(connection).to receive(:delete_instance) + subject.purge_unconfigured_resources(nil) + end + it 'should attempt to delete unconfigured instances when they have a label that is not a configured pool' do + instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'pool' => 'foobar' })]) + disk_list = MockDiskList.new(items: nil) + snapshot_list = MockSnapshotList.new(items: nil) + allow(connection).to receive(:list_instances).and_return(instance_list) + allow(connection).to receive(:list_disks).and_return(disk_list) + allow(connection).to receive(:list_snapshots).and_return(snapshot_list) + expect(connection).to receive(:delete_instance) + subject.purge_unconfigured_resources(nil) + end + it 'should attempt to delete unconfigured disks 
and snapshots when they do not have a label' do + instance_list = MockInstanceList.new(items: nil) + disk_list = MockDiskList.new(items: [MockDisk.new(name: 'diskfoo')]) + snapshot_list = MockSnapshotList.new(items: [MockSnapshot.new(name: 'snapfoo')]) + allow(connection).to receive(:list_instances).and_return(instance_list) + allow(connection).to receive(:list_disks).and_return(disk_list) + allow(connection).to receive(:list_snapshots).and_return(snapshot_list) + expect(connection).to receive(:delete_disk) + expect(connection).to receive(:delete_snapshot) + subject.purge_unconfigured_resources(nil) + end + end + + context 'with allowlist containing a pool name' do + before(:each) do + allow(subject).to receive(:wait_for_zone_operation) + $allowlist = ['allowed'] + end + it 'should attempt to delete unconfigured instances when they dont have the allowlist label' do + instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'pool' => 'not_this' })]) + disk_list = MockDiskList.new(items: nil) + snapshot_list = MockSnapshotList.new(items: nil) + allow(connection).to receive(:list_instances).and_return(instance_list) + allow(connection).to receive(:list_disks).and_return(disk_list) + allow(connection).to receive(:list_snapshots).and_return(snapshot_list) + expect(connection).to receive(:delete_instance) + subject.purge_unconfigured_resources($allowlist) + end + it 'should ignore unconfigured instances when they have a label that is allowed' do + instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'pool' => 'allowed' })]) + disk_list = MockDiskList.new(items: nil) + snapshot_list = MockSnapshotList.new(items: nil) + allow(connection).to receive(:list_instances).and_return(instance_list) + allow(connection).to receive(:list_disks).and_return(disk_list) + allow(connection).to receive(:list_snapshots).and_return(snapshot_list) + expect(connection).not_to receive(:delete_instance) + subject.purge_unconfigured_resources($allowlist) + end + it 'should ignore unconfigured disks and snapshots when they have a label that is allowed' do + instance_list = MockInstanceList.new(items: nil) + disk_list = MockDiskList.new(items: [MockDisk.new(name: 'diskfoo', labels: { 'pool' => 'allowed' })]) + snapshot_list = MockSnapshotList.new(items: [MockSnapshot.new(name: 'snapfoo', labels: { 'pool' => 'allowed' })]) + allow(connection).to receive(:list_instances).and_return(instance_list) + allow(connection).to receive(:list_disks).and_return(disk_list) + allow(connection).to receive(:list_snapshots).and_return(snapshot_list) + expect(connection).not_to receive(:delete_disk) + expect(connection).not_to receive(:delete_snapshot) + subject.purge_unconfigured_resources($allowlist) + end + it 'should ignore unconfigured item when they have the empty label that is allowed, which means we allow the pool label to not be set' do + $allowlist = ['allowed', ''] + instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'some' => 'not_important' })]) + disk_list = MockDiskList.new(items: [MockDisk.new(name: 'diskfoo', labels: { 'other' => 'thing' })]) + snapshot_list = MockSnapshotList.new(items: [MockSnapshot.new(name: 'snapfoo')]) + allow(connection).to receive(:list_instances).and_return(instance_list) + allow(connection).to receive(:list_disks).and_return(disk_list) + allow(connection).to receive(:list_snapshots).and_return(snapshot_list) + expect(connection).not_to receive(:delete_instance) + expect(connection).not_to 
receive(:delete_disk) + expect(connection).not_to receive(:delete_snapshot) + subject.purge_unconfigured_resources($allowlist) + end + end + + context 'with allowlist containing a pool name and the empty string' do + before(:each) do + allow(subject).to receive(:wait_for_zone_operation) + $allowlist = ['allowed', ''] + end + it 'should attempt to delete unconfigured instances when they dont have the allowlist label' do + instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'pool' => 'not_this' })]) + disk_list = MockDiskList.new(items: nil) + snapshot_list = MockSnapshotList.new(items: nil) + allow(connection).to receive(:list_instances).and_return(instance_list) + allow(connection).to receive(:list_disks).and_return(disk_list) + allow(connection).to receive(:list_snapshots).and_return(snapshot_list) + expect(connection).to receive(:delete_instance) + subject.purge_unconfigured_resources($allowlist) + end + it 'should ignore unconfigured disks and snapshots when they have a label that is allowed' do + instance_list = MockInstanceList.new(items: nil) + disk_list = MockDiskList.new(items: [MockDisk.new(name: 'diskfoo', labels: { 'pool' => 'allowed' })]) + snapshot_list = MockSnapshotList.new(items: [MockSnapshot.new(name: 'snapfoo', labels: { 'pool' => 'allowed' })]) + allow(connection).to receive(:list_instances).and_return(instance_list) + allow(connection).to receive(:list_disks).and_return(disk_list) + allow(connection).to receive(:list_snapshots).and_return(snapshot_list) + expect(connection).not_to receive(:delete_disk) + expect(connection).not_to receive(:delete_snapshot) + subject.purge_unconfigured_resources($allowlist) + end + it 'should ignore unconfigured item when they have the empty label that is allowed, which means we allow the pool label to not be set' do + instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'some' => 'not_important' })]) + disk_list = MockDiskList.new(items: [MockDisk.new(name: 'diskfoo', labels: { 'other' => 'thing' })]) + snapshot_list = MockSnapshotList.new(items: [MockSnapshot.new(name: 'snapfoo')]) + allow(connection).to receive(:list_instances).and_return(instance_list) + allow(connection).to receive(:list_disks).and_return(disk_list) + allow(connection).to receive(:list_snapshots).and_return(snapshot_list) + expect(connection).not_to receive(:delete_instance) + expect(connection).not_to receive(:delete_disk) + expect(connection).not_to receive(:delete_snapshot) + subject.purge_unconfigured_resources($allowlist) + end + end + + context 'with allowlist containing a a fully qualified label that is not pool' do + before(:each) do + allow(subject).to receive(:wait_for_zone_operation) + $allowlist = ['user=Bob'] + end + it 'should attempt to delete unconfigured instances when they dont have the allowlist label' do + instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'pool' => 'not_this' })]) + disk_list = MockDiskList.new(items: nil) + snapshot_list = MockSnapshotList.new(items: nil) + allow(connection).to receive(:list_instances).and_return(instance_list) + allow(connection).to receive(:list_disks).and_return(disk_list) + allow(connection).to receive(:list_snapshots).and_return(snapshot_list) + expect(connection).to receive(:delete_instance) + subject.purge_unconfigured_resources($allowlist) + end + it 'should ignore unconfigured item when they match the fully qualified label' do + instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', 
labels: { 'some' => 'not_important', 'user' => 'bob' })]) + disk_list = MockDiskList.new(items: [MockDisk.new(name: 'diskfoo', labels: { 'other' => 'thing', 'user' => 'bob' })]) + snapshot_list = MockSnapshotList.new(items: [MockSnapshot.new(name: 'snapfoo', labels: { 'user' => 'bob' })]) + allow(connection).to receive(:list_instances).and_return(instance_list) + allow(connection).to receive(:list_disks).and_return(disk_list) + allow(connection).to receive(:list_snapshots).and_return(snapshot_list) + expect(connection).not_to receive(:delete_instance) + expect(connection).not_to receive(:delete_disk) + expect(connection).not_to receive(:delete_snapshot) + subject.purge_unconfigured_resources($allowlist) + end + end + + it 'should raise any errors' do + expect(subject).to receive(:provided_pools).and_throw('mockerror') + expect { subject.purge_unconfigured_resources(nil) }.to raise_error(/mockerror/) + end + end + + describe '#get_current_user' do + it 'should downcase and replace invalid chars with dashes' do + redis_connection_pool.with_metrics do |redis| + redis.hset("vmpooler__vm__#{vmname}", 'token:user', 'BOBBY.PUPPET') + expect(subject.get_current_user(vmname)).to eq('bobby-puppet') + end + end + + it 'returns "" for nil values' do + redis_connection_pool.with_metrics do |_redis| + expect(subject.get_current_user(vmname)).to eq('') + end + end + end +end diff --git a/spec/vmpooler-provider-aws/vmpooler_provider_aws_spec.rb b/spec/vmpooler-provider-aws/vmpooler_provider_aws_spec.rb new file mode 100644 index 0000000..9ccdba9 --- /dev/null +++ b/spec/vmpooler-provider-aws/vmpooler_provider_aws_spec.rb @@ -0,0 +1,9 @@ +require 'rspec' + +describe 'VmpoolerProviderAws' do + context 'when creating class ' do + it 'sets a version' do + expect(VmpoolerProviderAws::VERSION).not_to be_nil + end + end +end \ No newline at end of file diff --git a/vmpooler-provider-aws.gemspec b/vmpooler-provider-aws.gemspec new file mode 100644 index 0000000..0b9e1e5 --- /dev/null +++ b/vmpooler-provider-aws.gemspec @@ -0,0 +1,33 @@ +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) +require 'vmpooler-provider-aws/version' + +Gem::Specification.new do |s| + s.name = 'vmpooler-provider-aws' + s.version = VmpoolerProviderAws::VERSION + s.authors = ['Puppet'] + s.email = ['support@puppet.com'] + + s.summary = 'AWS provider for VMPooler' + s.homepage = 'https://github.com/puppetlabs/vmpooler-provider-aws' + s.license = 'Apache-2.0' + s.required_ruby_version = Gem::Requirement.new('>= 2.3.0') + + s.files = Dir[ "lib/**/*" ] + s.require_paths = ["lib"] + s.add_dependency 'aws-sdk-ec2', '~> 1' + s.add_dependency 'net-ssh', '~> 6.2.0.rc2' + + s.add_development_dependency 'vmpooler', '>= 1.3.0', '~> 2.3' + + # Testing dependencies + s.add_development_dependency 'climate_control', '>= 0.2.0' + s.add_development_dependency 'mock_redis', '>= 0.17.0' + s.add_development_dependency 'pry' + s.add_development_dependency 'rack-test', '>= 0.6' + s.add_development_dependency 'rspec', '>= 3.2' + s.add_development_dependency 'rubocop', '~> 1.1.0' + s.add_development_dependency 'simplecov', '>= 0.11.2' + s.add_development_dependency 'thor', '~> 1.0', '>= 1.0.1' + s.add_development_dependency 'yarjuf', '>= 2.0' +end diff --git a/vmpooler.yaml.example b/vmpooler.yaml.example new file mode 100644 index 0000000..3560c59 --- /dev/null +++ b/vmpooler.yaml.example @@ -0,0 +1,165 @@ +--- +:providers: +# :providers: +# +# This section contains the VM providers for VMs and Pools +# The 
currently supported backing services are:
+#  - vsphere
+#  - dummy
+#  - gce
+#  - aws
+#
+#   - provider_class
+#     For multiple providers, specify one of the supported backing services (vsphere, dummy, gce or aws)
+#     (optional: will default to its parent :key: name, e.g. 'aws')
+#
+#   - purge_unconfigured_resources
+#     Enable purging of VMs, disks and snapshots
+#     By default this will purge resources without a "pool" label, or with a "pool" label whose value does not match a configured pool
+#     An optional allowlist can be provided to ignore purging certain VMs based on pool labels
+#     Setting this on the provider will enable purging for the provider
+#     Expects a boolean value
+#     (optional; default: false)
+#
+#   - resources_allowlist
+#     Specify labels that should be ignored when purging VMs. For example, if a VM's 'pool' label is
+#     set to 'donotdelete' and there is no pool with that name configured, it would normally be purged,
+#     unless you add a resources_allowlist entry "donotdelete", in which case it is ignored and not purged.
+#     Additionally, the empty string ("") has a special meaning: VMs that do not have the "pool" label are not purged.
+#     If you want to ignore VMs with an arbitrary label, include it in the allowlist as a string with the separator "="
+#     between the label name and value, e.g. user=bob would ignore VMs that have the label "user" with the value "bob".
+#     If any one of the above conditions is met, the resource is ignored and not purged.
+#     This option is only evaluated when 'purge_unconfigured_resources' is enabled.
+#     Expects an array of strings specifying the allowlisted labels by name. The strings should be all lower case, since
+#     uppercase characters are not allowed in labels.
+#     (optional; default: nil)
+#
+# If you want to support more than one provider with different parameters, you have to specify the
+# backing service in the provider_class configuration parameter, for example 'vsphere' or 'dummy'. Each pool can specify
+# the provider to use.
+#
+# Multiple providers example:
+
+  :aws1:
+    provider_class: 'aws'
+    zone: 'us-west-2b'
+    region: 'us-west-2'
+  :aws2:
+    provider_class: 'aws'
+    zone: 'us-west-2b'
+    region: 'us-west-2'
+    resources_allowlist:
+      - "user=bob"
+      - ""
+      - "custom-pool"
+
+# :aws:
+#
+# This section contains the global variables for the aws provider.
+# Some of them can be overwritten at the pool level.
+#
+# Available configuration parameters:
+#
+#   - region
+#     The AWS region name to use when creating/deleting resources
+#     (required)
+#   - zone
+#     The AWS availability zone name to use when creating/deleting resources (VMs, disks, etc.)
+#     Can be overwritten at the pool level
+#     (required)
+#   - amisize
+#     The EC2 instance type to use, e.g. a1.large
+#     Can be overwritten at the pool level
+#     (required)
+#   - volume_size
+#     A custom root volume size to use, in GB; the default is whatever the default for the AMI used is
+#     (optional)
+#   - dns_zone_resource_name
+#     The name given to the DNS zone resource. This is not the domain, but the name identifier of a zone, e.g. example-com
+#     (optional) When not set, the DNS setup / teardown is skipped
+#   - domain
+#     Overwrites the global domain parameter. This should match the DNS zone domain set for the dns_zone_resource_name.
+#     It is used to infer the domain part of the FQDN, i.e. $vm_name.$domain
+#     When configuring multiple providers at the same time, this value should be set for each pool.
+#     (optional) If not explicitly set, the FQDN is inferred using the global 'domain' config parameter
+# Example:
+
+  :aws:
+    region: 'us-west-2'
+    zone: 'us-west-2b'
+    amisize: 'a1.small'
+    volume_size: '10'
+    dns_zone_resource_name: 'subdomain-example-com'
+    domain: 'subdomain.example.com'
+
+# :pools:
+#
+# This section contains a list of virtual machine 'pools' for vmpooler to
+# create and maintain.
+#
+# Available configuration parameters (per-pool):
+#
+#   - name
+#     The name of the pool.
+#     (required)
+#
+#   - alias
+#     Other names this pool can be requested as.
+#     (optional)
+#
+#   - template
+#     The template or virtual machine target to spawn clones from; in AWS this means an AMI ID.
+#     (required)
+#
+#   - size
+#     The number of waiting VMs to keep in a pool.
+#     (required)
+#
+#   - provider
+#     The name of the VM provider which manages this pool. This should match
+#     a name in the :providers: section above, e.g. vsphere
+#     (required; will default to vsphere for backwards compatibility)
+#     If you have more than one provider, this is where you would choose which
+#     one to use for this pool
+#
+#   - timeout
+#     How long (in minutes) before marking a clone in 'pending' queues as 'failed' and retrying.
+#     This setting overrides any globally-configured timeout setting.
+#     (optional; default: '15')
+#
+#   - ready_ttl
+#     How long (in minutes) to keep VMs in 'ready' queues before destroying.
+#     (optional; default: no limit)
+#
+#   - check_loop_delay_min (optional; default: 5) seconds
+#   - check_loop_delay_max (optional; default: same as check_loop_delay_min) seconds
+#   - check_loop_delay_decay (optional; default: 2.0) Must be greater than 1.0
+#     See the :config: section for information about these settings
+#
+# Provider-specific pool settings
+#
+# AWS provider
+#   - zone
+#     The zone to create the VMs in
+#     (optional: default is the global provider zone value)
+#   - amisize
+#     The EC2 instance type to use, e.g. a1.large
+#     (optional: default is the global provider amisize value)
+#   - volume_size
+#     (optional: default is the global provider volume_size value)
+#   - provision
+#     Set to true to run the extra AWS setup steps (SSH keys, etc.) once the VM is available
+#     (optional: defaults to false)
+# Example:
+
+:pools:
+  - name: 'almalinux-x86_64'
+    alias: [ 'almalinux-64', 'almalinux-amd64' ]
+    template: 'ami-foobar1234'
+    size: 5
+    timeout: 15
+    ready_ttl: 1440
+    provider: aws
+    zone: 'us-west-2c'
+    amisize: 'a1.large'
+    volume_size: '20'

From bd1b21736ab29fff6c05014ae139081a553b568b Mon Sep 17 00:00:00 2001
From: Samuel Beaulieu
Date: Wed, 6 Jul 2022 13:46:27 -0500
Subject: [PATCH 2/4] fix workflow release for gha

---
 .github/workflows/release.yml   |   4 +-
 spec/unit/providers/gce_spec.rb | 767 --------------------------------
 2 files changed, 2 insertions(+), 769 deletions(-)
 delete mode 100644 spec/unit/providers/gce_spec.rb

diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index e71ec28..7490f41 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -5,13 +5,13 @@ on: workflow_dispatch
 jobs:
   release:
     runs-on: ubuntu-latest
-    if: github.repository == 'puppetlabs/vmpooler-provider-gce'
+    if: github.repository == 'puppetlabs/vmpooler-provider-aws'
     steps:
       - uses: actions/checkout@v2
       - name: Get Version
         id: gv
         run: |
-          echo "::set-output name=ver::$(grep VERSION lib/vmpooler-provider-gce/version.rb |rev |cut -d "'" -f2 |rev)"
+          echo "::set-output name=ver::$(grep VERSION lib/vmpooler-provider-aws/version.rb |rev |cut -d "'" -f2 |rev)"
      - name: Tag Release
        uses: 
ncipollo/release-action@v1 with: diff --git a/spec/unit/providers/gce_spec.rb b/spec/unit/providers/gce_spec.rb deleted file mode 100644 index cc43c0c..0000000 --- a/spec/unit/providers/gce_spec.rb +++ /dev/null @@ -1,767 +0,0 @@ -require 'spec_helper' -require 'mock_redis' -require 'vmpooler/providers/gce' - -RSpec::Matchers.define :relocation_spec_with_host do |value| - match { |actual| actual[:spec].host == value } -end - -describe 'Vmpooler::PoolManager::Provider::Gce' do - let(:logger) { MockLogger.new } - let(:metrics) { Vmpooler::Metrics::DummyStatsd.new } - let(:poolname) { 'debian-9' } - let(:provider_options) { { 'param' => 'value' } } - let(:project) { 'vmpooler-test' } - let(:zone) { 'us-west1-b' } - let(:config) { YAML.load(<<~EOT - --- - :config: - max_tries: 3 - retry_factor: 10 - :providers: - :gce: - connection_pool_timeout: 1 - project: '#{project}' - zone: '#{zone}' - network_name: global/networks/default - :pools: - - name: '#{poolname}' - alias: [ 'mockpool' ] - template: 'projects/debian-cloud/global/images/family/debian-9' - size: 5 - timeout: 10 - ready_ttl: 1440 - provider: 'gce' - machine_type: 'zones/#{zone}/machineTypes/e2-micro' -EOT - ) - } - - let(:vmname) { 'vm17' } - let(:connection) { MockComputeServiceConnection.new } - let(:redis_connection_pool) do - Vmpooler::PoolManager::GenericConnectionPool.new( - metrics: metrics, - connpool_type: 'redis_connection_pool', - connpool_provider: 'testprovider', - size: 1, - timeout: 5 - ) { MockRedis.new } - end - - subject { Vmpooler::PoolManager::Provider::Gce.new(config, logger, metrics, redis_connection_pool, 'gce', provider_options) } - - before(:each) { allow(subject).to receive(:dns).and_return(MockDNS.new()) } - - describe '#name' do - it 'should be gce' do - expect(subject.name).to eq('gce') - end - end - - describe '#manual tests live' do - context 'in itsysops' do - before(:each) { allow(subject).to receive(:dns).and_call_original } - let(:vmname) { "instance-24" } - let(:project) { 'vmpooler-test' } - let(:config) { YAML.load(<<~EOT - --- - :config: - max_tries: 3 - retry_factor: 10 - :providers: - :gce: - connection_pool_timeout: 1 - project: '#{project}' - zone: '#{zone}' - network_name: 'projects/itsysopsnetworking/global/networks/shared1' - dns_zone_resource_name: 'test-vmpooler-puppet-net' - domain: 'test.vmpooler.puppet.net' - :pools: - - name: '#{poolname}' - alias: [ 'mockpool' ] - template: 'projects/debian-cloud/global/images/family/debian-9' - size: 5 - timeout: 10 - ready_ttl: 1440 - provider: 'gce' - subnetwork_name: 'projects/itsysopsnetworking/regions/us-west1/subnetworks/vmpooler-test' - machine_type: 'zones/#{zone}/machineTypes/e2-micro' -EOT - ) } - skip 'gets a vm' do - result = subject.create_vm(poolname, vmname) - #result = subject.destroy_vm(poolname, vmname) - subject.get_vm(poolname, vmname) - #subject.dns_teardown({'name' => vmname}) - # subject.dns_setup({'name' => vmname, 'ip' => '1.2.3.5'}) - end - end - end - - describe '#vms_in_pool' do - let(:pool_config) { config[:pools][0] } - - before(:each) do - allow(subject).to receive(:connect_to_gce).and_return(connection) - end - - context 'Given an empty pool folder' do - it 'should return an empty array' do - instance_list = MockInstanceList.new(items: nil) - allow(connection).to receive(:list_instances).and_return(instance_list) - result = subject.vms_in_pool(poolname) - - expect(result).to eq([]) - end - end - - context 'Given a pool folder with many VMs' do - let(:expected_vm_list) do - [ - { 'name' => 'vm1' }, - { 'name' => 
'vm2' }, - { 'name' => 'vm3' } - ] - end - before(:each) do - instance_list = MockInstanceList.new(items: []) - expected_vm_list.each do |vm_hash| - mock_vm = MockInstance.new(name: vm_hash['name']) - instance_list.items << mock_vm - end - - expect(connection).to receive(:list_instances).and_return(instance_list) - end - - it 'should list all VMs in the VM folder for the pool' do - result = subject.vms_in_pool(poolname) - - expect(result).to eq(expected_vm_list) - end - end - end - - describe '#get_vm' do - before(:each) do - allow(subject).to receive(:connect_to_gce).and_return(connection) - end - - context 'when VM does not exist' do - it 'should return nil' do - allow(connection).to receive(:get_instance).and_raise(create_google_client_error(404, "The resource 'projects/#{project}/zones/#{zone}/instances/#{vmname}' was not found")) - expect(subject.get_vm(poolname, vmname)).to be_nil - end - end - - context 'when VM exists but is missing information' do - before(:each) do - allow(connection).to receive(:get_instance).and_return(MockInstance.new(name: vmname)) - end - - it 'should return a hash' do - expect(subject.get_vm(poolname, vmname)).to be_kind_of(Hash) - end - - it 'should return the VM name' do - result = subject.get_vm(poolname, vmname) - - expect(result['name']).to eq(vmname) - end - - %w[hostname boottime zone status].each do |testcase| - it "should return nil for #{testcase}" do - result = subject.get_vm(poolname, vmname) - - expect(result[testcase]).to be_nil - end - end - end - - context 'when VM exists and contains all information' do - let(:vm_hostname) { "#{vmname}.demo.local" } - let(:boot_time) { Time.now } - let(:vm_object) do - MockInstance.new( - name: vmname, - hostname: vm_hostname, - labels: { 'pool' => poolname }, - creation_timestamp: boot_time, - status: 'RUNNING', - zone: zone, - machine_type: "zones/#{zone}/machineTypes/e2-micro" - ) - end - let(:pool_info) { config[:pools][0] } - - before(:each) do - allow(connection).to receive(:get_instance).and_return(vm_object) - end - - it 'should return a hash' do - expect(subject.get_vm(poolname, vmname)).to be_kind_of(Hash) - end - - it 'should return the VM name' do - result = subject.get_vm(poolname, vmname) - - expect(result['name']).to eq(vmname) - end - - it 'should return the VM hostname' do - result = subject.get_vm(poolname, vmname) - - expect(result['hostname']).to eq(vm_hostname) - end - - it 'should return the template name' do - result = subject.get_vm(poolname, vmname) - - expect(result['template']).to eq(pool_info['template']) - end - - it 'should return the pool name' do - result = subject.get_vm(poolname, vmname) - - expect(result['poolname']).to eq(pool_info['name']) - end - - it 'should return the boot time' do - result = subject.get_vm(poolname, vmname) - - expect(result['boottime']).to eq(boot_time) - end - end - end - - describe '#create_vm' do - before(:each) do - allow(subject).to receive(:connect_to_gce).and_return(connection) - end - - context 'Given an invalid pool name' do - it 'should raise an error' do - expect { subject.create_vm('missing_pool', vmname) }.to raise_error(/missing_pool does not exist/) - end - end - - context 'Given a template VM that does not exist' do - before(:each) do - config[:pools][0]['template'] = 'Templates/missing_template' - # result = MockResult.new - # result.status = "PENDING" - # errors = MockOperationError - # errors << MockOperationErrorError.new(code: "foo", message: "it's missing") - # result.error = errors - allow(connection).to 
receive(:insert_instance).and_raise(create_google_client_error(404, 'The resource \'Templates/missing_template\' was not found')) - end - - it 'should raise an error' do - expect { subject.create_vm(poolname, vmname) }.to raise_error(Google::Apis::ClientError) - end - end - - context 'Given a successful creation' do - before(:each) do - result = MockResult.new - result.status = 'DONE' - allow(connection).to receive(:insert_instance).and_return(result) - end - - it 'should return a hash' do - allow(connection).to receive(:get_instance).and_return(MockInstance.new) - result = subject.create_vm(poolname, vmname) - - expect(result.is_a?(Hash)).to be true - end - - it 'should have the new VM name' do - instance = MockInstance.new(name: vmname) - allow(connection).to receive(:get_instance).and_return(instance) - result = subject.create_vm(poolname, vmname) - - expect(result['name']).to eq(vmname) - end - end - end - - describe '#destroy_vm' do - before(:each) do - allow(subject).to receive(:connect_to_gce).and_return(connection) - end - - context 'Given a missing VM name' do - before(:each) do - allow(connection).to receive(:get_instance).and_raise(create_google_client_error(404, "The resource 'projects/#{project}/zones/#{zone}/instances/#{vmname}' was not found")) - disk_list = MockDiskList.new(items: nil) - allow(connection).to receive(:list_disks).and_return(disk_list) - allow(subject).to receive(:find_all_snapshots).and_return(nil) - end - - it 'should return true' do - expect(connection.should_receive(:delete_instance).never) - expect(subject.destroy_vm(poolname, 'missing_vm')).to be true - end - end - - context 'Given a running VM' do - before(:each) do - instance = MockInstance.new(name: vmname) - allow(connection).to receive(:get_instance).and_return(instance) - result = MockResult.new - result.status = 'DONE' - allow(subject).to receive(:wait_for_operation).and_return(result) - allow(connection).to receive(:delete_instance).and_return(result) - end - - it 'should return true' do - # no dangling disks - disk_list = MockDiskList.new(items: nil) - allow(connection).to receive(:list_disks).and_return(disk_list) - # no dangling snapshots - allow(subject).to receive(:find_all_snapshots).and_return(nil) - expect(subject.destroy_vm(poolname, vmname)).to be true - end - - it 'should delete any dangling disks' do - disk = MockDisk.new(name: vmname) - disk_list = MockDiskList.new(items: [disk]) - allow(connection).to receive(:list_disks).and_return(disk_list) - # no dangling snapshots - allow(subject).to receive(:find_all_snapshots).and_return(nil) - expect(connection).to receive(:delete_disk).with(project, zone, disk.name) - subject.destroy_vm(poolname, vmname) - end - - it 'should delete any dangling snapshots' do - # no dangling disks - disk_list = MockDiskList.new(items: nil) - allow(connection).to receive(:list_disks).and_return(disk_list) - snapshot = MockSnapshot.new(name: "snapshotname-#{vmname}") - allow(subject).to receive(:find_all_snapshots).and_return([snapshot]) - expect(connection).to receive(:delete_snapshot).with(project, snapshot.name) - subject.destroy_vm(poolname, vmname) - end - end - end - - describe '#vm_ready?' 
do - let(:domain) { nil } - context 'When a VM is ready' do - before(:each) do - expect(subject).to receive(:open_socket).with(vmname, domain) - end - - it 'should return true' do - expect(subject.vm_ready?(poolname, vmname)).to be true - end - end - - context 'When an error occurs connecting to the VM' do - before(:each) do - expect(subject).to receive(:open_socket).and_raise(RuntimeError, 'MockError') - end - - it 'should return false' do - expect(subject.vm_ready?(poolname, vmname)).to be false - end - end - end - - describe '#create_disk' do - let(:disk_size) { 10 } - before(:each) do - allow(subject).to receive(:connect_to_gce).and_return(connection) - end - - context 'Given an invalid pool name' do - it 'should raise an error' do - expect { subject.create_disk('missing_pool', vmname, disk_size) }.to raise_error(/missing_pool does not exist/) - end - end - - context 'when VM does not exist' do - before(:each) do - expect(connection).to receive(:get_instance).and_raise(create_google_client_error(404, "The resource 'projects/#{project}/zones/#{zone}/instances/#{vmname}' was not found")) - end - - it 'should raise an error' do - expect { subject.create_disk(poolname, vmname, disk_size) }.to raise_error(/VM #{vmname} .+ does not exist/) - end - end - - context 'when adding the disk raises an error' do - before(:each) do - disk = MockDisk.new(name: vmname) - instance = MockInstance.new(name: vmname, disks: [disk]) - allow(connection).to receive(:get_instance).and_return(instance) - expect(connection).to receive(:insert_disk).and_raise(RuntimeError, 'Mock Disk Error') - end - - it 'should raise an error' do - expect { subject.create_disk(poolname, vmname, disk_size) }.to raise_error(/Mock Disk Error/) - end - end - - context 'when adding the disk succeeds' do - before(:each) do - disk = MockDisk.new(name: vmname) - instance = MockInstance.new(name: vmname, disks: [disk]) - allow(connection).to receive(:get_instance).and_return(instance) - result = MockResult.new - result.status = 'DONE' - allow(connection).to receive(:insert_disk).and_return(result) - allow(subject).to receive(:wait_for_operation).and_return(result) - new_disk = MockDisk.new(name: "#{vmname}-disk1", self_link: "/foo/bar/baz/#{vmname}-disk1") - allow(connection).to receive(:get_disk).and_return(new_disk) - allow(connection).to receive(:attach_disk).and_return(result) - end - - it 'should return true' do - expect(subject.create_disk(poolname, vmname, disk_size)).to be true - end - end - end - - describe '#create_snapshot' do - let(:snapshot_name) { 'snapshot' } - - before(:each) do - allow(subject).to receive(:connect_to_gce).and_return(connection) - end - - context 'when VM does not exist' do - before(:each) do - allow(connection).to receive(:get_instance).and_raise(create_google_client_error(404, "The resource 'projects/#{project}/zones/#{zone}/instances/#{vmname}' was not found")) - end - - it 'should raise an error' do - expect { subject.create_snapshot(poolname, vmname, snapshot_name) }.to raise_error(/VM #{vmname} .+ does not exist/) - end - end - - context 'when snapshot already exists' do - it 'should raise an error' do - disk = MockDisk.new(name: vmname) - instance = MockInstance.new(name: vmname, disks: [disk]) - allow(connection).to receive(:get_instance).and_return(instance) - snapshots = [MockSnapshot.new(name: snapshot_name)] - allow(subject).to receive(:find_snapshot).and_return(snapshots) - expect { subject.create_snapshot(poolname, vmname, snapshot_name) }.to raise_error(/Snapshot #{snapshot_name} .+ already 
exists /) - end - end - - context 'when snapshot raises an error' do - before(:each) do - attached_disk = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}") - instance = MockInstance.new(name: vmname, disks: [attached_disk]) - allow(connection).to receive(:get_instance).and_return(instance) - snapshots = nil - allow(subject).to receive(:find_snapshot).and_return(snapshots) - allow(connection).to receive(:create_disk_snapshot).and_raise(RuntimeError, 'Mock Snapshot Error') - end - - it 'should raise an error' do - expect { subject.create_snapshot(poolname, vmname, snapshot_name) }.to raise_error(/Mock Snapshot Error/) - end - end - - context 'when snapshot succeeds' do - before(:each) do - attached_disk = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}") - instance = MockInstance.new(name: vmname, disks: [attached_disk]) - allow(connection).to receive(:get_instance).and_return(instance) - snapshots = nil - allow(subject).to receive(:find_snapshot).and_return(snapshots) - result = MockResult.new - result.status = 'DONE' - allow(connection).to receive(:create_disk_snapshot).and_return(result) - end - - it 'should return true' do - expect(subject.create_snapshot(poolname, vmname, snapshot_name)).to be true - end - - it 'should snapshot each attached disk' do - attached_disk = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}") - attached_disk2 = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}-disk1") - instance = MockInstance.new(name: vmname, disks: [attached_disk, attached_disk2]) - allow(connection).to receive(:get_instance).and_return(instance) - - expect(connection.should_receive(:create_disk_snapshot).twice) - subject.create_snapshot(poolname, vmname, snapshot_name) - end - end - end - - describe '#revert_snapshot' do - let(:snapshot_name) { 'snapshot' } - - before(:each) do - allow(subject).to receive(:connect_to_gce).and_return(connection) - end - - context 'when VM does not exist' do - before(:each) do - allow(connection).to receive(:get_instance).and_raise(create_google_client_error(404, "The resource 'projects/#{project}/zones/#{zone}/instances/#{vmname}' was not found")) - end - - it 'should raise an error' do - expect { subject.revert_snapshot(poolname, vmname, snapshot_name) }.to raise_error(/VM #{vmname} .+ does not exist/) - end - end - - context 'when snapshot does not exist' do - it 'should raise an error' do - attached_disk = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}") - instance = MockInstance.new(name: vmname, disks: [attached_disk]) - allow(connection).to receive(:get_instance).and_return(instance) - snapshots = nil - allow(subject).to receive(:find_snapshot).and_return(snapshots) - expect { subject.revert_snapshot(poolname, vmname, snapshot_name) }.to raise_error(/Snapshot #{snapshot_name} .+ does not exist /) - end - end - - context 'when instance does not have attached disks' do - it 'should skip detaching/deleting disk' do - instance = MockInstance.new(name: vmname, disks: nil) - allow(connection).to receive(:get_instance).and_return(instance) - snapshots = [] - allow(subject).to receive(:find_snapshot).and_return(snapshots) - allow(connection).to receive(:stop_instance) - allow(subject).to receive(:wait_for_operation) - allow(connection).to receive(:start_instance) - expect(subject).not_to receive(:detach_disk) - expect(subject).not_to receive(:delete_disk) - subject.revert_snapshot(poolname, vmname, snapshot_name) - end - end - - context 
'when revert to snapshot raises an error' do - before(:each) do - attached_disk = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}") - instance = MockInstance.new(name: vmname, disks: [attached_disk]) - allow(connection).to receive(:get_instance).and_return(instance) - snapshots = [MockSnapshot.new(name: snapshot_name)] - allow(subject).to receive(:find_snapshot).and_return(snapshots) - allow(connection).to receive(:stop_instance) - allow(subject).to receive(:wait_for_operation) - expect(connection).to receive(:detach_disk).and_raise(RuntimeError, 'Mock Snapshot Error') - end - - it 'should raise an error' do - expect { subject.revert_snapshot(poolname, vmname, snapshot_name) }.to raise_error(/Mock Snapshot Error/) - end - end - - context 'when revert to snapshot succeeds' do - before(:each) do - attached_disk = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}") - instance = MockInstance.new(name: vmname, disks: [attached_disk]) - allow(connection).to receive(:get_instance).and_return(instance) - snapshots = [MockSnapshot.new(name: snapshot_name, self_link: "foo/bar/baz/snapshot/#{snapshot_name}", labels: { 'diskname' => vmname })] - allow(subject).to receive(:find_snapshot).and_return(snapshots) - allow(connection).to receive(:stop_instance) - allow(subject).to receive(:wait_for_operation) - allow(connection).to receive(:detach_disk) - allow(connection).to receive(:delete_disk) - new_disk = MockDisk.new(name: vmname, self_link: "foo/bar/baz/disk/#{vmname}") - allow(connection).to receive(:insert_disk) - allow(connection).to receive(:get_disk).and_return(new_disk) - allow(connection).to receive(:attach_disk) - allow(connection).to receive(:start_instance) - end - - it 'should return true' do - expect(subject.revert_snapshot(poolname, vmname, snapshot_name)).to be true - end - end - end - - describe '#purge_unconfigured_resources' do - let(:empty_list) { [] } - - before(:each) do - allow(subject).to receive(:connect_to_gce).and_return(connection) - end - - context 'with empty allowlist' do - before(:each) do - allow(subject).to receive(:wait_for_zone_operation) - end - it 'should attempt to delete unconfigured instances when they dont have a label' do - instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo')]) - disk_list = MockDiskList.new(items: nil) - snapshot_list = MockSnapshotList.new(items: nil) - # the instance_list is filtered in the real code, and should only return non-configured VMs based on labels - # that do not match a real pool name - allow(connection).to receive(:list_instances).and_return(instance_list) - allow(connection).to receive(:list_disks).and_return(disk_list) - allow(connection).to receive(:list_snapshots).and_return(snapshot_list) - expect(connection).to receive(:delete_instance) - subject.purge_unconfigured_resources(nil) - end - it 'should attempt to delete unconfigured instances when they have a label that is not a configured pool' do - instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'pool' => 'foobar' })]) - disk_list = MockDiskList.new(items: nil) - snapshot_list = MockSnapshotList.new(items: nil) - allow(connection).to receive(:list_instances).and_return(instance_list) - allow(connection).to receive(:list_disks).and_return(disk_list) - allow(connection).to receive(:list_snapshots).and_return(snapshot_list) - expect(connection).to receive(:delete_instance) - subject.purge_unconfigured_resources(nil) - end - it 'should attempt to delete unconfigured disks 
and snapshots when they do not have a label' do - instance_list = MockInstanceList.new(items: nil) - disk_list = MockDiskList.new(items: [MockDisk.new(name: 'diskfoo')]) - snapshot_list = MockSnapshotList.new(items: [MockSnapshot.new(name: 'snapfoo')]) - allow(connection).to receive(:list_instances).and_return(instance_list) - allow(connection).to receive(:list_disks).and_return(disk_list) - allow(connection).to receive(:list_snapshots).and_return(snapshot_list) - expect(connection).to receive(:delete_disk) - expect(connection).to receive(:delete_snapshot) - subject.purge_unconfigured_resources(nil) - end - end - - context 'with allowlist containing a pool name' do - before(:each) do - allow(subject).to receive(:wait_for_zone_operation) - $allowlist = ['allowed'] - end - it 'should attempt to delete unconfigured instances when they dont have the allowlist label' do - instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'pool' => 'not_this' })]) - disk_list = MockDiskList.new(items: nil) - snapshot_list = MockSnapshotList.new(items: nil) - allow(connection).to receive(:list_instances).and_return(instance_list) - allow(connection).to receive(:list_disks).and_return(disk_list) - allow(connection).to receive(:list_snapshots).and_return(snapshot_list) - expect(connection).to receive(:delete_instance) - subject.purge_unconfigured_resources($allowlist) - end - it 'should ignore unconfigured instances when they have a label that is allowed' do - instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'pool' => 'allowed' })]) - disk_list = MockDiskList.new(items: nil) - snapshot_list = MockSnapshotList.new(items: nil) - allow(connection).to receive(:list_instances).and_return(instance_list) - allow(connection).to receive(:list_disks).and_return(disk_list) - allow(connection).to receive(:list_snapshots).and_return(snapshot_list) - expect(connection).not_to receive(:delete_instance) - subject.purge_unconfigured_resources($allowlist) - end - it 'should ignore unconfigured disks and snapshots when they have a label that is allowed' do - instance_list = MockInstanceList.new(items: nil) - disk_list = MockDiskList.new(items: [MockDisk.new(name: 'diskfoo', labels: { 'pool' => 'allowed' })]) - snapshot_list = MockSnapshotList.new(items: [MockSnapshot.new(name: 'snapfoo', labels: { 'pool' => 'allowed' })]) - allow(connection).to receive(:list_instances).and_return(instance_list) - allow(connection).to receive(:list_disks).and_return(disk_list) - allow(connection).to receive(:list_snapshots).and_return(snapshot_list) - expect(connection).not_to receive(:delete_disk) - expect(connection).not_to receive(:delete_snapshot) - subject.purge_unconfigured_resources($allowlist) - end - it 'should ignore unconfigured item when they have the empty label that is allowed, which means we allow the pool label to not be set' do - $allowlist = ['allowed', ''] - instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'some' => 'not_important' })]) - disk_list = MockDiskList.new(items: [MockDisk.new(name: 'diskfoo', labels: { 'other' => 'thing' })]) - snapshot_list = MockSnapshotList.new(items: [MockSnapshot.new(name: 'snapfoo')]) - allow(connection).to receive(:list_instances).and_return(instance_list) - allow(connection).to receive(:list_disks).and_return(disk_list) - allow(connection).to receive(:list_snapshots).and_return(snapshot_list) - expect(connection).not_to receive(:delete_instance) - expect(connection).not_to 
receive(:delete_disk) - expect(connection).not_to receive(:delete_snapshot) - subject.purge_unconfigured_resources($allowlist) - end - end - - context 'with allowlist containing a pool name and the empty string' do - before(:each) do - allow(subject).to receive(:wait_for_zone_operation) - $allowlist = ['allowed', ''] - end - it 'should attempt to delete unconfigured instances when they dont have the allowlist label' do - instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'pool' => 'not_this' })]) - disk_list = MockDiskList.new(items: nil) - snapshot_list = MockSnapshotList.new(items: nil) - allow(connection).to receive(:list_instances).and_return(instance_list) - allow(connection).to receive(:list_disks).and_return(disk_list) - allow(connection).to receive(:list_snapshots).and_return(snapshot_list) - expect(connection).to receive(:delete_instance) - subject.purge_unconfigured_resources($allowlist) - end - it 'should ignore unconfigured disks and snapshots when they have a label that is allowed' do - instance_list = MockInstanceList.new(items: nil) - disk_list = MockDiskList.new(items: [MockDisk.new(name: 'diskfoo', labels: { 'pool' => 'allowed' })]) - snapshot_list = MockSnapshotList.new(items: [MockSnapshot.new(name: 'snapfoo', labels: { 'pool' => 'allowed' })]) - allow(connection).to receive(:list_instances).and_return(instance_list) - allow(connection).to receive(:list_disks).and_return(disk_list) - allow(connection).to receive(:list_snapshots).and_return(snapshot_list) - expect(connection).not_to receive(:delete_disk) - expect(connection).not_to receive(:delete_snapshot) - subject.purge_unconfigured_resources($allowlist) - end - it 'should ignore unconfigured item when they have the empty label that is allowed, which means we allow the pool label to not be set' do - instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'some' => 'not_important' })]) - disk_list = MockDiskList.new(items: [MockDisk.new(name: 'diskfoo', labels: { 'other' => 'thing' })]) - snapshot_list = MockSnapshotList.new(items: [MockSnapshot.new(name: 'snapfoo')]) - allow(connection).to receive(:list_instances).and_return(instance_list) - allow(connection).to receive(:list_disks).and_return(disk_list) - allow(connection).to receive(:list_snapshots).and_return(snapshot_list) - expect(connection).not_to receive(:delete_instance) - expect(connection).not_to receive(:delete_disk) - expect(connection).not_to receive(:delete_snapshot) - subject.purge_unconfigured_resources($allowlist) - end - end - - context 'with allowlist containing a a fully qualified label that is not pool' do - before(:each) do - allow(subject).to receive(:wait_for_zone_operation) - $allowlist = ['user=Bob'] - end - it 'should attempt to delete unconfigured instances when they dont have the allowlist label' do - instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'pool' => 'not_this' })]) - disk_list = MockDiskList.new(items: nil) - snapshot_list = MockSnapshotList.new(items: nil) - allow(connection).to receive(:list_instances).and_return(instance_list) - allow(connection).to receive(:list_disks).and_return(disk_list) - allow(connection).to receive(:list_snapshots).and_return(snapshot_list) - expect(connection).to receive(:delete_instance) - subject.purge_unconfigured_resources($allowlist) - end - it 'should ignore unconfigured item when they match the fully qualified label' do - instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', 
labels: { 'some' => 'not_important', 'user' => 'bob' })]) - disk_list = MockDiskList.new(items: [MockDisk.new(name: 'diskfoo', labels: { 'other' => 'thing', 'user' => 'bob' })]) - snapshot_list = MockSnapshotList.new(items: [MockSnapshot.new(name: 'snapfoo', labels: { 'user' => 'bob' })]) - allow(connection).to receive(:list_instances).and_return(instance_list) - allow(connection).to receive(:list_disks).and_return(disk_list) - allow(connection).to receive(:list_snapshots).and_return(snapshot_list) - expect(connection).not_to receive(:delete_instance) - expect(connection).not_to receive(:delete_disk) - expect(connection).not_to receive(:delete_snapshot) - subject.purge_unconfigured_resources($allowlist) - end - end - - it 'should raise any errors' do - expect(subject).to receive(:provided_pools).and_throw('mockerror') - expect { subject.purge_unconfigured_resources(nil) }.to raise_error(/mockerror/) - end - end - - describe '#get_current_user' do - it 'should downcase and replace invalid chars with dashes' do - redis_connection_pool.with_metrics do |redis| - redis.hset("vmpooler__vm__#{vmname}", 'token:user', 'BOBBY.PUPPET') - expect(subject.get_current_user(vmname)).to eq('bobby-puppet') - end - end - - it 'returns "" for nil values' do - redis_connection_pool.with_metrics do |_redis| - expect(subject.get_current_user(vmname)).to eq('') - end - end - end -end From ee36ee868d5a08121a5ed2b02591d9802d35a01f Mon Sep 17 00:00:00 2001 From: Samuel Beaulieu Date: Wed, 6 Jul 2022 14:34:12 -0500 Subject: [PATCH 3/4] fix rubocop offenses --- .rubocop.yml | 2 + lib/vmpooler/aws_setup.rb | 45 ++++----- lib/vmpooler/providers/aws.rb | 167 +++++++++++++++----------------- spec/ec2_helper.rb | 2 +- spec/unit/providers/aws_spec.rb | 6 +- 5 files changed, 109 insertions(+), 113 deletions(-) diff --git a/.rubocop.yml b/.rubocop.yml index 3333234..af4d2a2 100644 --- a/.rubocop.yml +++ b/.rubocop.yml @@ -50,4 +50,6 @@ Metrics/ParameterLists: Layout/LineLength: Enabled: false Metrics/BlockLength: + Enabled: false +Style/CaseLikeIf: Enabled: false \ No newline at end of file diff --git a/lib/vmpooler/aws_setup.rb b/lib/vmpooler/aws_setup.rb index bd8f7f4..94a9b99 100644 --- a/lib/vmpooler/aws_setup.rb +++ b/lib/vmpooler/aws_setup.rb @@ -1,15 +1,17 @@ +# frozen_string_literal: true + require 'net/ssh' -# This class connects to existing running VMs via NET:SSH -# it uses a local key to do so and then setup SSHD on the hosts to enable -# dev and CI users to connect. module Vmpooler class PoolManager + # This class connects to existing running VMs via NET:SSH + # it uses a local key to do so and then setup SSHD on the hosts to enable + # dev and CI users to connect. 
class AwsSetup - ROOT_KEYS_SCRIPT = ENV["ROOT_KEYS_SCRIPT"] + ROOT_KEYS_SCRIPT = ENV['ROOT_KEYS_SCRIPT'] ROOT_KEYS_SYNC_CMD = "curl -k -o - -L #{ROOT_KEYS_SCRIPT} | %s" def self.setup_node_by_ssh(host, platform) - @key_file = ENV["KEY_FILE_LOCATION"] || '/app/abs/.ssh/abs-aws-ec2.rsa' + @key_file = ENV['KEY_FILE_LOCATION'] || '/app/abs/.ssh/abs-aws-ec2.rsa' conn = check_ssh_accepting_connections(host, platform) configure_host(host, platform, conn) end @@ -30,15 +32,14 @@ module Vmpooler def self.get_user(platform) if platform =~ /centos/ - user = 'centos' + 'centos' elsif platform =~ /ubuntu/ - user = 'ubuntu' + 'ubuntu' elsif platform =~ /debian/ - user = 'root' + 'root' else - user = 'ec2-user' + 'ec2-user' end - user end def self.check_ssh_accepting_connections(host, platform) @@ -46,10 +47,9 @@ module Vmpooler begin user = get_user(platform) netssh_jruby_workaround - conn = Net::SSH.start(host, user, :keys => @key_file, :timeout => 10) - return conn - rescue Net::SSH::ConnectionTimeout, Errno::ECONNREFUSED => err - puts "Requested instances do not have sshd ready yet, try again: #{err}" + Net::SSH.start(host, user, keys: @key_file, timeout: 10) + rescue Net::SSH::ConnectionTimeout, Errno::ECONNREFUSED => e + puts "Requested instances do not have sshd ready yet, try again: #{e}" sleep 1 retry if (retries += 1) < 300 end @@ -73,6 +73,7 @@ module Vmpooler ssh.open_channel do |channel| channel.request_pty do |ch, success| raise "can't get pty request" unless success + if platform =~ /centos|el-|redhat|fedora|eos|amazon/ ch.exec('sudo -E /sbin/service sshd reload') elsif platform =~ /debian|ubuntu|cumulus/ @@ -87,13 +88,13 @@ module Vmpooler ssh.loop end - def self.sync_root_keys(host, platform) - unless ROOT_KEYS_SCRIPT.nil? - user = "root" - netssh_jruby_workaround - Net::SSH.start(host, user, :keys => @key_file) do |ssh| - ssh.exec!(ROOT_KEYS_SYNC_CMD % "env PATH=\"/usr/gnu/bin:$PATH\" bash") - end + def self.sync_root_keys(host, _platform) + return if ROOT_KEYS_SCRIPT.nil? + + user = 'root' + netssh_jruby_workaround + Net::SSH.start(host, user, keys: @key_file) do |ssh| + ssh.exec!(ROOT_KEYS_SYNC_CMD % 'env PATH="/usr/gnu/bin:$PATH" bash') end end @@ -101,7 +102,7 @@ module Vmpooler # https://github.com/jruby/jruby-openssl/issues/105 # this will turn off some algos that match /^ecd(sa|h)-sha2/ def self.netssh_jruby_workaround - Net::SSH::Transport::Algorithms::ALGORITHMS.values.each { |algs| algs.reject! { |a| a =~ /^ecd(sa|h)-sha2/ } } + Net::SSH::Transport::Algorithms::ALGORITHMS.each_value { |algs| algs.reject! { |a| a =~ /^ecd(sa|h)-sha2/ } } Net::SSH::KnownHosts::SUPPORTED_TYPE.reject! { |t| t =~ /^ecd(sa|h)-sha2/ } end end diff --git a/lib/vmpooler/providers/aws.rb b/lib/vmpooler/providers/aws.rb index 4d491e8..3a180bb 100644 --- a/lib/vmpooler/providers/aws.rb +++ b/lib/vmpooler/providers/aws.rb @@ -16,10 +16,10 @@ module Vmpooler def initialize(config, logger, metrics, redis_connection_pool, name, options) super(config, logger, metrics, redis_connection_pool, name, options) - + @aws_access_key = ENV['ABS_AWS_ACCESS_KEY'] - @aws_secret_key = ENV['ABS_AWS_SECRET_KEY'] - + @aws_secret_key = ENV['ABS_AWS_SECRET_KEY'] + task_limit = global_config[:config].nil? || global_config[:config]['task_limit'].nil? ? 10 : global_config[:config]['task_limit'].to_i # The default connection pool size is: # Whatever is biggest from: @@ -43,7 +43,7 @@ module Vmpooler # the object reference for the connection, which means it cannot "reconnect" by creating an entirely new connection # object. 
Instead by wrapping it in a Hash, the Hash object reference itself never changes but the content of the # Hash can change, and is preserved across invocations. - new_conn = #connect to aws + new_conn = connect_to_aws { connection: new_conn } end @redis = redis_connection_pool @@ -60,9 +60,7 @@ module Vmpooler end end - def dns - @dns - end + attr_reader :dns # main configuration options def region @@ -85,7 +83,7 @@ module Vmpooler return provider_config['volume_size'] if provider_config['volume_size'] end - #dns + # dns def domain provider_config['domain'] end @@ -94,13 +92,13 @@ module Vmpooler provider_config['dns_zone_resource_name'] end - #subnets + # subnets def get_subnet_id(pool_name) case zone(pool_name) when 'us-west-2b' - return 'subnet-0fe90a688844f6f26' + 'subnet-0fe90a688844f6f26' when 'us-west-2a' - return 'subnet-091b436f' + 'subnet-091b436f' end end @@ -125,17 +123,16 @@ module Vmpooler pool = pool_config(pool_name) raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil? - zone = zone(pool_name) filters = [{ - name: "tag:pool", - values: [pool_name], - }] + name: 'tag:pool', + values: [pool_name] + }] instance_list = connection.instances(filters: filters) return vms if instance_list.first.nil? instance_list.each do |vm| - vms << { 'name' => vm.tags.detect {|f| f.key == 'vm_name' }&.value || "vm_name not found in tags" } + vms << { 'name' => vm.tags.detect { |f| f.key == 'vm_name' }&.value || 'vm_name not found in tags' } end debug_logger(vms) vms @@ -159,8 +156,8 @@ module Vmpooler vm_hash = nil filters = [{ - name: "tag:vm_name", - values: [vm_name], + name: 'tag:vm_name', + values: [vm_name] }] instances = connection.instances(filters: filters).first return vm_hash if instances.nil? @@ -188,70 +185,66 @@ module Vmpooler raise("Instance creation not attempted, #{new_vmname} already exists") if get_vm(pool_name, new_vmname) subnet_id = get_subnet_id(pool_name) - tag = [ + tag = [ { - resource_type: "instance", # accepts capacity-reservation, client-vpn-endpoint, customer-gateway, carrier-gateway, dedicated-host, dhcp-options, egress-only-internet-gateway, elastic-ip, elastic-gpu, export-image-task, export-instance-task, fleet, fpga-image, host-reservation, image, import-image-task, import-snapshot-task, instance, instance-event-window, internet-gateway, ipam, ipam-pool, ipam-scope, ipv4pool-ec2, ipv6pool-ec2, key-pair, launch-template, local-gateway, local-gateway-route-table, local-gateway-virtual-interface, local-gateway-virtual-interface-group, local-gateway-route-table-vpc-association, local-gateway-route-table-virtual-interface-group-association, natgateway, network-acl, network-interface, network-insights-analysis, network-insights-path, network-insights-access-scope, network-insights-access-scope-analysis, placement-group, prefix-list, replace-root-volume-task, reserved-instances, route-table, security-group, security-group-rule, snapshot, spot-fleet-request, spot-instances-request, subnet, subnet-cidr-reservation, traffic-mirror-filter, traffic-mirror-session, traffic-mirror-target, transit-gateway, transit-gateway-attachment, transit-gateway-connect-peer, transit-gateway-multicast-domain, transit-gateway-route-table, volume, vpc, vpc-endpoint, vpc-endpoint-service, vpc-peering-connection, vpn-connection, vpn-gateway, vpc-flow-log + resource_type: 'instance', # accepts capacity-reservation, client-vpn-endpoint, customer-gateway, carrier-gateway, dedicated-host, dhcp-options, egress-only-internet-gateway, elastic-ip, elastic-gpu, 
export-image-task, export-instance-task, fleet, fpga-image, host-reservation, image, import-image-task, import-snapshot-task, instance, instance-event-window, internet-gateway, ipam, ipam-pool, ipam-scope, ipv4pool-ec2, ipv6pool-ec2, key-pair, launch-template, local-gateway, local-gateway-route-table, local-gateway-virtual-interface, local-gateway-virtual-interface-group, local-gateway-route-table-vpc-association, local-gateway-route-table-virtual-interface-group-association, natgateway, network-acl, network-interface, network-insights-analysis, network-insights-path, network-insights-access-scope, network-insights-access-scope-analysis, placement-group, prefix-list, replace-root-volume-task, reserved-instances, route-table, security-group, security-group-rule, snapshot, spot-fleet-request, spot-instances-request, subnet, subnet-cidr-reservation, traffic-mirror-filter, traffic-mirror-session, traffic-mirror-target, transit-gateway, transit-gateway-attachment, transit-gateway-connect-peer, transit-gateway-multicast-domain, transit-gateway-route-table, volume, vpc, vpc-endpoint, vpc-endpoint-service, vpc-peering-connection, vpn-connection, vpn-gateway, vpc-flow-log tags: [ { - key: "vm_name", - value: new_vmname, + key: 'vm_name', + value: new_vmname }, { - key: "pool", - value: pool_name, + key: 'pool', + value: pool_name }, { - key: "lifetime", - value: get_current_lifetime(new_vmname), + key: 'lifetime', + value: get_current_lifetime(new_vmname) }, { - key: "created_by", - value: get_current_user(new_vmname), + key: 'created_by', + value: get_current_user(new_vmname) }, { - key: "job_url", - value: get_current_job_url(new_vmname), + key: 'job_url', + value: get_current_job_url(new_vmname) }, { - key: "organization", - value: "engineering", + key: 'organization', + value: 'engineering' }, { - key: "portfolio", - value: "ds-ci", - }, + key: 'portfolio', + value: 'ds-ci' + } - ], - }, + ] + } ] config = { - min_count: 1, - max_count: 1, - image_id: pool['template'], - monitoring: {:enabled => true}, - key_name: 'always-be-scheduling', - security_group_ids: ['sg-697fb015'], - instance_type: amisize(pool_name), - disable_api_termination: false, - instance_initiated_shutdown_behavior: 'terminate', - tag_specifications: tag, - subnet_id: subnet_id + min_count: 1, + max_count: 1, + image_id: pool['template'], + monitoring: { enabled: true }, + key_name: 'always-be-scheduling', + security_group_ids: ['sg-697fb015'], + instance_type: amisize(pool_name), + disable_api_termination: false, + instance_initiated_shutdown_behavior: 'terminate', + tag_specifications: tag, + subnet_id: subnet_id } - - if volume_size(pool_name) - config[:block_device_mappings] = get_block_device_mappings(config['image_id'], volume_size(pool_name)) - end + + config[:block_device_mappings] = get_block_device_mappings(config['image_id'], volume_size(pool_name)) if volume_size(pool_name) debug_logger('trigger insert_instance') batch_instance = connection.create_instances(config) instance_id = batch_instance.first.instance_id - connection.client.wait_until(:instance_running, {instance_ids: [instance_id]}) + connection.client.wait_until(:instance_running, { instance_ids: [instance_id] }) created_instance = get_vm(pool_name, new_vmname) # extra setup steps - if to_provision(pool_name) == "true" || to_provision(pool_name) == true - provision_node_aws(created_instance['private_dns_name'], pool_name) - end + provision_node_aws(created_instance['private_dns_name'], pool_name) if to_provision(pool_name) == 'true' || 
to_provision(pool_name) == true created_instance end @@ -262,28 +255,26 @@ module Vmpooler def get_block_device_mappings(image_id, volume_size) ec2_client = connection.client - image = ec2_client.describe_images(:image_ids => [image_id]).images.first - raise RuntimeError, "Image not found: #{image_id}" if image.nil? + image = ec2_client.describe_images(image_ids: [image_id]).images.first + raise "Image not found: #{image_id}" if image.nil? + raise "#{image_id} does not have an ebs root device type" unless image.root_device_type == 'ebs' + # Transform the images block_device_mappings output into a format # ready for a create. block_device_mappings = [] - if image.root_device_type == "ebs" - orig_bdm = image.block_device_mappings - orig_bdm.each do |block_device| - block_device_mappings << { - :device_name => block_device.device_name, - :ebs => { - # Change the default size of the root volume. - :volume_size => volume_size, - # This is required to override the images default for - # delete_on_termination, forcing all volumes to be deleted once the - # instance is terminated. - :delete_on_termination => true - } + orig_bdm = image.block_device_mappings + orig_bdm.each do |block_device| + block_device_mappings << { + device_name: block_device.device_name, + ebs: { + # Change the default size of the root volume. + volume_size: volume_size, + # This is required to override the images default for + # delete_on_termination, forcing all volumes to be deleted once the + # instance is terminated. + delete_on_termination: true } - end - else - raise "#{image_id} does not have an ebs root device type" + } end block_device_mappings end @@ -346,14 +337,14 @@ module Vmpooler # [String] vm_name : Name of the existing VM # returns # [boolean] true : once the operations are finished - def destroy_vm(pool_name, vm_name) + def destroy_vm(_pool_name, vm_name) debug_logger('destroy_vm') deleted = false filters = [{ - name: "tag:vm_name", - values: [vm_name], - }] + name: 'tag:vm_name', + values: [vm_name] + }] instances = connection.instances(filters: filters).first return true if instances.nil? @@ -361,24 +352,24 @@ module Vmpooler # vm_hash = get_vm(pool_name, vm_name) instances.terminate begin - connection.client.wait_until(:instance_terminated, {instance_ids: [instances.id]}) + connection.client.wait_until(:instance_terminated, { instance_ids: [instances.id] }) deleted = true - rescue ::Aws::Waiters::Errors => error - debug_logger("failed waiting for instance terminated #{vm_name}: #{error}") + rescue ::Aws::Waiters::Errors => e + debug_logger("failed waiting for instance terminated #{vm_name}: #{e}") end - return deleted + deleted end # check if a vm is ready by opening a socket on port 22 # if a domain is set, it will use vn_name.domain, # if not then it will use the ip directly (AWS workaround) - def vm_ready?(_pool_name, vm_name) + def vm_ready?(pool_name, vm_name) begin # TODO: we could use a healthcheck resource attached to instance domain_set = domain || global_config[:config]['domain'] if domain_set.nil? - vm_ip = get_vm(_pool_name, vm_name)['private_ip_address'] + vm_ip = get_vm(pool_name, vm_name)['private_ip_address'] vm_name = vm_ip unless vm_ip.nil? end open_socket(vm_name, domain_set) @@ -454,13 +445,13 @@ module Vmpooler return nil if pool_configuration.nil? 
{ - 'name' => vm_object.tags.detect {|f| f.key == 'vm_name' }&.value, - #'hostname' => vm_object.hostname, + 'name' => vm_object.tags.detect { |f| f.key == 'vm_name' }&.value, + # 'hostname' => vm_object.hostname, 'template' => pool_configuration&.key?('template') ? pool_configuration['template'] : nil, # was expecting to get it from API, not from config, but this is what vSphere does too! - 'poolname' => vm_object.tags.detect {|f| f.key == 'pool' }&.value, + 'poolname' => vm_object.tags.detect { |f| f.key == 'pool' }&.value, 'boottime' => vm_object.launch_time, 'status' => vm_object.state&.name, # One of the following values: pending, running, shutting-down, terminated, stopping, stopped - #'zone' => vm_object.zone, + # 'zone' => vm_object.zone, 'image_size' => vm_object.instance_type, 'private_ip_address' => vm_object.private_ip_address, 'private_dns_name' => vm_object.private_dns_name diff --git a/spec/ec2_helper.rb b/spec/ec2_helper.rb index 562eded..f81e655 100644 --- a/spec/ec2_helper.rb +++ b/spec/ec2_helper.rb @@ -19,7 +19,7 @@ MockOperationErrorError = Struct.new( MockInstance = Struct.new( # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/EC2/Instance.html - :instance_type, :launch_time, :private_ip_address, :state, :tags, :zone, + :instance_type, :launch_time, :private_ip_address, :state, :tags, :zone, :private_dns_name, keyword_init: true ) diff --git a/spec/unit/providers/aws_spec.rb b/spec/unit/providers/aws_spec.rb index 1a44190..a5d0bed 100644 --- a/spec/unit/providers/aws_spec.rb +++ b/spec/unit/providers/aws_spec.rb @@ -33,7 +33,6 @@ describe 'Vmpooler::PoolManager::Provider::Aws' do timeout: 10 ready_ttl: 1440 provider: 'aws' - provision: true EOT ) } @@ -54,7 +53,10 @@ EOT describe '#manual tests live' do context 'in itsysops' do - before(:each) { allow(subject).to receive(:dns).and_call_original } + before(:each) { + config['provision'] = "true" + allow(subject).to receive(:dns).and_call_original + } let(:vmname) { "instance-46" } let(:poolname) { "ubuntu-2004-arm64" } skip 'gets a vm' do From 65c797137ebd5e0286a2dcdd26fb5608e4de5062 Mon Sep 17 00:00:00 2001 From: Samuel Beaulieu Date: Thu, 7 Jul 2022 08:43:03 -0500 Subject: [PATCH 4/4] rename from AWS to EC2 for consistency The other cloud provider is GCE and not GCP. In the same way we are creating VMs in EC2 on AWS. 
--- .github/workflows/release.yml | 4 ++-- .gitignore | 1 + README.md | 2 +- .../version.rb | 2 +- lib/vmpooler/providers/{aws.rb => ec2.rb} | 4 ++-- spec/unit/providers/{aws_spec.rb => ec2_spec.rb} | 10 +++++----- .../vmpooler_provider_ec2_spec.rb} | 4 ++-- ...ovider-aws.gemspec => vmpooler-provider-ec2.gemspec | 10 +++++----- vmpooler.yaml.example | 2 +- 9 files changed, 20 insertions(+), 19 deletions(-) rename lib/{vmpooler-provider-aws => vmpooler-provider-ec2}/version.rb (67%) rename lib/vmpooler/providers/{aws.rb => ec2.rb} (99%) rename spec/unit/providers/{aws_spec.rb => ec2_spec.rb} (97%) rename spec/{vmpooler-provider-aws/vmpooler_provider_aws_spec.rb => vmpooler-provider-ec2/vmpooler_provider_ec2_spec.rb} (51%) rename vmpooler-provider-aws.gemspec => vmpooler-provider-ec2.gemspec (85%) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7490f41..17ec95b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -5,13 +5,13 @@ on: workflow_dispatch jobs: release: runs-on: ubuntu-latest - if: github.repository == 'puppetlabs/vmpooler-provider-aws' + if: github.repository == 'puppetlabs/vmpooler-provider-ec2' steps: - uses: actions/checkout@v2 - name: Get Version id: gv run: | - echo "::set-output name=ver::$(grep VERSION lib/vmpooler-provider-aws/version.rb |rev |cut -d "'" -f2 |rev)" + echo "::set-output name=ver::$(grep VERSION lib/vmpooler-provider-ec2/version.rb |rev |cut -d "'" -f2 |rev)" - name: Tag Release uses: ncipollo/release-action@v1 with: diff --git a/.gitignore b/.gitignore index 95e94de..c93a15b 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,4 @@ results.xml /vmpooler.yaml .idea *.json +.secrets/ diff --git a/README.md b/README.md index 786f8eb..de7332b 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ aws authorization is handled via two required ENV vars When you add the pool config `provision: true` to a pool, the new VMs will also get initialized with extra steps to setup the sshd config via NET:SSH These steps expect two environment vars 1. ROOT_KEYS_SCRIPT: (optional) the URI location of a script (eg https in github) that will be run to setup keys. If not set, this will be skipped -2. KEY_FILE_LOCATION: (required) the location on local disk where the ssh key resides for VMPooler to connect via SSH to the AWS node +2. KEY_FILE_LOCATION: (required) the location on local disk where the ssh key resides for VMPooler to connect via SSH to the EC2 node ### DNS AWS will setup a private ip and private dns hostname for the VM once running. Optionally we can setup a human readable DNS entry to resolve the VMPooler provider `spicy-proton` fqdn diff --git a/lib/vmpooler-provider-aws/version.rb b/lib/vmpooler-provider-ec2/version.rb similarity index 67% rename from lib/vmpooler-provider-aws/version.rb rename to lib/vmpooler-provider-ec2/version.rb index a8e2ab1..54be4c7 100644 --- a/lib/vmpooler-provider-aws/version.rb +++ b/lib/vmpooler-provider-ec2/version.rb @@ -1,5 +1,5 @@ # frozen_string_literal: true -module VmpoolerProviderAws +module VmpoolerProviderEc2 VERSION = '0.0.1' end diff --git a/lib/vmpooler/providers/aws.rb b/lib/vmpooler/providers/ec2.rb similarity index 99% rename from lib/vmpooler/providers/aws.rb rename to lib/vmpooler/providers/ec2.rb index 3a180bb..e72c877 100644 --- a/lib/vmpooler/providers/aws.rb +++ b/lib/vmpooler/providers/ec2.rb @@ -10,7 +10,7 @@ module Vmpooler class PoolManager class Provider # This class represent a GCE provider to CRUD resources in a gce cloud. 
- class Aws < Vmpooler::PoolManager::Provider::Base + class Ec2 < Vmpooler::PoolManager::Provider::Base # The connection_pool method is normally used only for testing attr_reader :connection_pool @@ -51,7 +51,7 @@ module Vmpooler # name of the provider class def name - 'aws' + 'ec2' end def connection diff --git a/spec/unit/providers/aws_spec.rb b/spec/unit/providers/ec2_spec.rb similarity index 97% rename from spec/unit/providers/aws_spec.rb rename to spec/unit/providers/ec2_spec.rb index a5d0bed..f1d4850 100644 --- a/spec/unit/providers/aws_spec.rb +++ b/spec/unit/providers/ec2_spec.rb @@ -1,13 +1,13 @@ require 'spec_helper' require 'mock_redis' require 'ec2_helper' -require 'vmpooler/providers/aws' +require 'vmpooler/providers/ec2' RSpec::Matchers.define :relocation_spec_with_host do |value| match { |actual| actual[:spec].host == value } end -describe 'Vmpooler::PoolManager::Provider::Aws' do +describe 'Vmpooler::PoolManager::Provider::Ec2' do let(:logger) { MockLogger.new } let(:metrics) { Vmpooler::Metrics::DummyStatsd.new } let(:poolname) { 'debian-9' } @@ -20,7 +20,7 @@ describe 'Vmpooler::PoolManager::Provider::Aws' do max_tries: 3 retry_factor: 10 :providers: - :aws: + :ec2: connection_pool_timeout: 1 zone: '#{zone}' region: '#{region}' @@ -32,7 +32,7 @@ describe 'Vmpooler::PoolManager::Provider::Aws' do size: 5 timeout: 10 ready_ttl: 1440 - provider: 'aws' + provider: 'ec2' EOT ) } @@ -49,7 +49,7 @@ EOT ) { MockRedis.new } end - subject { Vmpooler::PoolManager::Provider::Aws.new(config, logger, metrics, redis_connection_pool, 'aws', provider_options) } + subject { Vmpooler::PoolManager::Provider::Ec2.new(config, logger, metrics, redis_connection_pool, 'ec2', provider_options) } describe '#manual tests live' do context 'in itsysops' do diff --git a/spec/vmpooler-provider-aws/vmpooler_provider_aws_spec.rb b/spec/vmpooler-provider-ec2/vmpooler_provider_ec2_spec.rb similarity index 51% rename from spec/vmpooler-provider-aws/vmpooler_provider_aws_spec.rb rename to spec/vmpooler-provider-ec2/vmpooler_provider_ec2_spec.rb index 9ccdba9..6c5e789 100644 --- a/spec/vmpooler-provider-aws/vmpooler_provider_aws_spec.rb +++ b/spec/vmpooler-provider-ec2/vmpooler_provider_ec2_spec.rb @@ -1,9 +1,9 @@ require 'rspec' -describe 'VmpoolerProviderAws' do +describe 'VmpoolerProviderEc2' do context 'when creating class ' do it 'sets a version' do - expect(VmpoolerProviderAws::VERSION).not_to be_nil + expect(VmpoolerProviderEc2::VERSION).not_to be_nil end end end \ No newline at end of file diff --git a/vmpooler-provider-aws.gemspec b/vmpooler-provider-ec2.gemspec similarity index 85% rename from vmpooler-provider-aws.gemspec rename to vmpooler-provider-ec2.gemspec index 0b9e1e5..a569143 100644 --- a/vmpooler-provider-aws.gemspec +++ b/vmpooler-provider-ec2.gemspec @@ -1,15 +1,15 @@ lib = File.expand_path('../lib', __FILE__) $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) -require 'vmpooler-provider-aws/version' +require 'vmpooler-provider-ec2/version' Gem::Specification.new do |s| - s.name = 'vmpooler-provider-aws' - s.version = VmpoolerProviderAws::VERSION + s.name = 'vmpooler-provider-ec2' + s.version = VmpoolerProviderEc2::VERSION s.authors = ['Puppet'] s.email = ['support@puppet.com'] - s.summary = 'AWS provider for VMPooler' - s.homepage = 'https://github.com/puppetlabs/vmpooler-provider-aws' + s.summary = 'EC2 provider for VMPooler' + s.homepage = 'https://github.com/puppetlabs/vmpooler-provider-ec2' s.license = 'Apache-2.0' s.required_ruby_version = Gem::Requirement.new('>= 2.3.0') diff 
--git a/vmpooler.yaml.example b/vmpooler.yaml.example index 3560c59..28b92a3 100644 --- a/vmpooler.yaml.example +++ b/vmpooler.yaml.example @@ -138,7 +138,7 @@ # # Provider specific pool settings # -# AWS provider +# EC2 provider # - zone # The zone to create the VMs in # (optional: default is global provider zone value)
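
A minimal sketch of how the settings touched by this patch series might be wired together in vmpooler.yaml, based on the keys exercised by the spec config and the README notes above. The region, zone, AMI ID, and pool name are illustrative placeholders, not values taken from the patch itself:

# Assumes ABS_AWS_ACCESS_KEY and ABS_AWS_SECRET_KEY are set in the environment;
# provisioning additionally expects KEY_FILE_LOCATION (and, optionally, ROOT_KEYS_SCRIPT).
:providers:
  :ec2:
    region: 'us-west-2'
    zone: 'us-west-2b'
:pools:
  - name: 'debian-9-x86_64'              # placeholder pool name
    template: 'ami-0123456789abcdef0'    # placeholder AMI; create_vm passes the pool template as image_id
    size: 5
    timeout: 10
    ready_ttl: 1440
    provider: 'ec2'
    provision: true                      # optional: run the Net::SSH node setup from lib/vmpooler/aws_setup.rb once the instance is running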