mirror of
https://github.com/puppetlabs/vmpooler-provider-ec2.git
synced 2026-01-26 10:28:41 -05:00
Added aws dependency and renamed directories
This commit is contained in:
commit
77b340c225
20 changed files with 2432 additions and 0 deletions
8
.github/dependabot.yml
vendored
Normal file
8
.github/dependabot.yml
vendored
Normal file
|
|
@ -0,0 +1,8 @@
|
||||||
|
version: 2
|
||||||
|
updates:
|
||||||
|
- package-ecosystem: bundler
|
||||||
|
directory: "/"
|
||||||
|
schedule:
|
||||||
|
interval: daily
|
||||||
|
time: "13:00"
|
||||||
|
open-pull-requests-limit: 10
|
||||||
37
.github/workflows/release.yml
vendored
Normal file
37
.github/workflows/release.yml
vendored
Normal file
|
|
@ -0,0 +1,37 @@
|
||||||
|
name: Release
|
||||||
|
|
||||||
|
on: workflow_dispatch
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
release:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
if: github.repository == 'puppetlabs/vmpooler-provider-gce'
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
- name: Get Version
|
||||||
|
id: gv
|
||||||
|
run: |
|
||||||
|
echo "::set-output name=ver::$(grep VERSION lib/vmpooler-provider-gce/version.rb |rev |cut -d "'" -f2 |rev)"
|
||||||
|
- name: Tag Release
|
||||||
|
uses: ncipollo/release-action@v1
|
||||||
|
with:
|
||||||
|
tag: ${{ steps.gv.outputs.ver }}
|
||||||
|
token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
draft: false
|
||||||
|
prerelease: false
|
||||||
|
generateReleaseNotes: true
|
||||||
|
- name: Install Ruby 2.5.8
|
||||||
|
uses: ruby/setup-ruby@v1
|
||||||
|
with:
|
||||||
|
ruby-version: '2.5.8'
|
||||||
|
- name: Build gem
|
||||||
|
run: gem build *.gemspec
|
||||||
|
- name: Publish gem
|
||||||
|
run: |
|
||||||
|
mkdir -p $HOME/.gem
|
||||||
|
touch $HOME/.gem/credentials
|
||||||
|
chmod 0600 $HOME/.gem/credentials
|
||||||
|
printf -- "---\n:rubygems_api_key: ${GEM_HOST_API_KEY}\n" > $HOME/.gem/credentials
|
||||||
|
gem push *.gem
|
||||||
|
env:
|
||||||
|
GEM_HOST_API_KEY: '${{ secrets.RUBYGEMS_AUTH_TOKEN }}'
|
||||||
47
.github/workflows/testing.yml
vendored
Normal file
47
.github/workflows/testing.yml
vendored
Normal file
|
|
@ -0,0 +1,47 @@
|
||||||
|
# This workflow uses actions that are not certified by GitHub.
|
||||||
|
# They are provided by a third-party and are governed by
|
||||||
|
# separate terms of service, privacy policy, and support
|
||||||
|
# documentation.
|
||||||
|
# This workflow will download a prebuilt Ruby version, install dependencies and run tests with Rake
|
||||||
|
# For more information see: https://github.com/marketplace/actions/setup-ruby-jruby-and-truffleruby
|
||||||
|
|
||||||
|
name: Testing
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
rubocop:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
ruby-version:
|
||||||
|
- '2.5.8'
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
- name: Set up Ruby
|
||||||
|
uses: ruby/setup-ruby@v1
|
||||||
|
with:
|
||||||
|
ruby-version: ${{ matrix.ruby-version }}
|
||||||
|
bundler-cache: true # runs 'bundle install' and caches installed gems automatically
|
||||||
|
- name: Run Rubocop
|
||||||
|
run: bundle exec rake rubocop
|
||||||
|
|
||||||
|
spec_tests:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
ruby-version:
|
||||||
|
- '2.5.8'
|
||||||
|
- 'jruby-9.2.12.0'
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
- name: Set up Ruby
|
||||||
|
uses: ruby/setup-ruby@v1
|
||||||
|
with:
|
||||||
|
ruby-version: ${{ matrix.ruby-version }}
|
||||||
|
bundler-cache: true # runs 'bundle install' and caches installed gems automatically
|
||||||
|
- name: Run spec tests
|
||||||
|
run: bundle exec rake test
|
||||||
11
.gitignore
vendored
Normal file
11
.gitignore
vendored
Normal file
|
|
@ -0,0 +1,11 @@
|
||||||
|
.bundle/
|
||||||
|
.vagrant/
|
||||||
|
coverage/
|
||||||
|
vendor/
|
||||||
|
.dccache
|
||||||
|
.ruby-version
|
||||||
|
Gemfile.local
|
||||||
|
results.xml
|
||||||
|
/vmpooler.yaml
|
||||||
|
.idea
|
||||||
|
*.json
|
||||||
2
.jrubyrc
Normal file
2
.jrubyrc
Normal file
|
|
@ -0,0 +1,2 @@
|
||||||
|
# for simplecov to work in jruby, without this we are getting errors when debugging spec tests
|
||||||
|
debug.fullTrace=true
|
||||||
53
.rubocop.yml
Normal file
53
.rubocop.yml
Normal file
|
|
@ -0,0 +1,53 @@
|
||||||
|
AllCops:
|
||||||
|
Include:
|
||||||
|
- 'lib/**/*.rb'
|
||||||
|
Exclude:
|
||||||
|
- 'scripts/**/*'
|
||||||
|
- 'spec/**/*'
|
||||||
|
- 'vendor/**/*'
|
||||||
|
- Gemfile
|
||||||
|
- Rakefile
|
||||||
|
|
||||||
|
# These short variable names make sense as exceptions to the rule, but generally I think short variable names do hurt readability
|
||||||
|
Naming/MethodParameterName:
|
||||||
|
AllowedNames:
|
||||||
|
- vm
|
||||||
|
- dc
|
||||||
|
- s
|
||||||
|
- x
|
||||||
|
- f
|
||||||
|
|
||||||
|
#new cops:
|
||||||
|
Lint/DuplicateRegexpCharacterClassElement: # (new in 1.1)
|
||||||
|
Enabled: true
|
||||||
|
Lint/EmptyBlock: # (new in 1.1)
|
||||||
|
Enabled: true
|
||||||
|
Lint/ToEnumArguments: # (new in 1.1)
|
||||||
|
Enabled: true
|
||||||
|
Lint/UnmodifiedReduceAccumulator: # (new in 1.1)
|
||||||
|
Enabled: true
|
||||||
|
Style/ArgumentsForwarding: # (new in 1.1)
|
||||||
|
Enabled: false
|
||||||
|
Style/DocumentDynamicEvalDefinition: # (new in 1.1)
|
||||||
|
Enabled: true
|
||||||
|
Style/SwapValues: # (new in 1.1)
|
||||||
|
Enabled: false
|
||||||
|
|
||||||
|
#disabled
|
||||||
|
|
||||||
|
Metrics/AbcSize:
|
||||||
|
Enabled: false
|
||||||
|
Metrics/ClassLength:
|
||||||
|
Enabled: false
|
||||||
|
Metrics/CyclomaticComplexity:
|
||||||
|
Enabled: false
|
||||||
|
Metrics/MethodLength:
|
||||||
|
Enabled: false
|
||||||
|
Metrics/PerceivedComplexity:
|
||||||
|
Enabled: false
|
||||||
|
Metrics/ParameterLists:
|
||||||
|
Enabled: false
|
||||||
|
Layout/LineLength:
|
||||||
|
Enabled: false
|
||||||
|
Metrics/BlockLength:
|
||||||
|
Enabled: false
|
||||||
10
CODEOWNERS
Normal file
10
CODEOWNERS
Normal file
|
|
@ -0,0 +1,10 @@
|
||||||
|
|
||||||
|
# This will cause DIO to be assigned review of any opened PRs against
|
||||||
|
# the branches containing this file.
|
||||||
|
# See https://help.github.com/en/articles/about-code-owners for info on how to
|
||||||
|
# take ownership of parts of the code base that should be reviewed by another
|
||||||
|
# team.
|
||||||
|
|
||||||
|
# DIO will be the default owners for everything in the repo.
|
||||||
|
* @puppetlabs/dio
|
||||||
|
|
||||||
13
Gemfile
Normal file
13
Gemfile
Normal file
|
|
@ -0,0 +1,13 @@
|
||||||
|
source ENV['GEM_SOURCE'] || 'https://rubygems.org'
|
||||||
|
|
||||||
|
gemspec
|
||||||
|
|
||||||
|
# Evaluate Gemfile.local if it exists
|
||||||
|
if File.exists? "#{__FILE__}.local"
|
||||||
|
instance_eval(File.read("#{__FILE__}.local"))
|
||||||
|
end
|
||||||
|
|
||||||
|
# Evaluate ~/.gemfile if it exists
|
||||||
|
if File.exists?(File.join(Dir.home, '.gemfile'))
|
||||||
|
instance_eval(File.read(File.join(Dir.home, '.gemfile')))
|
||||||
|
end
|
||||||
201
LICENSE
Normal file
201
LICENSE
Normal file
|
|
@ -0,0 +1,201 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
43
README.md
Normal file
43
README.md
Normal file
|
|
@ -0,0 +1,43 @@
|
||||||
|
# vmpooler-provider-gce
|
||||||
|
|
||||||
|
This is a provider for [VMPooler](https://github.com/puppetlabs/vmpooler) allows using GCE to create instances, disks,
|
||||||
|
snapshots, or destroy instances for specific pools.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
Include this gem in the same Gemfile that you use to install VMPooler itself and then define one or more pools with the `provider` key set to `gce`. VMPooler will take care of the rest.
|
||||||
|
See what configuration is needed for this provider in the [example file](https://github.com/puppetlabs/vmpooler-provider-gce/blob/main/vmpooler.yaml.example).
|
||||||
|
|
||||||
|
Examples of deploying VMPooler with extra providers can be found in the [puppetlabs/vmpooler-deployment](https://github.com/puppetlabs/vmpooler-deployment) repository.
|
||||||
|
|
||||||
|
GCE authorization is handled via a service account (or personal account) private key (json format) and can be configured via
|
||||||
|
|
||||||
|
1. GOOGLE_APPLICATION_CREDENTIALS environment variable eg GOOGLE_APPLICATION_CREDENTIALS=/my/home/directory/my_account_key.json
|
||||||
|
|
||||||
|
### DNS
|
||||||
|
DNS is integrated via Google's CloudDNS service. To enable, a CloudDNS zone name must be provided in the config (see the example yaml file dns_zone_resource_name)
|
||||||
|
|
||||||
|
An A record is then created in that zone upon instance creation with the VM's internal IP, and deleted when the instance is destroyed.
|
||||||
|
|
||||||
|
### Labels
|
||||||
|
This provider adds labels to all resources that are managed
|
||||||
|
|
||||||
|
|resource|labels|note|
|
||||||
|
|---|---|---|
|
||||||
|
|instance|vm=$vm_name, pool=$pool_name|for example vm=foo-bar, pool=pool1|
|
||||||
|
|disk|vm=$vm_name, pool=$pool_name|for example vm=foo-bar and pool=pool1|
|
||||||
|
|snapshot|snapshot_name=$snapshot_name, vm=$vm_name, pool=$pool_name| for example snapshot_name=snap1, vm=foo-bar, pool=pool1|
|
||||||
|
|
||||||
|
Also see the usage of vmpooler's optional purge_unconfigured_resources, which is used to delete any resource found that
|
||||||
|
do not have the pool label, and can be configured to allow a specific list of unconfigured pool names.
|
||||||
|
|
||||||
|
### Pre-requisite
|
||||||
|
|
||||||
|
- A service account needs to be created and a private json key generated (see usage section)
|
||||||
|
- The service account needs to be given permissions to the project (broad permissions would be compute v1 admin and dns admin). A yaml file is provided that lists the least-privilege permissions needed
|
||||||
|
- if using DNS, a DNS zone needs to be created in CloudDNS, and configured in the provider's config section with the name of that zone (dns_zone_resource_name). When not specified, the DNS setup and teardown is skipped.
|
||||||
|
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
vmpooler-provider-gce is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.html). See the [LICENSE](LICENSE) file for more details.
|
||||||
25
Rakefile
Normal file
25
Rakefile
Normal file
|
|
@ -0,0 +1,25 @@
|
||||||
|
require 'rspec/core/rake_task'
|
||||||
|
|
||||||
|
rubocop_available = Gem::Specification::find_all_by_name('rubocop').any?
|
||||||
|
require 'rubocop/rake_task' if rubocop_available
|
||||||
|
|
||||||
|
desc 'Run rspec tests with coloring.'
|
||||||
|
RSpec::Core::RakeTask.new(:test) do |t|
|
||||||
|
t.rspec_opts = %w[--color --format documentation]
|
||||||
|
t.pattern = 'spec/'
|
||||||
|
end
|
||||||
|
|
||||||
|
desc 'Run rspec tests and save JUnit output to results.xml.'
|
||||||
|
RSpec::Core::RakeTask.new(:junit) do |t|
|
||||||
|
t.rspec_opts = %w[-r yarjuf -f JUnit -o results.xml]
|
||||||
|
t.pattern = 'spec/'
|
||||||
|
end
|
||||||
|
|
||||||
|
if rubocop_available
|
||||||
|
desc 'Run RuboCop'
|
||||||
|
RuboCop::RakeTask.new(:rubocop) do |task|
|
||||||
|
task.options << '--display-cop-names'
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
task :default => [:test]
|
||||||
5
lib/vmpooler-provider-aws/version.rb
Normal file
5
lib/vmpooler-provider-aws/version.rb
Normal file
|
|
@ -0,0 +1,5 @@
|
||||||
|
# frozen_string_literal: true
|
||||||
|
|
||||||
|
module VmpoolerProviderAws
|
||||||
|
VERSION = '0.0.1'
|
||||||
|
end
|
||||||
740
lib/vmpooler/providers/aws.rb
Normal file
740
lib/vmpooler/providers/aws.rb
Normal file
|
|
@ -0,0 +1,740 @@
|
||||||
|
# frozen_string_literal: true
|
||||||
|
|
||||||
|
require 'bigdecimal'
|
||||||
|
require 'bigdecimal/util'
|
||||||
|
require 'vmpooler/providers/base'
|
||||||
|
|
||||||
|
module Vmpooler
|
||||||
|
class PoolManager
|
||||||
|
class Provider
|
||||||
|
# This class represent a GCE provider to CRUD resources in a gce cloud.
|
||||||
|
class Aws < Vmpooler::PoolManager::Provider::Base
|
||||||
|
# The connection_pool method is normally used only for testing
|
||||||
|
attr_reader :connection_pool
|
||||||
|
|
||||||
|
def initialize(config, logger, metrics, redis_connection_pool, name, options)
|
||||||
|
super(config, logger, metrics, redis_connection_pool, name, options)
|
||||||
|
|
||||||
|
task_limit = global_config[:config].nil? || global_config[:config]['task_limit'].nil? ? 10 : global_config[:config]['task_limit'].to_i
|
||||||
|
# The default connection pool size is:
|
||||||
|
# Whatever is biggest from:
|
||||||
|
# - How many pools this provider services
|
||||||
|
# - Maximum number of cloning tasks allowed
|
||||||
|
# - Need at least 2 connections so that a pool can have inventory functions performed while cloning etc.
|
||||||
|
default_connpool_size = [provided_pools.count, task_limit, 2].max
|
||||||
|
connpool_size = provider_config['connection_pool_size'].nil? ? default_connpool_size : provider_config['connection_pool_size'].to_i
|
||||||
|
# The default connection pool timeout should be quite large - 60 seconds
|
||||||
|
connpool_timeout = provider_config['connection_pool_timeout'].nil? ? 60 : provider_config['connection_pool_timeout'].to_i
|
||||||
|
logger.log('d', "[#{name}] ConnPool - Creating a connection pool of size #{connpool_size} with timeout #{connpool_timeout}")
|
||||||
|
@connection_pool = Vmpooler::PoolManager::GenericConnectionPool.new(
|
||||||
|
metrics: metrics,
|
||||||
|
connpool_type: 'provider_connection_pool',
|
||||||
|
connpool_provider: name,
|
||||||
|
size: connpool_size,
|
||||||
|
timeout: connpool_timeout
|
||||||
|
) do
|
||||||
|
logger.log('d', "[#{name}] Connection Pool - Creating a connection object")
|
||||||
|
# Need to wrap the vSphere connection object in another object. The generic connection pooler will preserve
|
||||||
|
# the object reference for the connection, which means it cannot "reconnect" by creating an entirely new connection
|
||||||
|
# object. Instead by wrapping it in a Hash, the Hash object reference itself never changes but the content of the
|
||||||
|
# Hash can change, and is preserved across invocations.
|
||||||
|
new_conn = #connect to aws
|
||||||
|
{ connection: new_conn }
|
||||||
|
end
|
||||||
|
@redis = redis_connection_pool
|
||||||
|
end
|
||||||
|
|
||||||
|
# name of the provider class
|
||||||
|
def name
|
||||||
|
'aws'
|
||||||
|
end
|
||||||
|
|
||||||
|
def connection
|
||||||
|
@connection_pool.with_metrics do |pool_object|
|
||||||
|
return ensured_aws_connection(pool_object)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def dns
|
||||||
|
|
||||||
|
@dns
|
||||||
|
end
|
||||||
|
|
||||||
|
# main configuration options
|
||||||
|
def project
|
||||||
|
provider_config['project']
|
||||||
|
end
|
||||||
|
|
||||||
|
def network_name
|
||||||
|
provider_config['network_name']
|
||||||
|
end
|
||||||
|
|
||||||
|
def subnetwork_name(pool_name)
|
||||||
|
return pool_config(pool_name)['subnetwork_name'] if pool_config(pool_name)['subnetwork_name']
|
||||||
|
end
|
||||||
|
|
||||||
|
# main configuration options, overridable for each pool
|
||||||
|
def zone(pool_name)
|
||||||
|
return pool_config(pool_name)['zone'] if pool_config(pool_name)['zone']
|
||||||
|
return provider_config['zone'] if provider_config['zone']
|
||||||
|
end
|
||||||
|
|
||||||
|
def machine_type(pool_name)
|
||||||
|
return pool_config(pool_name)['machine_type'] if pool_config(pool_name)['machine_type']
|
||||||
|
return provider_config['machine_type'] if provider_config['machine_type']
|
||||||
|
end
|
||||||
|
|
||||||
|
def domain
|
||||||
|
provider_config['domain']
|
||||||
|
end
|
||||||
|
|
||||||
|
def dns_zone_resource_name
|
||||||
|
provider_config['dns_zone_resource_name']
|
||||||
|
end
|
||||||
|
|
||||||
|
# Base methods that are implemented:
|
||||||
|
|
||||||
|
# vms_in_pool lists all the VM names in a pool, which is based on the VMs
|
||||||
|
# having a label "pool" that match a pool config name.
|
||||||
|
# inputs
|
||||||
|
# [String] pool_name : Name of the pool
|
||||||
|
# returns
|
||||||
|
# empty array [] if no VMs found in the pool
|
||||||
|
# [Array]
|
||||||
|
# [Hashtable]
|
||||||
|
# [String] name : the name of the VM instance (unique for whole project)
|
||||||
|
def vms_in_pool(pool_name)
|
||||||
|
debug_logger('vms_in_pool')
|
||||||
|
vms = []
|
||||||
|
pool = pool_config(pool_name)
|
||||||
|
raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?
|
||||||
|
|
||||||
|
zone = zone(pool_name)
|
||||||
|
filter = "(labels.pool = #{pool_name})"
|
||||||
|
instance_list = connection.list_instances(project, zone, filter: filter)
|
||||||
|
|
||||||
|
return vms if instance_list.items.nil?
|
||||||
|
|
||||||
|
instance_list.items.each do |vm|
|
||||||
|
vms << { 'name' => vm.name }
|
||||||
|
end
|
||||||
|
debug_logger(vms)
|
||||||
|
vms
|
||||||
|
end
|
||||||
|
|
||||||
|
# inputs
|
||||||
|
# [String] pool_name : Name of the pool
|
||||||
|
# [String] vm_name : Name of the VM to find
|
||||||
|
# returns
|
||||||
|
# nil if VM doesn't exist
|
||||||
|
# [Hastable] of the VM
|
||||||
|
# [String] name : The name of the resource, provided by the client when initially creating the resource
|
||||||
|
# [String] hostname : Specifies the hostname of the instance. The specified hostname must be RFC1035 compliant. If hostname is not specified,
|
||||||
|
# the default hostname is [ INSTANCE_NAME].c.[PROJECT_ID].internal when using the global DNS, and
|
||||||
|
# [ INSTANCE_NAME].[ZONE].c.[PROJECT_ID].internal when using zonal DNS
|
||||||
|
# [String] template : This is the name of template
|
||||||
|
# [String] poolname : Name of the pool the VM as per labels
|
||||||
|
# [Time] boottime : Time when the VM was created/booted
|
||||||
|
# [String] status : One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED
|
||||||
|
# [String] zone : URL of the zone where the instance resides.
|
||||||
|
# [String] machine_type : Full or partial URL of the machine type resource to use for this instance, in the format: zones/zone/machineTypes/machine-type.
|
||||||
|
def get_vm(pool_name, vm_name)
|
||||||
|
debug_logger('get_vm')
|
||||||
|
vm_hash = nil
|
||||||
|
begin
|
||||||
|
vm_object = connection.get_instance(project, zone(pool_name), vm_name)
|
||||||
|
rescue ::Google::Apis::ClientError => e
|
||||||
|
raise e unless e.status_code == 404
|
||||||
|
|
||||||
|
# swallow the ClientError error 404 and return nil when the VM was not found
|
||||||
|
return nil
|
||||||
|
end
|
||||||
|
|
||||||
|
return vm_hash if vm_object.nil?
|
||||||
|
|
||||||
|
vm_hash = generate_vm_hash(vm_object, pool_name)
|
||||||
|
debug_logger("vm_hash #{vm_hash}")
|
||||||
|
vm_hash
|
||||||
|
end
|
||||||
|
|
||||||
|
# create_vm creates a new VM with a default network from the config,
# an initial disk named #{new_vmname}-disk0 that uses the 'template' as its source image
# and labels added for vm and pool,
# and an instance configuration for machine_type from the config and
# labels vm and pool,
# having a label "pool" that matches a pool config name.
# inputs
#   [String] pool_name  : Name of the pool
#   [String] new_vmname : Name to give the new VM
# returns
#   [Hashtable] of the VM as per get_vm(pool_name, vm_name)
# raises
#   RuntimeError if the pool does not exist in the provider config
def create_vm(pool_name, new_vmname)
  debug_logger('create_vm')
  pool = pool_config(pool_name)
  raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?

  # hardcoded network info; the subnetwork is only set when one is configured for the pool
  network_interfaces = Google::Apis::ComputeV1::NetworkInterface.new(
    network: network_name
  )
  network_interfaces.subnetwork = subnetwork_name(pool_name) if subnetwork_name(pool_name)
  # boot disk parameters: the disk is named after the VM so it can be found later
  init_params = {
    source_image: pool['template'], # The source image to create this disk.
    labels: { 'vm' => new_vmname, 'pool' => pool_name },
    disk_name: "#{new_vmname}-disk0"
  }
  disk = Google::Apis::ComputeV1::AttachedDisk.new(
    auto_delete: true,
    boot: true,
    initialize_params: Google::Apis::ComputeV1::AttachedDiskInitializeParams.new(init_params)
  )
  # build the FQDN hostname only when a domain is configured (provider or global)
  append_domain = domain || global_config[:config]['domain']
  fqdn = "#{new_vmname}.#{append_domain}" if append_domain

  # Assume all pool config is valid i.e. not missing
  client = ::Google::Apis::ComputeV1::Instance.new(
    name: new_vmname,
    hostname: fqdn,
    machine_type: pool['machine_type'],
    disks: [disk],
    network_interfaces: [network_interfaces],
    labels: { 'vm' => new_vmname, 'pool' => pool_name },
    tags: Google::Apis::ComputeV1::Tags.new(items: [project])
  )

  debug_logger('trigger insert_instance')
  result = connection.insert_instance(project, zone(pool_name), client)
  # block until the instance exists, then read it back and register DNS for it
  wait_for_operation(project, pool_name, result)
  created_instance = get_vm(pool_name, new_vmname)
  dns_setup(created_instance)
  created_instance
end
|
|
||||||
|
# create_disk creates an additional disk for an existing VM. It will name the new
# disk #{vm_name}-disk#{number_disk} where number_disk is the next logical disk number
# starting with 1 when adding an additional disk to a VM with only the boot disk:
#   #{vm_name}-disk0 == boot disk
#   #{vm_name}-disk1 == additional disk added via create_disk
#   #{vm_name}-disk2 == additional disk added via create_disk if run a second time etc
# the new disk has labels added for vm and pool.
# The GCE lifecycle is to create a new disk (lives independently of the instance) then to attach
# it to the existing instance.
# inputs
#   [String] pool_name : Name of the pool
#   [String] vm_name   : Name of the existing VM
#   [String] disk_size : The new disk size in GB
# returns
#   [boolean] true : once the operations are finished
# raises
#   RuntimeError if the pool or the VM cannot be found
def create_disk(pool_name, vm_name, disk_size)
  debug_logger('create_disk')
  pool = pool_config(pool_name)
  raise("Pool #{pool_name} does not exist for the provider #{name}") if pool.nil?

  begin
    vm_object = connection.get_instance(project, zone(pool_name), vm_name)
  rescue ::Google::Apis::ClientError => e
    raise e unless e.status_code == 404

    # a 404 means the instance does not exist: report it as a pool-level error
    raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}")
  end
  # this number should start at 1 when there is only the boot disk,
  # eg the new disk will be named spicy-proton-disk1
  number_disk = vm_object.disks.length

  disk_name = "#{vm_name}-disk#{number_disk}"
  disk = Google::Apis::ComputeV1::Disk.new(
    name: disk_name,
    size_gb: disk_size,
    labels: { 'pool' => pool_name, 'vm' => vm_name }
  )
  # step 1: create the disk as an independent resource and wait for it
  debug_logger("trigger insert_disk #{disk_name} for #{vm_name}")
  result = connection.insert_disk(project, zone(pool_name), disk)
  wait_for_operation(project, pool_name, result)
  # step 2: re-read the disk to get its self_link for attachment
  debug_logger("trigger get_disk #{disk_name} for #{vm_name}")
  new_disk = connection.get_disk(project, zone(pool_name), disk_name)

  attached_disk = Google::Apis::ComputeV1::AttachedDisk.new(
    auto_delete: true,
    boot: false,
    source: new_disk.self_link
  )
  # step 3: attach the new disk to the instance and wait for completion
  debug_logger("trigger attach_disk #{disk_name} for #{vm_name}")
  result = connection.attach_disk(project, zone(pool_name), vm_object.name, attached_disk)
  wait_for_operation(project, pool_name, result)
  true
end
|
|
||||||
|
# create_snapshot creates new snapshots with the unique name {new_snapshot_name}-#{disk.name}
# for one vm; for one create_snapshot() there could be multiple snapshots created, one for each drive,
# since the snapshot resource needs a unique name in the gce project.
# We create a unique name by concatenating {new_snapshot_name}-#{disk.name};
# the disk name is based on vm_name which makes it unique.
# The snapshot is given labels snapshot_name, vm, pool, diskname and boot so it can
# be found again (see find_snapshot / find_all_snapshots) and reverted or purged later.
# inputs
#   [String] pool_name         : Name of the pool
#   [String] vm_name           : Name of the existing VM
#   [String] new_snapshot_name : a unique name for this snapshot, which would be used to refer to it when reverting
# returns
#   [boolean] true : once the operations are finished
# raises
#   RuntimeError if the vm_name cannot be found
#   RuntimeError if the snapshot_name already exists for this VM
def create_snapshot(pool_name, vm_name, new_snapshot_name)
  debug_logger('create_snapshot')
  begin
    vm_object = connection.get_instance(project, zone(pool_name), vm_name)
  rescue ::Google::Apis::ClientError => e
    raise e unless e.status_code == 404

    # a 404 means the instance does not exist
    raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}")
  end

  # refuse to overwrite an existing snapshot set with the same logical name
  old_snap = find_snapshot(vm_name, new_snapshot_name)
  raise("Snapshot #{new_snapshot_name} for VM #{vm_name} in pool #{pool_name} already exists for the provider #{name}") unless old_snap.nil?

  result_list = []
  vm_object.disks.each do |attached_disk|
    disk_name = disk_name_from_source(attached_disk)
    snapshot_obj = ::Google::Apis::ComputeV1::Snapshot.new(
      name: "#{new_snapshot_name}-#{disk_name}",
      labels: {
        'snapshot_name' => new_snapshot_name,
        'vm' => vm_name,
        'pool' => pool_name,
        'diskname' => disk_name,
        'boot' => attached_disk.boot.to_s
      }
    )
    debug_logger("trigger async create_disk_snapshot #{vm_name}: #{new_snapshot_name}-#{disk_name}")
    result = connection.create_disk_snapshot(project, zone(pool_name), disk_name, snapshot_obj)
    # do them all async, keep a list, check later
    result_list << result
  end
  # now check they are done
  result_list.each do |result|
    wait_for_operation(project, pool_name, result)
  end
  true
end
|
|
||||||
|
# revert_snapshot reverts an existing VM's disks to an existing snapshot_name.
# reverting in gce entails
#   1. shutting down the VM,
#   2. detaching and deleting the drives,
#   3. creating new disks with the same name from the snapshot for each disk,
#   4. attaching the disks and starting the instance.
# for one vm, there might be multiple snapshots in time. We select the ones referred to by the
# snapshot_name, but that may be multiple snapshots, one for each disk.
# The new disks get labels vm and pool so destroy_vm and purge can find them.
# inputs
#   [String] pool_name     : Name of the pool
#   [String] vm_name       : Name of the existing VM
#   [String] snapshot_name : Name of an existing snapshot
# returns
#   [boolean] true : once the operations are finished
# raises
#   RuntimeError if the vm_name cannot be found
#   RuntimeError if the snapshot_name cannot be found for this VM
def revert_snapshot(pool_name, vm_name, snapshot_name)
  debug_logger('revert_snapshot')
  begin
    vm_object = connection.get_instance(project, zone(pool_name), vm_name)
  rescue ::Google::Apis::ClientError => e
    raise e unless e.status_code == 404

    # a 404 means the instance does not exist
    raise("VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}")
  end

  snapshot_object = find_snapshot(vm_name, snapshot_name)
  raise("Snapshot #{snapshot_name} for VM #{vm_name} in pool #{pool_name} does not exist for the provider #{name}") if snapshot_object.nil?

  # Shutdown instance before touching its disks
  debug_logger("trigger stop_instance #{vm_name}")
  result = connection.stop_instance(project, zone(pool_name), vm_name)
  wait_for_operation(project, pool_name, result)

  # Detach and delete every existing disk; each one will be recreated from its snapshot
  vm_object.disks&.each do |attached_disk|
    debug_logger("trigger detach_disk #{vm_name}: #{attached_disk.device_name}")
    result = connection.detach_disk(project, zone(pool_name), vm_name, attached_disk.device_name)
    wait_for_operation(project, pool_name, result)
    current_disk_name = disk_name_from_source(attached_disk)
    debug_logger("trigger delete_disk #{vm_name}: #{current_disk_name}")
    result = connection.delete_disk(project, zone(pool_name), current_disk_name)
    wait_for_operation(project, pool_name, result)
  end

  # this block is sensitive to disruptions, for example if vmpooler is stopped while this is running:
  # the old disks are already gone, so stopping here leaves the instance without disks
  snapshot_object.each do |snapshot|
    # the snapshot labels record which disk it came from and whether it was the boot disk
    current_disk_name = snapshot.labels['diskname']
    bootable = (snapshot.labels['boot'] == 'true')
    disk = Google::Apis::ComputeV1::Disk.new(
      name: current_disk_name,
      labels: { 'pool' => pool_name, 'vm' => vm_name },
      source_snapshot: snapshot.self_link
    )
    # create disk in GCE as a separate resource
    debug_logger("trigger insert_disk #{vm_name}: #{current_disk_name} based on #{snapshot.self_link}")
    result = connection.insert_disk(project, zone(pool_name), disk)
    wait_for_operation(project, pool_name, result)
    # read the new disk info to obtain its self_link
    new_disk_info = connection.get_disk(project, zone(pool_name), current_disk_name)
    new_attached_disk = Google::Apis::ComputeV1::AttachedDisk.new(
      auto_delete: true,
      boot: bootable,
      source: new_disk_info.self_link
    )
    # attach the new disk to the existing instance
    debug_logger("trigger attach_disk #{vm_name}: #{current_disk_name}")
    result = connection.attach_disk(project, zone(pool_name), vm_name, new_attached_disk)
    wait_for_operation(project, pool_name, result)
  end

  # all disks are back: power the instance on again
  debug_logger("trigger start_instance #{vm_name}")
  result = connection.start_instance(project, zone(pool_name), vm_name)
  wait_for_operation(project, pool_name, result)
  true
end
|
|
||||||
|
# destroy_vm deletes an existing VM instance and any disks and snapshots via the labels.
# in gce, instances, disks and snapshots are resources that can exist independent of each other,
# so all three resource types are cleaned up here.
# inputs
#   [String] pool_name : Name of the pool
#   [String] vm_name   : Name of the existing VM
# returns
#   [boolean] true : once the operations are finished
def destroy_vm(pool_name, vm_name)
  debug_logger('destroy_vm')
  deleted = false
  begin
    connection.get_instance(project, zone(pool_name), vm_name)
  rescue ::Google::Apis::ClientError => e
    raise e unless e.status_code == 404

    # If a VM doesn't exist then it is effectively deleted
    deleted = true
    debug_logger("instance #{vm_name} already deleted")
  end

  unless deleted
    debug_logger("trigger delete_instance #{vm_name}")
    # fetch the hash first: the DNS teardown needs the record name after the instance is gone
    vm_hash = get_vm(pool_name, vm_name)
    result = connection.delete_instance(project, zone(pool_name), vm_name)
    # deleting an instance is slow, so allow more retries than the default
    wait_for_operation(project, pool_name, result, 10)
    dns_teardown(vm_hash)
  end

  # list and delete any leftover disk, for instance if they were detached from the instance
  filter = "(labels.vm = #{vm_name})"
  disk_list = connection.list_disks(project, zone(pool_name), filter: filter)
  result_list = []
  disk_list.items&.each do |disk|
    debug_logger("trigger delete_disk #{disk.name}")
    result = connection.delete_disk(project, zone(pool_name), disk.name)
    # do them all async, keep a list, check later
    result_list << result
  end
  # now check they are done
  result_list.each do |r|
    wait_for_operation(project, pool_name, r)
  end

  # list and delete leftover snapshots, this could happen if snapshots were taken,
  # as they are not removed when the original disk is deleted or the instance is destroyed
  snapshot_list = find_all_snapshots(vm_name)
  result_list = []
  snapshot_list&.each do |snapshot|
    debug_logger("trigger delete_snapshot #{snapshot.name}")
    result = connection.delete_snapshot(project, snapshot.name)
    # do them all async, keep a list, check later
    result_list << result
  end
  # now check they are done
  result_list.each do |r|
    wait_for_operation(project, pool_name, r)
  end
  true
end
|
|
||||||
|
# Check whether a VM is reachable by attempting a TCP connection (SSH port by
# default, via open_socket). Any failure — refused, timed out, DNS error — is
# reported as "not ready" rather than raised.
# inputs
#   [String] _pool_name : unused, kept for the provider interface
#   [String] vm_name    : Name of the VM to check
# returns
#   [Boolean] true when the socket could be opened, false otherwise
def vm_ready?(_pool_name, vm_name)
  # TODO: we could use a healthcheck resource attached to instance
  open_socket(vm_name, domain || global_config[:config]['domain'])
  true
rescue StandardError
  false
end
|
|
||||||
|
# Scans zones that are configured for lists of resources (VMs, disks, snapshots) that do not have
# label.pool set to one of the configured pools. If a resource is also not covered by the
# allowlist, it is destroyed.
# inputs
#   [Array] allowlist : label values / "key=value" pairs that exempt a resource from purging
#                       (see should_be_ignored)
# returns
#   nothing meaningful; called for its side effects
def purge_unconfigured_resources(allowlist)
  debug_logger('purge_unconfigured_resources')
  pools_array = provided_pools
  filter = {}
  # we have to group things by zone, because the API search feature is done against a zone and not global
  # so we will do the searches in each configured zone
  pools_array.each do |pool|
    filter[zone(pool)] = [] if filter[zone(pool)].nil?
    filter[zone(pool)] << "(labels.pool != #{pool})"
  end
  filter.each_key do |zone|
    # this filter should return any item that has a labels.pool that is not in the config OR
    # does not have a pool label at all
    filter_string = "#{filter[zone].join(' AND ')} OR -labels.pool:*"
    # VMs
    instance_list = connection.list_instances(project, zone, filter: filter_string)

    result_list = []
    instance_list.items&.each do |vm|
      next if should_be_ignored(vm, allowlist)

      debug_logger("trigger async delete_instance #{vm.name}")
      result = connection.delete_instance(project, zone, vm.name)
      # reconstruct enough of the VM hash to remove its DNS record
      vm_pool = vm.labels&.key?('pool') ? vm.labels['pool'] : nil
      existing_vm = generate_vm_hash(vm, vm_pool)
      dns_teardown(existing_vm)
      result_list << result
    end
    # now check they are done
    result_list.each do |result|
      wait_for_zone_operation(project, zone, result)
    end

    # Disks (fire-and-forget: no wait on the delete operations)
    disks_list = connection.list_disks(project, zone, filter: filter_string)
    disks_list.items&.each do |disk|
      next if should_be_ignored(disk, allowlist)

      debug_logger("trigger async no wait delete_disk #{disk.name}")
      connection.delete_disk(project, zone, disk.name)
    end

    # Snapshots (global resources, but filtered with the same label expression)
    snapshot_list = connection.list_snapshots(project, filter: filter_string)
    next if snapshot_list.items.nil?

    snapshot_list.items.each do |sn|
      next if should_be_ignored(sn, allowlist)

      debug_logger("trigger async no wait delete_snapshot #{sn.name}")
      connection.delete_snapshot(project, sn.name)
    end
  end
end
|
|
||||||
|
# tag_vm_user This method is called once we know who is using the VM (it is running). This method enables seeing
# who is using what in the provider pools, by writing a 'token-user' label on the instance.
#
# inputs
#   [String] pool    : Name of the pool
#   [String] vm_name : Name of the VM to tag
# returns
#   [Boolean] : true if successful, false if an error occurred and it should retry
def tag_vm_user(pool, vm_name)
  user = get_current_user(vm_name)
  vm_hash = get_vm(pool, vm_name)
  return false if vm_hash.nil?

  new_labels = vm_hash['labels']
  # bailing in this case since labels should exist, and continuing would mean losing them
  # (set_instance_labels replaces the whole label set)
  return false if new_labels.nil?

  # add new label called token-user, with value as user
  new_labels['token-user'] = user
  begin
    # the label_fingerprint guards against concurrent label updates (optimistic locking)
    instances_set_labels_request_object = Google::Apis::ComputeV1::InstancesSetLabelsRequest.new(label_fingerprint: vm_hash['label_fingerprint'], labels: new_labels)
    result = connection.set_instance_labels(project, zone(pool), vm_name, instances_set_labels_request_object)
    wait_for_zone_operation(project, zone(pool), result)
  rescue StandardError => _e
    # caller treats false as "retry later"
    return false
  end
  true
end
|
|
||||||
|
# END BASE METHODS
|
||||||
|
|
||||||
|
# Register an A record for a freshly created instance in the configured
# Cloud DNS zone. Silently does nothing when no DNS zone is configured or
# the instance hash is missing a name or IP.
# inputs
#   [Hash] created_instance : VM hash as returned by get_vm ('name' and 'ip' keys used)
def dns_setup(created_instance)
  zone_handle = dns.zone(dns_zone_resource_name) if dns_zone_resource_name
  return unless zone_handle && created_instance && created_instance['name'] && created_instance['ip']

  record_name = created_instance['name']
  ip_list = [created_instance['ip']]
  begin
    change = zone_handle.add(record_name, 'A', 60, ip_list)
    debug_logger("#{change.id} - #{change.started_at} - #{change.status} DNS address added") if change
  rescue Google::Cloud::AlreadyExistsError
    # DNS setup is done only for new instances, so in the rare case where a DNS record
    # already exists it is stale and we replace it.
    # the error is Google::Cloud::AlreadyExistsError: alreadyExists: The resource 'entity.change.additions[0]' named 'instance-8.test.vmpooler.net. (A)' already exists
    change = zone_handle.replace(record_name, 'A', 60, ip_list)
    debug_logger("#{change.id} - #{change.started_at} - #{change.status} DNS address previously existed and was replaced") if change
  end
end
|
|
||||||
|
# Remove the A record of a destroyed instance from the configured Cloud DNS
# zone. No-op when DNS is not configured or no instance hash is given.
# inputs
#   [Hash] created_instance : VM hash as returned by get_vm ('name' key used)
def dns_teardown(created_instance)
  zone_handle = dns.zone(dns_zone_resource_name) if dns_zone_resource_name
  return unless zone_handle && created_instance

  record_name = created_instance['name']
  change = zone_handle.remove(record_name, 'A')
  debug_logger("#{change.id} - #{change.started_at} - #{change.status} DNS address removed") if change
end
|
|
||||||
|
# Decide whether a resource (VM, disk or snapshot) is exempt from purging.
# A resource is ignored when:
#   - the allowlist contains the value of its 'pool' label, or
#   - the allowlist contains '' and the resource has no 'pool' label at all, or
#   - the allowlist contains a fully qualified "key=value" pair matching one of its labels.
# inputs
#   [Object] item      : a GCE resource responding to #labels (Hash or nil)
#   [Array]  allowlist : configured exemptions; nil means nothing is exempt
# returns
#   [Boolean] true when the resource must be skipped by the purge
def should_be_ignored(item, allowlist)
  return false if allowlist.nil?

  # Resource label values are always lowercase, so compare against lowercased
  # allowlist entries. Use a non-mutating map: the previous map! mutated the
  # caller's configured allowlist array on every call.
  normalized = allowlist.map(&:downcase)
  array_flattened_labels = []
  item.labels&.each do |k, v|
    array_flattened_labels << "#{k}=#{v}"
  end
  (!item.labels.nil? && normalized.include?(item.labels['pool'])) || # the allow list specifies the value within the pool label
    (normalized.include?('') && !item.labels&.keys&.include?('pool')) || # the allow list specifies "" string and the pool label is not set
    !(normalized & array_flattened_labels).empty? # the allow list specifies a fully qualified label eg user=bob and the item has it
end
|
|
||||||
|
# Look up which user checked out the given VM from redis.
# Returns '' when no user is recorded. The value is normalised so it can be
# used as a GCE label value (lowercase, only [0-9a-z_-], other chars become '-').
# inputs
#   [String] vm_name : Name of the VM
# returns
#   [String] the sanitised user name, or '' when unknown
def get_current_user(vm_name)
  @redis.with_metrics do |redis|
    user = redis.hget("vmpooler__vm__#{vm_name}", 'token:user')
    return '' if user.nil?

    # labels can't contain uppercase letters; squash any other invalid character to a dash
    return user.downcase.gsub(/[^0-9a-z_-]/, '-')
  end
end
|
|
||||||
|
# Compute resource wait for operation to be DONE (synchronous operation).
# Polls the zone operation until its status is DONE, raising if the finished
# operation carries errors. Transient transmission errors are retried up to
# `retries` times; a 404 on the operation itself is treated as already finished.
# inputs
#   [String] project : GCE project id
#   [String] zone    : zone the operation belongs to
#   [Google::Apis::ComputeV1::Operation] result : the operation to wait on
#   [Integer] retries : how many TransmissionError timeouts to tolerate
# returns
#   the final operation object (or nil when the operation 404'd)
def wait_for_zone_operation(project, zone, result, retries = 5)
  while result.status != 'DONE'
    result = connection.wait_zone_operation(project, zone, result.name)
    debug_logger(" -> wait_for_zone_operation status #{result.status} (#{result.name})")
  end
  if result.error # unsure what kind of error can be stored here
    error_message = ''
    # array of errors, combine them all into one message
    result.error.errors.each do |error|
      error_message = "#{error_message} #{error.code}:#{error.message}"
    end
    raise "Operation: #{result.description} failed with error: #{error_message}"
  end
  result
rescue Google::Apis::TransmissionError => e
  # Error returned once timeout reached, each retry typically about 1 minute.
  if retries.positive?
    retries -= 1
    retry
  end
  raise
rescue Google::Apis::ClientError => e
  raise e unless e.status_code == 404

  # if the operation is not found, and we are 'waiting' on it, it might be because it
  # is already finished
  puts "waited on #{result.name} but was not found, so skipping"
end
|
|
||||||
|
# Pool-oriented convenience wrapper around wait_for_zone_operation:
# resolves the pool's zone before delegating.
def wait_for_operation(project, pool_name, result, retries = 5)
  target_zone = zone(pool_name)
  wait_for_zone_operation(project, target_zone, result, retries)
end
|
|
||||||
|
# Return a hash of VM data.
# Provides name, hostname, template, poolname, boottime, status, zone,
# machine_type, labels, label_fingerprint and ip information.
# Returns nil when the pool is not configured for this provider.
def generate_vm_hash(vm_object, pool_name)
  pool_configuration = pool_config(pool_name)
  return nil if pool_configuration.nil?

  labels = vm_object.labels
  nics = vm_object.network_interfaces
  {
    'name' => vm_object.name,
    'hostname' => vm_object.hostname,
    # was expecting to get it from API, not from config, but this is what vSphere does too!
    'template' => pool_configuration.key?('template') ? pool_configuration['template'] : nil,
    'poolname' => labels&.key?('pool') ? labels['pool'] : nil,
    'boottime' => vm_object.creation_timestamp,
    # One of: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, TERMINATED
    'status' => vm_object.status,
    'zone' => vm_object.zone,
    'machine_type' => vm_object.machine_type,
    'labels' => labels,
    'label_fingerprint' => vm_object.label_fingerprint,
    'ip' => nics ? nics.first.network_ip : nil
  }
end
|
|
||||||
|
# Return the cached API connection stored in the connection-pool object,
# creating it on first use.
# Fix: this previously called connect_to_gce, a name that no longer exists in
# this provider (the factory was renamed to connect_to_aws), so the first
# connection attempt raised NameError.
def ensured_aws_connection(connection_pool_object)
  connection_pool_object[:connection] ||= connect_to_aws
  connection_pool_object[:connection]
end
|
|
||||||
|
# Build and authorise an API client, retrying with linear backoff on failure.
# NOTE(review): despite the name, this still constructs a Google ComputeService
# with GCE scopes — presumably mid-rename from the GCE provider; confirm intent.
# Retries up to config 'max_tries' (default 3) with 'retry_factor' (default 10)
# seconds multiplied by the attempt number between tries.
# returns
#   [::Google::Apis::ComputeV1::ComputeService] an authorised client
def connect_to_aws
  max_tries = global_config[:config]['max_tries'] || 3
  retry_factor = global_config[:config]['retry_factor'] || 10
  try = 1
  begin
    scopes = ['https://www.googleapis.com/auth/compute', 'https://www.googleapis.com/auth/cloud-platform']

    # uses Application Default Credentials (env var / metadata server)
    authorization = Google::Auth.get_application_default(scopes)

    compute = ::Google::Apis::ComputeV1::ComputeService.new
    compute.authorization = authorization

    metrics.increment('connect.open')
    compute
  rescue StandardError => e # is that even a thing?
    metrics.increment('connect.fail')
    raise e if try >= max_tries

    # linear backoff: try * retry_factor seconds
    sleep(try * retry_factor)
    try += 1
    retry
  end
end
|
|
||||||
|
# This should supersede the open_socket method in the Pool Manager.
# Opens a TCP connection to host (optionally qualified with domain) on the
# given port, yields the socket to the block if one is given, and always
# closes the socket. Raises Timeout::Error when the whole exchange exceeds
# `timeout` seconds.
def open_socket(host, domain = nil, timeout = 5, port = 22, &_block)
  Timeout.timeout(timeout) do
    fqdn = domain ? "#{host}.#{domain}" : host
    sock = TCPSocket.new fqdn, port
    begin
      yield sock if block_given?
    ensure
      sock.close
    end
  end
end
|
|
||||||
|
# this is used because for one vm, with the same snapshot name there could be
# multiple snapshots, one for each disk
# returns the array of matching snapshot objects, or nil when none exist
def find_snapshot(vm_name, snapshotname)
  query = "(labels.vm = #{vm_name}) AND (labels.snapshot_name = #{snapshotname})"
  connection.list_snapshots(project, filter: query).items # array of snapshot objects
end
|
|
||||||
|
# find all snapshots ever created for one vm,
# regardless of snapshot name, for example when deleting them all
# returns the array of snapshot objects, or nil when none exist
def find_all_snapshots(vm_name)
  query = "(labels.vm = #{vm_name})"
  connection.list_snapshots(project, filter: query).items # array of snapshot objects
end
|
|
||||||
|
# Extract the disk name from an attached disk's full source URL;
# the name is the final path segment after the last '/'.
def disk_name_from_source(attached_disk)
  attached_disk.source.split('/').last
end
|
|
||||||
|
# used in local dev environment, set DEBUG_FLAG=true
# this way the upstream vmpooler manager does not get polluted with logs
# the default logger is simple and does not enforce debug levels
def debug_logger(message, send_to_upstream: false)
  puts(message) if ENV['DEBUG_FLAG']
  return unless send_to_upstream

  logger.log('[g]', message)
end
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
153
spec/helpers.rb
Normal file
153
spec/helpers.rb
Normal file
|
|
@ -0,0 +1,153 @@
|
||||||
|
# frozen_string_literal: true
|
||||||
|
|
||||||
|
require 'mock_redis'
|
||||||
|
|
||||||
|
# Memoised MockRedis instance shared across the specs.
def redis
  @redis = MockRedis.new if @redis.nil?
  @redis
end
|
|
||||||
|
# Mock an object which represents a Logger. This stops the proliferation
# of allow(logger).to .... expectations in tests.
class MockLogger
  # Accept and discard any log call; specs only care that logging does not raise.
  def log(_level, string); end
end
|
|
||||||
|
# Assert that the last rack-test response is JSON with the expected 'ok'
# flag in its body and the expected HTTP status.
def expect_json(ok = true, http = 200)
  expect(last_response.header['Content-Type']).to eq('application/json')

  expected_flag = (ok == true)
  expect(JSON.parse(last_response.body)['ok']).to eq(expected_flag)

  expect(last_response.status).to eq(http)
end
|
|
||||||
|
# Store a token record (owner and creation time) in redis.
def create_token(token, user, timestamp)
  token_key = "vmpooler__token__#{token}"
  redis.hset(token_key, 'user', user)
  redis.hset(token_key, 'created', timestamp)
end
|
|
||||||
|
# Fetch the full hash stored for a token (empty hash when unknown).
def get_token_data(token)
  token_key = "vmpooler__token__#{token}"
  redis.hgetall(token_key)
end
|
|
||||||
|
# True when any data is stored for the given token.
# Fix: get_token_data was called without the token argument, which raises
# ArgumentError (it requires one) the moment this helper is used.
def token_exists?(token)
  result = get_token_data(token)
  result && !result.empty?
end
|
|
||||||
|
# Create a VM record and mark it as ready in the given pool.
def create_ready_vm(template, name, redis, token = nil)
  create_vm(name, redis, token)
  redis.sadd("vmpooler__ready__#{template}", name)
  vm_key = "vmpooler__vm__#{name}"
  redis.hset(vm_key, 'template', template)
end
|
|
||||||
|
# Create a VM record and mark it as running (checked out) in the given pool.
def create_running_vm(template, name, redis, token = nil, user = nil)
  create_vm(name, redis, token, user)
  redis.sadd("vmpooler__running__#{template}", name)
  vm_key = "vmpooler__vm__#{name}"
  redis.hset(vm_key, 'template', template)
  redis.hset(vm_key, 'checkout', Time.now)
  redis.hset(vm_key, 'host', 'host1')
end
|
|
||||||
|
# Create a VM record and mark it as pending in the given pool.
def create_pending_vm(template, name, redis, token = nil)
  create_vm(name, redis, token)
  redis.sadd("vmpooler__pending__#{template}", name)
  vm_key = "vmpooler__vm__#{name}"
  redis.hset(vm_key, 'template', template)
end
|
|
||||||
|
# Seed the base redis bookkeeping for a VM record used by the specs.
def create_vm(name, redis, token = nil, user = nil)
  vm_key = "vmpooler__vm__#{name}"
  redis.hset(vm_key, 'checkout', Time.now)
  redis.hset(vm_key, 'clone', Time.now)
  redis.hset(vm_key, 'token:token', token) if token
  redis.hset(vm_key, 'token:user', user) if user
end
|
|
||||||
|
# Mark a VM as completed in the given pool; optionally also record it as active.
def create_completed_vm(name, pool, redis, active = false)
  redis.sadd("vmpooler__completed__#{pool}", name)
  redis.hset("vmpooler__vm__#{name}", 'checkout', Time.now)
  return unless active

  redis.hset("vmpooler__active__#{pool}", name, Time.now)
end
|
|
||||||
|
# Place a VM name on the pool's discovered set.
def create_discovered_vm(name, pool, redis)
  discovered_key = "vmpooler__discovered__#{pool}"
  redis.sadd(discovered_key, name)
end
|
|
||||||
|
# Record a checked-out VM as migrating within the given pool.
def create_migrating_vm(name, pool, redis)
  redis.hset("vmpooler__vm__#{name}", 'checkout', Time.now)
  migrating_key = "vmpooler__migrating__#{pool}"
  redis.sadd(migrating_key, name)
end
|
|
||||||
|
# Attach a tag (stored as a 'tag:<name>' hash field) to a VM record.
def create_tag(vm, tag_name, tag_value, redis)
  tag_field = "tag:#{tag_name}"
  redis.hset("vmpooler__vm__#{vm}", tag_field, tag_value)
end
|
|
||||||
|
# Record the VM in the global migration set.
def add_vm_to_migration_set(name, redis)
  migration_key = 'vmpooler__migration'
  redis.sadd(migration_key, name)
end
|
|
||||||
|
# Read back the entire hash stored for a VM record.
def fetch_vm(vm)
  vm_key = "vmpooler__vm__#{vm}"
  redis.hgetall(vm_key)
end
|
|
||||||
|
# Set a single field on a VM's redis hash.
def set_vm_data(vm, key, value, redis)
  vm_key = "vmpooler__vm__#{vm}"
  redis.hset(vm_key, key, value)
end
|
|
||||||
|
# Queue a snapshot-revert task for a VM and flag the snapshot on its record.
def snapshot_revert_vm(vm, snapshot = '12345678901234567890123456789012', redis)
  task_entry = "#{vm}:#{snapshot}"
  redis.sadd('vmpooler__tasks__snapshot-revert', task_entry)
  redis.hset("vmpooler__vm__#{vm}", "snapshot:#{snapshot}", '1')
end
|
|
||||||
|
# Queue a snapshot task for a VM and flag the snapshot on its record.
def snapshot_vm(vm, snapshot = '12345678901234567890123456789012', redis)
  task_entry = "#{vm}:#{snapshot}"
  redis.sadd('vmpooler__tasks__snapshot', task_entry)
  redis.hset("vmpooler__vm__#{vm}", "snapshot:#{snapshot}", '1')
end
|
|
||||||
|
def disk_task_vm(vm, disk_size = '10', redis)
|
||||||
|
redis.sadd('vmpooler__tasks__disk', "#{vm}:#{disk_size}")
|
||||||
|
end
|
||||||
|
|
||||||
|
def has_vm_snapshot?(vm, redis)
|
||||||
|
redis.smembers('vmpooler__tasks__snapshot').any? do |snapshot|
|
||||||
|
instance, _sha = snapshot.split(':')
|
||||||
|
vm == instance
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def vm_reverted_to_snapshot?(vm, redis, snapshot = nil)
|
||||||
|
redis.smembers('vmpooler__tasks__snapshot-revert').any? do |action|
|
||||||
|
instance, sha = action.split(':')
|
||||||
|
instance == vm and (snapshot ? (sha == snapshot) : true)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def pool_has_ready_vm?(pool, vm, redis)
|
||||||
|
!!redis.sismember("vmpooler__ready__#{pool}", vm)
|
||||||
|
end
|
||||||
|
|
||||||
|
def create_ondemand_request_for_test(request_id, score, platforms_string, redis, user = nil, token = nil)
|
||||||
|
redis.zadd('vmpooler__provisioning__request', score, request_id)
|
||||||
|
redis.hset("vmpooler__odrequest__#{request_id}", 'requested', platforms_string)
|
||||||
|
redis.hset("vmpooler__odrequest__#{request_id}", 'token:token', token) if token
|
||||||
|
redis.hset("vmpooler__odrequest__#{request_id}", 'token:user', user) if user
|
||||||
|
end
|
||||||
|
|
||||||
|
def set_ondemand_request_status(request_id, status, redis)
|
||||||
|
redis.hset("vmpooler__odrequest__#{request_id}", 'status', status)
|
||||||
|
end
|
||||||
|
|
||||||
|
def create_ondemand_vm(vmname, request_id, pool, pool_alias, redis)
|
||||||
|
redis.sadd("vmpooler__#{request_id}__#{pool_alias}__#{pool}", vmname)
|
||||||
|
end
|
||||||
|
|
||||||
|
def create_ondemand_creationtask(request_string, score, redis)
|
||||||
|
redis.zadd('vmpooler__odcreate__task', score, request_string)
|
||||||
|
end
|
||||||
|
|
||||||
|
def create_ondemand_processing(request_id, score, redis)
|
||||||
|
redis.zadd('vmpooler__provisioning__processing', score, request_id)
|
||||||
|
end
|
||||||
19
spec/spec_helper.rb
Normal file
19
spec/spec_helper.rb
Normal file
|
|
@ -0,0 +1,19 @@
|
||||||
|
# frozen_string_literal: true
|
||||||
|
|
||||||
|
require 'simplecov'
|
||||||
|
SimpleCov.start do
|
||||||
|
add_filter '/spec/'
|
||||||
|
end
|
||||||
|
require 'helpers'
|
||||||
|
require 'rspec'
|
||||||
|
require 'vmpooler'
|
||||||
|
require 'redis'
|
||||||
|
require 'vmpooler/metrics'
|
||||||
|
|
||||||
|
def project_root_dir
|
||||||
|
File.dirname(File.dirname(__FILE__))
|
||||||
|
end
|
||||||
|
|
||||||
|
def fixtures_dir
|
||||||
|
File.join(project_root_dir, 'spec', 'fixtures')
|
||||||
|
end
|
||||||
97
spec/unit/providers/aws_spec.rb
Normal file
97
spec/unit/providers/aws_spec.rb
Normal file
|
|
@ -0,0 +1,97 @@
|
||||||
|
require 'spec_helper'
|
||||||
|
require 'mock_redis'
|
||||||
|
require 'vmpooler/providers/aws'
|
||||||
|
|
||||||
|
RSpec::Matchers.define :relocation_spec_with_host do |value|
|
||||||
|
match { |actual| actual[:spec].host == value }
|
||||||
|
end
|
||||||
|
|
||||||
|
describe 'Vmpooler::PoolManager::Provider::Aws' do
|
||||||
|
let(:logger) { MockLogger.new }
|
||||||
|
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
|
||||||
|
let(:poolname) { 'debian-9' }
|
||||||
|
let(:provider_options) { { 'param' => 'value' } }
|
||||||
|
let(:project) { 'vmpooler-test' }
|
||||||
|
let(:zone) { 'us-west1-b' }
|
||||||
|
let(:config) { YAML.load(<<~EOT
|
||||||
|
---
|
||||||
|
:config:
|
||||||
|
max_tries: 3
|
||||||
|
retry_factor: 10
|
||||||
|
:providers:
|
||||||
|
:gce:
|
||||||
|
connection_pool_timeout: 1
|
||||||
|
project: '#{project}'
|
||||||
|
zone: '#{zone}'
|
||||||
|
network_name: global/networks/default
|
||||||
|
:pools:
|
||||||
|
- name: '#{poolname}'
|
||||||
|
alias: [ 'mockpool' ]
|
||||||
|
template: 'projects/debian-cloud/global/images/family/debian-9'
|
||||||
|
size: 5
|
||||||
|
timeout: 10
|
||||||
|
ready_ttl: 1440
|
||||||
|
provider: 'gce'
|
||||||
|
machine_type: 'zones/#{zone}/machineTypes/e2-micro'
|
||||||
|
EOT
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
let(:vmname) { 'vm17' }
|
||||||
|
let(:connection) { MockComputeServiceConnection.new }
|
||||||
|
let(:redis_connection_pool) do
|
||||||
|
Vmpooler::PoolManager::GenericConnectionPool.new(
|
||||||
|
metrics: metrics,
|
||||||
|
connpool_type: 'redis_connection_pool',
|
||||||
|
connpool_provider: 'testprovider',
|
||||||
|
size: 1,
|
||||||
|
timeout: 5
|
||||||
|
) { MockRedis.new }
|
||||||
|
end
|
||||||
|
|
||||||
|
subject { Vmpooler::PoolManager::Provider::Aws.new(config, logger, metrics, redis_connection_pool, 'aws', provider_options) }
|
||||||
|
|
||||||
|
before(:each) { allow(subject).to receive(:dns).and_return(MockDNS.new()) }
|
||||||
|
|
||||||
|
describe '#manual tests live' do
|
||||||
|
context 'in itsysops' do
|
||||||
|
before(:each) { allow(subject).to receive(:dns).and_call_original }
|
||||||
|
let(:vmname) { "instance-24" }
|
||||||
|
let(:project) { 'vmpooler-test' }
|
||||||
|
let(:config) { YAML.load(<<~EOT
|
||||||
|
---
|
||||||
|
:config:
|
||||||
|
max_tries: 3
|
||||||
|
retry_factor: 10
|
||||||
|
:providers:
|
||||||
|
:gce:
|
||||||
|
connection_pool_timeout: 1
|
||||||
|
project: '#{project}'
|
||||||
|
zone: '#{zone}'
|
||||||
|
network_name: 'projects/itsysopsnetworking/global/networks/shared1'
|
||||||
|
dns_zone_resource_name: 'test-vmpooler-puppet-net'
|
||||||
|
domain: 'test.vmpooler.puppet.net'
|
||||||
|
:pools:
|
||||||
|
- name: '#{poolname}'
|
||||||
|
alias: [ 'mockpool' ]
|
||||||
|
template: 'projects/debian-cloud/global/images/family/debian-9'
|
||||||
|
size: 5
|
||||||
|
timeout: 10
|
||||||
|
ready_ttl: 1440
|
||||||
|
provider: 'gce'
|
||||||
|
subnetwork_name: 'projects/itsysopsnetworking/regions/us-west1/subnetworks/vmpooler-test'
|
||||||
|
machine_type: 'zones/#{zone}/machineTypes/e2-micro'
|
||||||
|
EOT
|
||||||
|
) }
|
||||||
|
it 'gets a vm' do
|
||||||
|
result = subject.create_vm(poolname, vmname)
|
||||||
|
#result = subject.destroy_vm(poolname, vmname)
|
||||||
|
subject.get_vm(poolname, vmname)
|
||||||
|
#subject.dns_teardown({'name' => vmname})
|
||||||
|
# subject.dns_setup({'name' => vmname, 'ip' => '1.2.3.5'})
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
|
||||||
|
end
|
||||||
767
spec/unit/providers/gce_spec.rb
Normal file
767
spec/unit/providers/gce_spec.rb
Normal file
|
|
@ -0,0 +1,767 @@
|
||||||
|
require 'spec_helper'
|
||||||
|
require 'mock_redis'
|
||||||
|
require 'vmpooler/providers/gce'
|
||||||
|
|
||||||
|
RSpec::Matchers.define :relocation_spec_with_host do |value|
|
||||||
|
match { |actual| actual[:spec].host == value }
|
||||||
|
end
|
||||||
|
|
||||||
|
describe 'Vmpooler::PoolManager::Provider::Gce' do
|
||||||
|
let(:logger) { MockLogger.new }
|
||||||
|
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
|
||||||
|
let(:poolname) { 'debian-9' }
|
||||||
|
let(:provider_options) { { 'param' => 'value' } }
|
||||||
|
let(:project) { 'vmpooler-test' }
|
||||||
|
let(:zone) { 'us-west1-b' }
|
||||||
|
let(:config) { YAML.load(<<~EOT
|
||||||
|
---
|
||||||
|
:config:
|
||||||
|
max_tries: 3
|
||||||
|
retry_factor: 10
|
||||||
|
:providers:
|
||||||
|
:gce:
|
||||||
|
connection_pool_timeout: 1
|
||||||
|
project: '#{project}'
|
||||||
|
zone: '#{zone}'
|
||||||
|
network_name: global/networks/default
|
||||||
|
:pools:
|
||||||
|
- name: '#{poolname}'
|
||||||
|
alias: [ 'mockpool' ]
|
||||||
|
template: 'projects/debian-cloud/global/images/family/debian-9'
|
||||||
|
size: 5
|
||||||
|
timeout: 10
|
||||||
|
ready_ttl: 1440
|
||||||
|
provider: 'gce'
|
||||||
|
machine_type: 'zones/#{zone}/machineTypes/e2-micro'
|
||||||
|
EOT
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
let(:vmname) { 'vm17' }
|
||||||
|
let(:connection) { MockComputeServiceConnection.new }
|
||||||
|
let(:redis_connection_pool) do
|
||||||
|
Vmpooler::PoolManager::GenericConnectionPool.new(
|
||||||
|
metrics: metrics,
|
||||||
|
connpool_type: 'redis_connection_pool',
|
||||||
|
connpool_provider: 'testprovider',
|
||||||
|
size: 1,
|
||||||
|
timeout: 5
|
||||||
|
) { MockRedis.new }
|
||||||
|
end
|
||||||
|
|
||||||
|
subject { Vmpooler::PoolManager::Provider::Gce.new(config, logger, metrics, redis_connection_pool, 'gce', provider_options) }
|
||||||
|
|
||||||
|
before(:each) { allow(subject).to receive(:dns).and_return(MockDNS.new()) }
|
||||||
|
|
||||||
|
describe '#name' do
|
||||||
|
it 'should be gce' do
|
||||||
|
expect(subject.name).to eq('gce')
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#manual tests live' do
|
||||||
|
context 'in itsysops' do
|
||||||
|
before(:each) { allow(subject).to receive(:dns).and_call_original }
|
||||||
|
let(:vmname) { "instance-24" }
|
||||||
|
let(:project) { 'vmpooler-test' }
|
||||||
|
let(:config) { YAML.load(<<~EOT
|
||||||
|
---
|
||||||
|
:config:
|
||||||
|
max_tries: 3
|
||||||
|
retry_factor: 10
|
||||||
|
:providers:
|
||||||
|
:gce:
|
||||||
|
connection_pool_timeout: 1
|
||||||
|
project: '#{project}'
|
||||||
|
zone: '#{zone}'
|
||||||
|
network_name: 'projects/itsysopsnetworking/global/networks/shared1'
|
||||||
|
dns_zone_resource_name: 'test-vmpooler-puppet-net'
|
||||||
|
domain: 'test.vmpooler.puppet.net'
|
||||||
|
:pools:
|
||||||
|
- name: '#{poolname}'
|
||||||
|
alias: [ 'mockpool' ]
|
||||||
|
template: 'projects/debian-cloud/global/images/family/debian-9'
|
||||||
|
size: 5
|
||||||
|
timeout: 10
|
||||||
|
ready_ttl: 1440
|
||||||
|
provider: 'gce'
|
||||||
|
subnetwork_name: 'projects/itsysopsnetworking/regions/us-west1/subnetworks/vmpooler-test'
|
||||||
|
machine_type: 'zones/#{zone}/machineTypes/e2-micro'
|
||||||
|
EOT
|
||||||
|
) }
|
||||||
|
skip 'gets a vm' do
|
||||||
|
result = subject.create_vm(poolname, vmname)
|
||||||
|
#result = subject.destroy_vm(poolname, vmname)
|
||||||
|
subject.get_vm(poolname, vmname)
|
||||||
|
#subject.dns_teardown({'name' => vmname})
|
||||||
|
# subject.dns_setup({'name' => vmname, 'ip' => '1.2.3.5'})
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#vms_in_pool' do
|
||||||
|
let(:pool_config) { config[:pools][0] }
|
||||||
|
|
||||||
|
before(:each) do
|
||||||
|
allow(subject).to receive(:connect_to_gce).and_return(connection)
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'Given an empty pool folder' do
|
||||||
|
it 'should return an empty array' do
|
||||||
|
instance_list = MockInstanceList.new(items: nil)
|
||||||
|
allow(connection).to receive(:list_instances).and_return(instance_list)
|
||||||
|
result = subject.vms_in_pool(poolname)
|
||||||
|
|
||||||
|
expect(result).to eq([])
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'Given a pool folder with many VMs' do
|
||||||
|
let(:expected_vm_list) do
|
||||||
|
[
|
||||||
|
{ 'name' => 'vm1' },
|
||||||
|
{ 'name' => 'vm2' },
|
||||||
|
{ 'name' => 'vm3' }
|
||||||
|
]
|
||||||
|
end
|
||||||
|
before(:each) do
|
||||||
|
instance_list = MockInstanceList.new(items: [])
|
||||||
|
expected_vm_list.each do |vm_hash|
|
||||||
|
mock_vm = MockInstance.new(name: vm_hash['name'])
|
||||||
|
instance_list.items << mock_vm
|
||||||
|
end
|
||||||
|
|
||||||
|
expect(connection).to receive(:list_instances).and_return(instance_list)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should list all VMs in the VM folder for the pool' do
|
||||||
|
result = subject.vms_in_pool(poolname)
|
||||||
|
|
||||||
|
expect(result).to eq(expected_vm_list)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#get_vm' do
|
||||||
|
before(:each) do
|
||||||
|
allow(subject).to receive(:connect_to_gce).and_return(connection)
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'when VM does not exist' do
|
||||||
|
it 'should return nil' do
|
||||||
|
allow(connection).to receive(:get_instance).and_raise(create_google_client_error(404, "The resource 'projects/#{project}/zones/#{zone}/instances/#{vmname}' was not found"))
|
||||||
|
expect(subject.get_vm(poolname, vmname)).to be_nil
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'when VM exists but is missing information' do
|
||||||
|
before(:each) do
|
||||||
|
allow(connection).to receive(:get_instance).and_return(MockInstance.new(name: vmname))
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return a hash' do
|
||||||
|
expect(subject.get_vm(poolname, vmname)).to be_kind_of(Hash)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return the VM name' do
|
||||||
|
result = subject.get_vm(poolname, vmname)
|
||||||
|
|
||||||
|
expect(result['name']).to eq(vmname)
|
||||||
|
end
|
||||||
|
|
||||||
|
%w[hostname boottime zone status].each do |testcase|
|
||||||
|
it "should return nil for #{testcase}" do
|
||||||
|
result = subject.get_vm(poolname, vmname)
|
||||||
|
|
||||||
|
expect(result[testcase]).to be_nil
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'when VM exists and contains all information' do
|
||||||
|
let(:vm_hostname) { "#{vmname}.demo.local" }
|
||||||
|
let(:boot_time) { Time.now }
|
||||||
|
let(:vm_object) do
|
||||||
|
MockInstance.new(
|
||||||
|
name: vmname,
|
||||||
|
hostname: vm_hostname,
|
||||||
|
labels: { 'pool' => poolname },
|
||||||
|
creation_timestamp: boot_time,
|
||||||
|
status: 'RUNNING',
|
||||||
|
zone: zone,
|
||||||
|
machine_type: "zones/#{zone}/machineTypes/e2-micro"
|
||||||
|
)
|
||||||
|
end
|
||||||
|
let(:pool_info) { config[:pools][0] }
|
||||||
|
|
||||||
|
before(:each) do
|
||||||
|
allow(connection).to receive(:get_instance).and_return(vm_object)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return a hash' do
|
||||||
|
expect(subject.get_vm(poolname, vmname)).to be_kind_of(Hash)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return the VM name' do
|
||||||
|
result = subject.get_vm(poolname, vmname)
|
||||||
|
|
||||||
|
expect(result['name']).to eq(vmname)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return the VM hostname' do
|
||||||
|
result = subject.get_vm(poolname, vmname)
|
||||||
|
|
||||||
|
expect(result['hostname']).to eq(vm_hostname)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return the template name' do
|
||||||
|
result = subject.get_vm(poolname, vmname)
|
||||||
|
|
||||||
|
expect(result['template']).to eq(pool_info['template'])
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return the pool name' do
|
||||||
|
result = subject.get_vm(poolname, vmname)
|
||||||
|
|
||||||
|
expect(result['poolname']).to eq(pool_info['name'])
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return the boot time' do
|
||||||
|
result = subject.get_vm(poolname, vmname)
|
||||||
|
|
||||||
|
expect(result['boottime']).to eq(boot_time)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#create_vm' do
|
||||||
|
before(:each) do
|
||||||
|
allow(subject).to receive(:connect_to_gce).and_return(connection)
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'Given an invalid pool name' do
|
||||||
|
it 'should raise an error' do
|
||||||
|
expect { subject.create_vm('missing_pool', vmname) }.to raise_error(/missing_pool does not exist/)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'Given a template VM that does not exist' do
|
||||||
|
before(:each) do
|
||||||
|
config[:pools][0]['template'] = 'Templates/missing_template'
|
||||||
|
# result = MockResult.new
|
||||||
|
# result.status = "PENDING"
|
||||||
|
# errors = MockOperationError
|
||||||
|
# errors << MockOperationErrorError.new(code: "foo", message: "it's missing")
|
||||||
|
# result.error = errors
|
||||||
|
allow(connection).to receive(:insert_instance).and_raise(create_google_client_error(404, 'The resource \'Templates/missing_template\' was not found'))
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise an error' do
|
||||||
|
expect { subject.create_vm(poolname, vmname) }.to raise_error(Google::Apis::ClientError)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'Given a successful creation' do
|
||||||
|
before(:each) do
|
||||||
|
result = MockResult.new
|
||||||
|
result.status = 'DONE'
|
||||||
|
allow(connection).to receive(:insert_instance).and_return(result)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return a hash' do
|
||||||
|
allow(connection).to receive(:get_instance).and_return(MockInstance.new)
|
||||||
|
result = subject.create_vm(poolname, vmname)
|
||||||
|
|
||||||
|
expect(result.is_a?(Hash)).to be true
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should have the new VM name' do
|
||||||
|
instance = MockInstance.new(name: vmname)
|
||||||
|
allow(connection).to receive(:get_instance).and_return(instance)
|
||||||
|
result = subject.create_vm(poolname, vmname)
|
||||||
|
|
||||||
|
expect(result['name']).to eq(vmname)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#destroy_vm' do
|
||||||
|
before(:each) do
|
||||||
|
allow(subject).to receive(:connect_to_gce).and_return(connection)
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'Given a missing VM name' do
|
||||||
|
before(:each) do
|
||||||
|
allow(connection).to receive(:get_instance).and_raise(create_google_client_error(404, "The resource 'projects/#{project}/zones/#{zone}/instances/#{vmname}' was not found"))
|
||||||
|
disk_list = MockDiskList.new(items: nil)
|
||||||
|
allow(connection).to receive(:list_disks).and_return(disk_list)
|
||||||
|
allow(subject).to receive(:find_all_snapshots).and_return(nil)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return true' do
|
||||||
|
expect(connection.should_receive(:delete_instance).never)
|
||||||
|
expect(subject.destroy_vm(poolname, 'missing_vm')).to be true
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'Given a running VM' do
|
||||||
|
before(:each) do
|
||||||
|
instance = MockInstance.new(name: vmname)
|
||||||
|
allow(connection).to receive(:get_instance).and_return(instance)
|
||||||
|
result = MockResult.new
|
||||||
|
result.status = 'DONE'
|
||||||
|
allow(subject).to receive(:wait_for_operation).and_return(result)
|
||||||
|
allow(connection).to receive(:delete_instance).and_return(result)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return true' do
|
||||||
|
# no dangling disks
|
||||||
|
disk_list = MockDiskList.new(items: nil)
|
||||||
|
allow(connection).to receive(:list_disks).and_return(disk_list)
|
||||||
|
# no dangling snapshots
|
||||||
|
allow(subject).to receive(:find_all_snapshots).and_return(nil)
|
||||||
|
expect(subject.destroy_vm(poolname, vmname)).to be true
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should delete any dangling disks' do
|
||||||
|
disk = MockDisk.new(name: vmname)
|
||||||
|
disk_list = MockDiskList.new(items: [disk])
|
||||||
|
allow(connection).to receive(:list_disks).and_return(disk_list)
|
||||||
|
# no dangling snapshots
|
||||||
|
allow(subject).to receive(:find_all_snapshots).and_return(nil)
|
||||||
|
expect(connection).to receive(:delete_disk).with(project, zone, disk.name)
|
||||||
|
subject.destroy_vm(poolname, vmname)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should delete any dangling snapshots' do
|
||||||
|
# no dangling disks
|
||||||
|
disk_list = MockDiskList.new(items: nil)
|
||||||
|
allow(connection).to receive(:list_disks).and_return(disk_list)
|
||||||
|
snapshot = MockSnapshot.new(name: "snapshotname-#{vmname}")
|
||||||
|
allow(subject).to receive(:find_all_snapshots).and_return([snapshot])
|
||||||
|
expect(connection).to receive(:delete_snapshot).with(project, snapshot.name)
|
||||||
|
subject.destroy_vm(poolname, vmname)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#vm_ready?' do
|
||||||
|
let(:domain) { nil }
|
||||||
|
context 'When a VM is ready' do
|
||||||
|
before(:each) do
|
||||||
|
expect(subject).to receive(:open_socket).with(vmname, domain)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return true' do
|
||||||
|
expect(subject.vm_ready?(poolname, vmname)).to be true
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'When an error occurs connecting to the VM' do
|
||||||
|
before(:each) do
|
||||||
|
expect(subject).to receive(:open_socket).and_raise(RuntimeError, 'MockError')
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return false' do
|
||||||
|
expect(subject.vm_ready?(poolname, vmname)).to be false
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#create_disk' do
|
||||||
|
let(:disk_size) { 10 }
|
||||||
|
before(:each) do
|
||||||
|
allow(subject).to receive(:connect_to_gce).and_return(connection)
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'Given an invalid pool name' do
|
||||||
|
it 'should raise an error' do
|
||||||
|
expect { subject.create_disk('missing_pool', vmname, disk_size) }.to raise_error(/missing_pool does not exist/)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'when VM does not exist' do
|
||||||
|
before(:each) do
|
||||||
|
expect(connection).to receive(:get_instance).and_raise(create_google_client_error(404, "The resource 'projects/#{project}/zones/#{zone}/instances/#{vmname}' was not found"))
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise an error' do
|
||||||
|
expect { subject.create_disk(poolname, vmname, disk_size) }.to raise_error(/VM #{vmname} .+ does not exist/)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'when adding the disk raises an error' do
|
||||||
|
before(:each) do
|
||||||
|
disk = MockDisk.new(name: vmname)
|
||||||
|
instance = MockInstance.new(name: vmname, disks: [disk])
|
||||||
|
allow(connection).to receive(:get_instance).and_return(instance)
|
||||||
|
expect(connection).to receive(:insert_disk).and_raise(RuntimeError, 'Mock Disk Error')
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise an error' do
|
||||||
|
expect { subject.create_disk(poolname, vmname, disk_size) }.to raise_error(/Mock Disk Error/)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'when adding the disk succeeds' do
|
||||||
|
before(:each) do
|
||||||
|
disk = MockDisk.new(name: vmname)
|
||||||
|
instance = MockInstance.new(name: vmname, disks: [disk])
|
||||||
|
allow(connection).to receive(:get_instance).and_return(instance)
|
||||||
|
result = MockResult.new
|
||||||
|
result.status = 'DONE'
|
||||||
|
allow(connection).to receive(:insert_disk).and_return(result)
|
||||||
|
allow(subject).to receive(:wait_for_operation).and_return(result)
|
||||||
|
new_disk = MockDisk.new(name: "#{vmname}-disk1", self_link: "/foo/bar/baz/#{vmname}-disk1")
|
||||||
|
allow(connection).to receive(:get_disk).and_return(new_disk)
|
||||||
|
allow(connection).to receive(:attach_disk).and_return(result)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return true' do
|
||||||
|
expect(subject.create_disk(poolname, vmname, disk_size)).to be true
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#create_snapshot' do
|
||||||
|
let(:snapshot_name) { 'snapshot' }
|
||||||
|
|
||||||
|
before(:each) do
|
||||||
|
allow(subject).to receive(:connect_to_gce).and_return(connection)
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'when VM does not exist' do
|
||||||
|
before(:each) do
|
||||||
|
allow(connection).to receive(:get_instance).and_raise(create_google_client_error(404, "The resource 'projects/#{project}/zones/#{zone}/instances/#{vmname}' was not found"))
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise an error' do
|
||||||
|
expect { subject.create_snapshot(poolname, vmname, snapshot_name) }.to raise_error(/VM #{vmname} .+ does not exist/)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'when snapshot already exists' do
|
||||||
|
it 'should raise an error' do
|
||||||
|
disk = MockDisk.new(name: vmname)
|
||||||
|
instance = MockInstance.new(name: vmname, disks: [disk])
|
||||||
|
allow(connection).to receive(:get_instance).and_return(instance)
|
||||||
|
snapshots = [MockSnapshot.new(name: snapshot_name)]
|
||||||
|
allow(subject).to receive(:find_snapshot).and_return(snapshots)
|
||||||
|
expect { subject.create_snapshot(poolname, vmname, snapshot_name) }.to raise_error(/Snapshot #{snapshot_name} .+ already exists /)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'when snapshot raises an error' do
|
||||||
|
before(:each) do
|
||||||
|
attached_disk = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}")
|
||||||
|
instance = MockInstance.new(name: vmname, disks: [attached_disk])
|
||||||
|
allow(connection).to receive(:get_instance).and_return(instance)
|
||||||
|
snapshots = nil
|
||||||
|
allow(subject).to receive(:find_snapshot).and_return(snapshots)
|
||||||
|
allow(connection).to receive(:create_disk_snapshot).and_raise(RuntimeError, 'Mock Snapshot Error')
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise an error' do
|
||||||
|
expect { subject.create_snapshot(poolname, vmname, snapshot_name) }.to raise_error(/Mock Snapshot Error/)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'when snapshot succeeds' do
|
||||||
|
before(:each) do
|
||||||
|
attached_disk = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}")
|
||||||
|
instance = MockInstance.new(name: vmname, disks: [attached_disk])
|
||||||
|
allow(connection).to receive(:get_instance).and_return(instance)
|
||||||
|
snapshots = nil
|
||||||
|
allow(subject).to receive(:find_snapshot).and_return(snapshots)
|
||||||
|
result = MockResult.new
|
||||||
|
result.status = 'DONE'
|
||||||
|
allow(connection).to receive(:create_disk_snapshot).and_return(result)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return true' do
|
||||||
|
expect(subject.create_snapshot(poolname, vmname, snapshot_name)).to be true
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should snapshot each attached disk' do
|
||||||
|
attached_disk = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}")
|
||||||
|
attached_disk2 = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}-disk1")
|
||||||
|
instance = MockInstance.new(name: vmname, disks: [attached_disk, attached_disk2])
|
||||||
|
allow(connection).to receive(:get_instance).and_return(instance)
|
||||||
|
|
||||||
|
expect(connection.should_receive(:create_disk_snapshot).twice)
|
||||||
|
subject.create_snapshot(poolname, vmname, snapshot_name)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#revert_snapshot' do
|
||||||
|
let(:snapshot_name) { 'snapshot' }
|
||||||
|
|
||||||
|
before(:each) do
|
||||||
|
allow(subject).to receive(:connect_to_gce).and_return(connection)
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'when VM does not exist' do
|
||||||
|
before(:each) do
|
||||||
|
allow(connection).to receive(:get_instance).and_raise(create_google_client_error(404, "The resource 'projects/#{project}/zones/#{zone}/instances/#{vmname}' was not found"))
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise an error' do
|
||||||
|
expect { subject.revert_snapshot(poolname, vmname, snapshot_name) }.to raise_error(/VM #{vmname} .+ does not exist/)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'when snapshot does not exist' do
|
||||||
|
it 'should raise an error' do
|
||||||
|
attached_disk = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}")
|
||||||
|
instance = MockInstance.new(name: vmname, disks: [attached_disk])
|
||||||
|
allow(connection).to receive(:get_instance).and_return(instance)
|
||||||
|
snapshots = nil
|
||||||
|
allow(subject).to receive(:find_snapshot).and_return(snapshots)
|
||||||
|
expect { subject.revert_snapshot(poolname, vmname, snapshot_name) }.to raise_error(/Snapshot #{snapshot_name} .+ does not exist /)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'when instance does not have attached disks' do
|
||||||
|
it 'should skip detaching/deleting disk' do
|
||||||
|
instance = MockInstance.new(name: vmname, disks: nil)
|
||||||
|
allow(connection).to receive(:get_instance).and_return(instance)
|
||||||
|
snapshots = []
|
||||||
|
allow(subject).to receive(:find_snapshot).and_return(snapshots)
|
||||||
|
allow(connection).to receive(:stop_instance)
|
||||||
|
allow(subject).to receive(:wait_for_operation)
|
||||||
|
allow(connection).to receive(:start_instance)
|
||||||
|
expect(subject).not_to receive(:detach_disk)
|
||||||
|
expect(subject).not_to receive(:delete_disk)
|
||||||
|
subject.revert_snapshot(poolname, vmname, snapshot_name)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'when revert to snapshot raises an error' do
|
||||||
|
before(:each) do
|
||||||
|
attached_disk = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}")
|
||||||
|
instance = MockInstance.new(name: vmname, disks: [attached_disk])
|
||||||
|
allow(connection).to receive(:get_instance).and_return(instance)
|
||||||
|
snapshots = [MockSnapshot.new(name: snapshot_name)]
|
||||||
|
allow(subject).to receive(:find_snapshot).and_return(snapshots)
|
||||||
|
allow(connection).to receive(:stop_instance)
|
||||||
|
allow(subject).to receive(:wait_for_operation)
|
||||||
|
expect(connection).to receive(:detach_disk).and_raise(RuntimeError, 'Mock Snapshot Error')
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise an error' do
|
||||||
|
expect { subject.revert_snapshot(poolname, vmname, snapshot_name) }.to raise_error(/Mock Snapshot Error/)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'when revert to snapshot succeeds' do
|
||||||
|
before(:each) do
|
||||||
|
attached_disk = MockAttachedDisk.new(device_name: vmname, source: "foo/bar/baz/#{vmname}")
|
||||||
|
instance = MockInstance.new(name: vmname, disks: [attached_disk])
|
||||||
|
allow(connection).to receive(:get_instance).and_return(instance)
|
||||||
|
snapshots = [MockSnapshot.new(name: snapshot_name, self_link: "foo/bar/baz/snapshot/#{snapshot_name}", labels: { 'diskname' => vmname })]
|
||||||
|
allow(subject).to receive(:find_snapshot).and_return(snapshots)
|
||||||
|
allow(connection).to receive(:stop_instance)
|
||||||
|
allow(subject).to receive(:wait_for_operation)
|
||||||
|
allow(connection).to receive(:detach_disk)
|
||||||
|
allow(connection).to receive(:delete_disk)
|
||||||
|
new_disk = MockDisk.new(name: vmname, self_link: "foo/bar/baz/disk/#{vmname}")
|
||||||
|
allow(connection).to receive(:insert_disk)
|
||||||
|
allow(connection).to receive(:get_disk).and_return(new_disk)
|
||||||
|
allow(connection).to receive(:attach_disk)
|
||||||
|
allow(connection).to receive(:start_instance)
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should return true' do
|
||||||
|
expect(subject.revert_snapshot(poolname, vmname, snapshot_name)).to be true
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#purge_unconfigured_resources' do
|
||||||
|
let(:empty_list) { [] }
|
||||||
|
|
||||||
|
before(:each) do
|
||||||
|
allow(subject).to receive(:connect_to_gce).and_return(connection)
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'with empty allowlist' do
|
||||||
|
before(:each) do
|
||||||
|
allow(subject).to receive(:wait_for_zone_operation)
|
||||||
|
end
|
||||||
|
it 'should attempt to delete unconfigured instances when they dont have a label' do
|
||||||
|
instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo')])
|
||||||
|
disk_list = MockDiskList.new(items: nil)
|
||||||
|
snapshot_list = MockSnapshotList.new(items: nil)
|
||||||
|
# the instance_list is filtered in the real code, and should only return non-configured VMs based on labels
|
||||||
|
# that do not match a real pool name
|
||||||
|
allow(connection).to receive(:list_instances).and_return(instance_list)
|
||||||
|
allow(connection).to receive(:list_disks).and_return(disk_list)
|
||||||
|
allow(connection).to receive(:list_snapshots).and_return(snapshot_list)
|
||||||
|
expect(connection).to receive(:delete_instance)
|
||||||
|
subject.purge_unconfigured_resources(nil)
|
||||||
|
end
|
||||||
|
it 'should attempt to delete unconfigured instances when they have a label that is not a configured pool' do
|
||||||
|
instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'pool' => 'foobar' })])
|
||||||
|
disk_list = MockDiskList.new(items: nil)
|
||||||
|
snapshot_list = MockSnapshotList.new(items: nil)
|
||||||
|
allow(connection).to receive(:list_instances).and_return(instance_list)
|
||||||
|
allow(connection).to receive(:list_disks).and_return(disk_list)
|
||||||
|
allow(connection).to receive(:list_snapshots).and_return(snapshot_list)
|
||||||
|
expect(connection).to receive(:delete_instance)
|
||||||
|
subject.purge_unconfigured_resources(nil)
|
||||||
|
end
|
||||||
|
it 'should attempt to delete unconfigured disks and snapshots when they do not have a label' do
|
||||||
|
instance_list = MockInstanceList.new(items: nil)
|
||||||
|
disk_list = MockDiskList.new(items: [MockDisk.new(name: 'diskfoo')])
|
||||||
|
snapshot_list = MockSnapshotList.new(items: [MockSnapshot.new(name: 'snapfoo')])
|
||||||
|
allow(connection).to receive(:list_instances).and_return(instance_list)
|
||||||
|
allow(connection).to receive(:list_disks).and_return(disk_list)
|
||||||
|
allow(connection).to receive(:list_snapshots).and_return(snapshot_list)
|
||||||
|
expect(connection).to receive(:delete_disk)
|
||||||
|
expect(connection).to receive(:delete_snapshot)
|
||||||
|
subject.purge_unconfigured_resources(nil)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'with allowlist containing a pool name' do
|
||||||
|
before(:each) do
|
||||||
|
allow(subject).to receive(:wait_for_zone_operation)
|
||||||
|
$allowlist = ['allowed']
|
||||||
|
end
|
||||||
|
it 'should attempt to delete unconfigured instances when they dont have the allowlist label' do
|
||||||
|
instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'pool' => 'not_this' })])
|
||||||
|
disk_list = MockDiskList.new(items: nil)
|
||||||
|
snapshot_list = MockSnapshotList.new(items: nil)
|
||||||
|
allow(connection).to receive(:list_instances).and_return(instance_list)
|
||||||
|
allow(connection).to receive(:list_disks).and_return(disk_list)
|
||||||
|
allow(connection).to receive(:list_snapshots).and_return(snapshot_list)
|
||||||
|
expect(connection).to receive(:delete_instance)
|
||||||
|
subject.purge_unconfigured_resources($allowlist)
|
||||||
|
end
|
||||||
|
it 'should ignore unconfigured instances when they have a label that is allowed' do
|
||||||
|
instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'pool' => 'allowed' })])
|
||||||
|
disk_list = MockDiskList.new(items: nil)
|
||||||
|
snapshot_list = MockSnapshotList.new(items: nil)
|
||||||
|
allow(connection).to receive(:list_instances).and_return(instance_list)
|
||||||
|
allow(connection).to receive(:list_disks).and_return(disk_list)
|
||||||
|
allow(connection).to receive(:list_snapshots).and_return(snapshot_list)
|
||||||
|
expect(connection).not_to receive(:delete_instance)
|
||||||
|
subject.purge_unconfigured_resources($allowlist)
|
||||||
|
end
|
||||||
|
it 'should ignore unconfigured disks and snapshots when they have a label that is allowed' do
|
||||||
|
instance_list = MockInstanceList.new(items: nil)
|
||||||
|
disk_list = MockDiskList.new(items: [MockDisk.new(name: 'diskfoo', labels: { 'pool' => 'allowed' })])
|
||||||
|
snapshot_list = MockSnapshotList.new(items: [MockSnapshot.new(name: 'snapfoo', labels: { 'pool' => 'allowed' })])
|
||||||
|
allow(connection).to receive(:list_instances).and_return(instance_list)
|
||||||
|
allow(connection).to receive(:list_disks).and_return(disk_list)
|
||||||
|
allow(connection).to receive(:list_snapshots).and_return(snapshot_list)
|
||||||
|
expect(connection).not_to receive(:delete_disk)
|
||||||
|
expect(connection).not_to receive(:delete_snapshot)
|
||||||
|
subject.purge_unconfigured_resources($allowlist)
|
||||||
|
end
|
||||||
|
it 'should ignore unconfigured item when they have the empty label that is allowed, which means we allow the pool label to not be set' do
|
||||||
|
$allowlist = ['allowed', '']
|
||||||
|
instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'some' => 'not_important' })])
|
||||||
|
disk_list = MockDiskList.new(items: [MockDisk.new(name: 'diskfoo', labels: { 'other' => 'thing' })])
|
||||||
|
snapshot_list = MockSnapshotList.new(items: [MockSnapshot.new(name: 'snapfoo')])
|
||||||
|
allow(connection).to receive(:list_instances).and_return(instance_list)
|
||||||
|
allow(connection).to receive(:list_disks).and_return(disk_list)
|
||||||
|
allow(connection).to receive(:list_snapshots).and_return(snapshot_list)
|
||||||
|
expect(connection).not_to receive(:delete_instance)
|
||||||
|
expect(connection).not_to receive(:delete_disk)
|
||||||
|
expect(connection).not_to receive(:delete_snapshot)
|
||||||
|
subject.purge_unconfigured_resources($allowlist)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'with allowlist containing a pool name and the empty string' do
|
||||||
|
before(:each) do
|
||||||
|
allow(subject).to receive(:wait_for_zone_operation)
|
||||||
|
$allowlist = ['allowed', '']
|
||||||
|
end
|
||||||
|
it 'should attempt to delete unconfigured instances when they dont have the allowlist label' do
|
||||||
|
instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'pool' => 'not_this' })])
|
||||||
|
disk_list = MockDiskList.new(items: nil)
|
||||||
|
snapshot_list = MockSnapshotList.new(items: nil)
|
||||||
|
allow(connection).to receive(:list_instances).and_return(instance_list)
|
||||||
|
allow(connection).to receive(:list_disks).and_return(disk_list)
|
||||||
|
allow(connection).to receive(:list_snapshots).and_return(snapshot_list)
|
||||||
|
expect(connection).to receive(:delete_instance)
|
||||||
|
subject.purge_unconfigured_resources($allowlist)
|
||||||
|
end
|
||||||
|
it 'should ignore unconfigured disks and snapshots when they have a label that is allowed' do
|
||||||
|
instance_list = MockInstanceList.new(items: nil)
|
||||||
|
disk_list = MockDiskList.new(items: [MockDisk.new(name: 'diskfoo', labels: { 'pool' => 'allowed' })])
|
||||||
|
snapshot_list = MockSnapshotList.new(items: [MockSnapshot.new(name: 'snapfoo', labels: { 'pool' => 'allowed' })])
|
||||||
|
allow(connection).to receive(:list_instances).and_return(instance_list)
|
||||||
|
allow(connection).to receive(:list_disks).and_return(disk_list)
|
||||||
|
allow(connection).to receive(:list_snapshots).and_return(snapshot_list)
|
||||||
|
expect(connection).not_to receive(:delete_disk)
|
||||||
|
expect(connection).not_to receive(:delete_snapshot)
|
||||||
|
subject.purge_unconfigured_resources($allowlist)
|
||||||
|
end
|
||||||
|
it 'should ignore unconfigured item when they have the empty label that is allowed, which means we allow the pool label to not be set' do
|
||||||
|
instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'some' => 'not_important' })])
|
||||||
|
disk_list = MockDiskList.new(items: [MockDisk.new(name: 'diskfoo', labels: { 'other' => 'thing' })])
|
||||||
|
snapshot_list = MockSnapshotList.new(items: [MockSnapshot.new(name: 'snapfoo')])
|
||||||
|
allow(connection).to receive(:list_instances).and_return(instance_list)
|
||||||
|
allow(connection).to receive(:list_disks).and_return(disk_list)
|
||||||
|
allow(connection).to receive(:list_snapshots).and_return(snapshot_list)
|
||||||
|
expect(connection).not_to receive(:delete_instance)
|
||||||
|
expect(connection).not_to receive(:delete_disk)
|
||||||
|
expect(connection).not_to receive(:delete_snapshot)
|
||||||
|
subject.purge_unconfigured_resources($allowlist)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context 'with allowlist containing a a fully qualified label that is not pool' do
|
||||||
|
before(:each) do
|
||||||
|
allow(subject).to receive(:wait_for_zone_operation)
|
||||||
|
$allowlist = ['user=Bob']
|
||||||
|
end
|
||||||
|
it 'should attempt to delete unconfigured instances when they dont have the allowlist label' do
|
||||||
|
instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'pool' => 'not_this' })])
|
||||||
|
disk_list = MockDiskList.new(items: nil)
|
||||||
|
snapshot_list = MockSnapshotList.new(items: nil)
|
||||||
|
allow(connection).to receive(:list_instances).and_return(instance_list)
|
||||||
|
allow(connection).to receive(:list_disks).and_return(disk_list)
|
||||||
|
allow(connection).to receive(:list_snapshots).and_return(snapshot_list)
|
||||||
|
expect(connection).to receive(:delete_instance)
|
||||||
|
subject.purge_unconfigured_resources($allowlist)
|
||||||
|
end
|
||||||
|
it 'should ignore unconfigured item when they match the fully qualified label' do
|
||||||
|
instance_list = MockInstanceList.new(items: [MockInstance.new(name: 'foo', labels: { 'some' => 'not_important', 'user' => 'bob' })])
|
||||||
|
disk_list = MockDiskList.new(items: [MockDisk.new(name: 'diskfoo', labels: { 'other' => 'thing', 'user' => 'bob' })])
|
||||||
|
snapshot_list = MockSnapshotList.new(items: [MockSnapshot.new(name: 'snapfoo', labels: { 'user' => 'bob' })])
|
||||||
|
allow(connection).to receive(:list_instances).and_return(instance_list)
|
||||||
|
allow(connection).to receive(:list_disks).and_return(disk_list)
|
||||||
|
allow(connection).to receive(:list_snapshots).and_return(snapshot_list)
|
||||||
|
expect(connection).not_to receive(:delete_instance)
|
||||||
|
expect(connection).not_to receive(:delete_disk)
|
||||||
|
expect(connection).not_to receive(:delete_snapshot)
|
||||||
|
subject.purge_unconfigured_resources($allowlist)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'should raise any errors' do
|
||||||
|
expect(subject).to receive(:provided_pools).and_throw('mockerror')
|
||||||
|
expect { subject.purge_unconfigured_resources(nil) }.to raise_error(/mockerror/)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe '#get_current_user' do
|
||||||
|
it 'should downcase and replace invalid chars with dashes' do
|
||||||
|
redis_connection_pool.with_metrics do |redis|
|
||||||
|
redis.hset("vmpooler__vm__#{vmname}", 'token:user', 'BOBBY.PUPPET')
|
||||||
|
expect(subject.get_current_user(vmname)).to eq('bobby-puppet')
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
it 'returns "" for nil values' do
|
||||||
|
redis_connection_pool.with_metrics do |_redis|
|
||||||
|
expect(subject.get_current_user(vmname)).to eq('')
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
9
spec/vmpooler-provider-aws/vmpooler_provider_aws_spec.rb
Normal file
9
spec/vmpooler-provider-aws/vmpooler_provider_aws_spec.rb
Normal file
|
|
@ -0,0 +1,9 @@
|
||||||
|
require 'rspec'
|
||||||
|
|
||||||
|
describe 'VmpoolerProviderGce' do
|
||||||
|
context 'when creating class ' do
|
||||||
|
it 'sets a version' do
|
||||||
|
expect(VmpoolerProviderGce::VERSION).not_to be_nil
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
32
vmpooler-provider-aws.gemspec
Normal file
32
vmpooler-provider-aws.gemspec
Normal file
|
|
@ -0,0 +1,32 @@
|
||||||
|
lib = File.expand_path('../lib', __FILE__)
|
||||||
|
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
|
||||||
|
require 'vmpooler-provider-aws/version'
|
||||||
|
|
||||||
|
Gem::Specification.new do |s|
|
||||||
|
s.name = 'vmpooler-provider-aws'
|
||||||
|
s.version = VmpoolerProviderAws::VERSION
|
||||||
|
s.authors = ['Puppet']
|
||||||
|
s.email = ['support@puppet.com']
|
||||||
|
|
||||||
|
s.summary = 'AWS provider for VMPooler'
|
||||||
|
s.homepage = 'https://github.com/puppetlabs/vmpooler-provider-aws'
|
||||||
|
s.license = 'Apache-2.0'
|
||||||
|
s.required_ruby_version = Gem::Requirement.new('>= 2.3.0')
|
||||||
|
|
||||||
|
s.files = Dir[ "lib/**/*" ]
|
||||||
|
s.require_paths = ["lib"]
|
||||||
|
s.add_dependency 'aws-sdk-ec2', '~> 1'
|
||||||
|
|
||||||
|
s.add_development_dependency 'vmpooler', '>= 1.3.0', '~> 2.3'
|
||||||
|
|
||||||
|
# Testing dependencies
|
||||||
|
s.add_development_dependency 'climate_control', '>= 0.2.0'
|
||||||
|
s.add_development_dependency 'mock_redis', '>= 0.17.0'
|
||||||
|
s.add_development_dependency 'pry'
|
||||||
|
s.add_development_dependency 'rack-test', '>= 0.6'
|
||||||
|
s.add_development_dependency 'rspec', '>= 3.2'
|
||||||
|
s.add_development_dependency 'rubocop', '~> 1.1.0'
|
||||||
|
s.add_development_dependency 'simplecov', '>= 0.11.2'
|
||||||
|
s.add_development_dependency 'thor', '~> 1.0', '>= 1.0.1'
|
||||||
|
s.add_development_dependency 'yarjuf', '>= 2.0'
|
||||||
|
end
|
||||||
160
vmpooler.yaml.example
Normal file
160
vmpooler.yaml.example
Normal file
|
|
@ -0,0 +1,160 @@
|
||||||
|
---
|
||||||
|
:providers:
|
||||||
|
# :providers:
|
||||||
|
#
|
||||||
|
# This section contains the VM providers for VMs and Pools
|
||||||
|
# The currently supported backing services are:
|
||||||
|
# - vsphere
|
||||||
|
# - dummy
|
||||||
|
# - gce
|
||||||
|
#
|
||||||
|
# - provider_class
|
||||||
|
# For multiple providers, specify one of the supported backing services (vsphere or dummy or gce)
|
||||||
|
# (optional: will default to it's parent :key: name eg. 'gce')
|
||||||
|
#
|
||||||
|
# - purge_unconfigured_resources
|
||||||
|
# Enable purging of VMs, disks and snapshots
|
||||||
|
# By default will purge resources in the project without a "pool" label, or a "pool" label with the value for an unconfigured pool
|
||||||
|
# An optional allowlist can be provided to ignore purging certain VMs based on pool labels
|
||||||
|
# Setting this on the provider will enable purging for the provider
|
||||||
|
# Expects a boolean value
|
||||||
|
# (optional; default: false)
|
||||||
|
#
|
||||||
|
# - resources_allowlist
|
||||||
|
# For GCE: Specify labels that should be ignored when purging VMs. For example if a VM's label is
|
||||||
|
# set to 'pool' with value 'donotdelete' and there is no pool with that name configured, it would normally be purged,
|
||||||
|
# unless you add a resources_allowlist "donotdelete" in which case it is ignored and not purged.
|
||||||
|
# Additionally the "" (empty string) has a special meaning whereas VMs that do not have the "pool" label are not purged.
|
||||||
|
# Additionally if you want to ignore VM's with an arbitrary label, include it in the allow list as a string with the separator "="
|
||||||
|
# between the label name and value eg user=bob would ignore VMs that include the label "user" with the value "bob"
|
||||||
|
# If any one of the above condition is met, the resource is ignored and not purged
|
||||||
|
# This option is only evaluated when 'purge_unconfigured_resources' is enabled
|
||||||
|
# Expects an array of strings specifying the allowlisted labels by name. The strings should be all lower case, since
|
||||||
|
# no uppercase char is allowed in a label
|
||||||
|
# (optional; default: nil)
|
||||||
|
#
|
||||||
|
# If you want to support more than one provider with different parameters you have to specify the
|
||||||
|
# backing service in the provider_class configuration parameter for example 'vsphere' or 'dummy'. Each pool can specify
|
||||||
|
# the provider to use.
|
||||||
|
#
|
||||||
|
# Multiple providers example:
|
||||||
|
|
||||||
|
:gce1:
|
||||||
|
provider_class: 'gce'
|
||||||
|
project: 'myproject'
|
||||||
|
zone: 'us-central1-f'
|
||||||
|
:gce2:
|
||||||
|
provider_class: 'gce'
|
||||||
|
project: 'myproject-foo'
|
||||||
|
zone: 'us-central1-f'
|
||||||
|
resources_allowlist:
|
||||||
|
- "user=bob"
|
||||||
|
- ""
|
||||||
|
- "custom-pool"
|
||||||
|
|
||||||
|
# :gce:
|
||||||
|
#
|
||||||
|
# This section contains the global variables for the gce provider
|
||||||
|
# some of them can be overwritten at the pool level
|
||||||
|
#
|
||||||
|
# Available configuration parameters:
|
||||||
|
#
|
||||||
|
# - project
|
||||||
|
# The GCE project name to use when creating/deleting resources
|
||||||
|
# (required)
|
||||||
|
# - zone
|
||||||
|
# The GCE zone name to use when creating/deleting resources (vms, disks etc)
|
||||||
|
# Can be overwritten at the pool level
|
||||||
|
# (required)
|
||||||
|
# - machine_type
|
||||||
|
# Full or partial URL of the machine type resource to use for this instance, in the format: zones/zone/machineTypes/machine-type
|
||||||
|
# (required)
|
||||||
|
# - network_name
|
||||||
|
# The GCE network_name to use
|
||||||
|
# (required)
|
||||||
|
# - dns_zone_resource_name
|
||||||
|
# The name given to the DNS zone ressource. This is not the domain, but the name identifier of a zone eg example-com
|
||||||
|
# (optional) when not set, the dns setup / teardown is skipped
|
||||||
|
# - domain
|
||||||
|
# Overwrites the global domain parameter. This should match the dns zone domain set for the dns_zone_resource_name.
|
||||||
|
# It is used to infer the domain part of the FQDN ie $vm_name.$domain
|
||||||
|
# When setting multiple providers at the same time, this value should be set for each GCE pools.
|
||||||
|
# (optional) If not explicitely set, the FQDN is inferred using the global 'domain' config parameter
|
||||||
|
# Example:
|
||||||
|
|
||||||
|
:gce:
|
||||||
|
project: 'myproject'
|
||||||
|
zone: 'us-central1-f'
|
||||||
|
machine_type: ''
|
||||||
|
network_name: ''
|
||||||
|
dns_zone_resource_name: 'subdomain-example-com'
|
||||||
|
domain: 'subdomain.example.com'
|
||||||
|
|
||||||
|
# :pools:
|
||||||
|
#
|
||||||
|
# This section contains a list of virtual machine 'pools' for vmpooler to
|
||||||
|
# create and maintain.
|
||||||
|
#
|
||||||
|
# Available configuration parameters (per-pool):
|
||||||
|
#
|
||||||
|
# - name
|
||||||
|
# The name of the pool.
|
||||||
|
# (required)
|
||||||
|
#
|
||||||
|
# - alias
|
||||||
|
# Other names this pool can be requested as.
|
||||||
|
# (optional)
|
||||||
|
#
|
||||||
|
# - template
|
||||||
|
# The template or virtual machine target to spawn clones from. eg projects/debian-cloud/global/images/family/debian-9
|
||||||
|
# (required)
|
||||||
|
#
|
||||||
|
# - size
|
||||||
|
# The number of waiting VMs to keep in a pool.
|
||||||
|
# (required)
|
||||||
|
#
|
||||||
|
# - provider
|
||||||
|
# The name of the VM provider which manage this pool. This should match
|
||||||
|
# a name in the :providers: section above e.g. vsphere
|
||||||
|
# (required; will default to vsphere for backwards compatibility)
|
||||||
|
# If you have more than one provider, this is where you would choose which
|
||||||
|
# one to use for this pool
|
||||||
|
#
|
||||||
|
# - clone_target
|
||||||
|
# Per-pool option to override the global 'clone_target' cluster.
|
||||||
|
# (optional)
|
||||||
|
#
|
||||||
|
# - timeout
|
||||||
|
# How long (in minutes) before marking a clone in 'pending' queues as 'failed' and retrying.
|
||||||
|
# This setting overrides any globally-configured timeout setting.
|
||||||
|
# (optional; default: '15')
|
||||||
|
#
|
||||||
|
# - ready_ttl
|
||||||
|
# How long (in minutes) to keep VMs in 'ready' queues before destroying.
|
||||||
|
# (optional; default: no limit)
|
||||||
|
#
|
||||||
|
# - check_loop_delay_min (optional; default: 5) seconds
|
||||||
|
# - check_loop_delay_max (optional; default: same as check_loop_delay_min) seconds
|
||||||
|
# - check_loop_delay_decay (optional; default: 2.0) Must be greater than 1.0
|
||||||
|
# See the :config: section for information about these settings
|
||||||
|
#
|
||||||
|
# Provider specific pool settings
|
||||||
|
#
|
||||||
|
# Gce provider
|
||||||
|
# - zone
|
||||||
|
# The zone to create the VMs in
|
||||||
|
# (optional: default is global provider zone value)
|
||||||
|
# - machine_type
|
||||||
|
# Full or partial URL of the machine type resource to use for this instance, in the format: zones/zone/machineTypes/machine-type
|
||||||
|
# Example:
|
||||||
|
|
||||||
|
:pools:
|
||||||
|
- name: 'debian-8-x86_64'
|
||||||
|
alias: [ 'debian-8-64', 'debian-8-amd64' ]
|
||||||
|
template: 'global/images/my-custom-image'
|
||||||
|
size: 5
|
||||||
|
timeout: 15
|
||||||
|
ready_ttl: 1440
|
||||||
|
provider: gce
|
||||||
|
zone: 'us-new-zone'
|
||||||
|
machine_type: 'zones/us-central1-f/machineTypes/n1-standard-1'
|
||||||
Loading…
Add table
Add a link
Reference in a new issue