Compare commits


No commits in common. "main" and "2.1.0" have entirely different histories.
main ... 2.1.0

77 changed files with 3255 additions and 7435 deletions

.dockerignore Normal file

@ -0,0 +1,13 @@
**/*.yml
**/*.yaml
**/*.md
**/*example
**/Dockerfile*
Gemfile.lock
Rakefile
Vagrantfile
coverage
docs
examples
scripts
vendor


@ -3,11 +3,6 @@ updates:
- package-ecosystem: bundler
directory: "/"
schedule:
interval: weekly
open-pull-requests-limit: 10
- package-ecosystem: github-actions
directory: "/"
schedule:
interval: weekly
interval: daily
time: "13:00"
open-pull-requests-limit: 10


@ -1,12 +0,0 @@
name: Automated release prep
on:
workflow_dispatch:
jobs:
auto_release_prep:
uses: puppetlabs/release-engineering-repo-standards/.github/workflows/auto_release_prep.yml@v1
secrets: inherit
with:
project-type: ruby
version-file-path: lib/vmpooler/version.rb


@ -1,8 +0,0 @@
name: Dependabot auto-merge
on: pull_request
jobs:
dependabot_merge:
uses: puppetlabs/release-engineering-repo-standards/.github/workflows/dependabot_merge.yml@v1
secrets: inherit


@ -1,8 +0,0 @@
name: Ensure label
on: pull_request
jobs:
ensure_label:
uses: puppetlabs/release-engineering-repo-standards/.github/workflows/ensure_label.yml@v1
secrets: inherit

.github/workflows/lightstep.yml vendored Normal file

@ -0,0 +1,29 @@
name: Verify Pre-Deploy Status
on: pull_request
jobs:
build:
runs-on: ubuntu-latest
steps:
# Checkout repo
- name: Checkout
uses: actions/checkout@v2
# Run checks
- name: Lightstep Pre-Deploy Check
uses: lightstep/lightstep-action-predeploy@v0.2.6
id: lightstep-predeploy
with:
lightstep_api_key: ${{ secrets.LIGHTSTEP_API_KEY }}
pagerduty_api_token: ${{ secrets.PAGERDUTY_API_TOKEN }}
# Output status as a comment
- name: Add a Comment
uses: unsplash/comment-on-pr@master
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
msg: ${{ steps.lightstep-predeploy.outputs.lightstep_predeploy_md }}
check_for_duplicate_msg: true


@ -1,4 +1,4 @@
name: Release Gem
name: Release
on: workflow_dispatch
@ -7,46 +7,25 @@ jobs:
runs-on: ubuntu-latest
if: github.repository == 'puppetlabs/vmpooler'
steps:
- uses: actions/checkout@v4
- name: Get Current Version
uses: actions/github-script@v7
id: cv
with:
script: |
const { data: response } = await github.rest.repos.getLatestRelease({
owner: context.repo.owner,
repo: context.repo.repo,
})
console.log(`The latest release is ${response.tag_name}`)
return response.tag_name
result-encoding: string
- name: Get Next Version
id: nv
- uses: actions/checkout@v2
- name: Get Version
id: gv
run: |
version=$(grep VERSION lib/vmpooler/version.rb |rev |cut -d "'" -f2 |rev)
echo "version=$version" >> $GITHUB_OUTPUT
echo "Found version $version from lib/vmpooler/version.rb"
echo "::set-output name=ver::$(grep VERSION lib/vmpooler/version.rb |rev |cut -d "'" -f2 |rev)"
- name: Tag Release
uses: ncipollo/release-action@v1
with:
tag: ${{ steps.nv.outputs.version }}
tag: ${{ steps.gv.outputs.ver }}
token: ${{ secrets.GITHUB_TOKEN }}
bodyfile: release-notes.md
draft: false
prerelease: false
# This step should closely match what is used in `docker/Dockerfile` in vmpooler-deployment
- name: Install Ruby jruby-9.4.12.1
generateReleaseNotes: true
- name: Install Ruby 2.5.8
uses: ruby/setup-ruby@v1
with:
ruby-version: 'jruby-9.4.12.1'
ruby-version: '2.5.8'
- name: Build gem
run: gem build *.gemspec
- name: Publish gem
run: |
mkdir -p $HOME/.gem


@ -1,39 +0,0 @@
name: Security
on:
workflow_dispatch:
push:
branches:
- main
jobs:
scan:
name: Mend Scanning
runs-on: ubuntu-latest
steps:
- name: checkout repo content
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: setup ruby
uses: ruby/setup-ruby@v1
with:
ruby-version: 2.7
# setup a package lock if one doesn't exist, otherwise do nothing
- name: check lock
run: '[ -f "Gemfile.lock" ] && echo "package lock file exists, skipping" || bundle lock'
# install java
- uses: actions/setup-java@v4
with:
distribution: 'temurin' # See 'Supported distributions' for available options
java-version: '17'
# download mend
- name: download_mend
run: curl -o wss-unified-agent.jar https://unified-agent.s3.amazonaws.com/wss-unified-agent.jar
- name: run mend
run: java -jar wss-unified-agent.jar
env:
WS_APIKEY: ${{ secrets.MEND_API_KEY }}
WS_WSS_URL: https://saas-eu.whitesourcesoftware.com/agent
WS_USERKEY: ${{ secrets.MEND_TOKEN }}
WS_PRODUCTNAME: RE
WS_PROJECTNAME: ${{ github.event.repository.name }}


@ -18,9 +18,9 @@ jobs:
strategy:
matrix:
ruby-version:
- 'jruby-9.4.12.1'
- '2.5.8'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v2
- name: Set up Ruby
uses: ruby/setup-ruby@v1
with:
@ -34,9 +34,10 @@ jobs:
strategy:
matrix:
ruby-version:
- 'jruby-9.4.12.1'
- '2.5.8'
- 'jruby-9.2.12.0'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v2
- name: Set up Ruby
uses: ruby/setup-ruby@v1
with:


@ -1,5 +0,0 @@
project=vmpooler
user=puppetlabs
exclude_labels=maintenance
github-api=https://api.github.com
release-branch=main

.gitignore vendored

@ -5,5 +5,7 @@ vendor/
.dccache
.ruby-version
Gemfile.local
Gemfile.lock
results.xml
/vmpooler.yaml

.lightstep.yml Normal file

@ -0,0 +1,10 @@
organization: Puppet Inc
project: puppet-inc-prod
conditions:
# API /status error >0%
- SZcJVQfy
integrations:
pagerduty:
# VMPooler service
service: P714ID4


@ -66,7 +66,6 @@ Style/Next:
Enabled: false
Metrics/ParameterLists:
Max: 10
MaxOptionalParameters: 10
Style/GuardClause:
Enabled: false


@ -12,7 +12,7 @@
# SupportedStyles: with_first_parameter, with_fixed_indentation
Layout/ParameterAlignment:
Exclude:
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 9
# Cop supports --auto-correct.
@ -21,13 +21,13 @@ Layout/ParameterAlignment:
Layout/CaseIndentation:
Exclude:
- 'lib/vmpooler/api/helpers.rb'
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 1
# Cop supports --auto-correct.
Layout/ClosingParenthesisIndentation:
Exclude:
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 1
# Cop supports --auto-correct.
@ -58,14 +58,14 @@ Layout/EmptyLinesAroundModuleBody:
Layout/FirstHashElementIndentation:
Exclude:
- 'lib/vmpooler/api/helpers.rb'
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 1
# Cop supports --auto-correct.
# Configuration parameters: Width, IgnoredPatterns.
Layout/IndentationWidth:
Exclude:
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 1
# Cop supports --auto-correct.
@ -73,7 +73,7 @@ Layout/IndentationWidth:
# SupportedStyles: symmetrical, new_line, same_line
Layout/MultilineMethodCallBraceLayout:
Exclude:
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 1
# Cop supports --auto-correct.
@ -87,14 +87,14 @@ Layout/SpaceAroundEqualsInParameterDefault:
# Cop supports --auto-correct.
Layout/SpaceAroundKeyword:
Exclude:
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 1
# Cop supports --auto-correct.
# Configuration parameters: AllowForAlignment.
Layout/SpaceAroundOperators:
Exclude:
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 8
# Cop supports --auto-correct.
@ -109,14 +109,14 @@ Layout/SpaceInsideHashLiteralBraces:
# Cop supports --auto-correct.
Layout/SpaceInsideParens:
Exclude:
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 2
# Configuration parameters: AllowSafeAssignment.
Lint/AssignmentInCondition:
Exclude:
- 'lib/vmpooler/api/helpers.rb'
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 2
Lint/SuppressedException:
@ -148,7 +148,7 @@ Lint/UselessAssignment:
Style/AndOr:
Exclude:
- 'lib/vmpooler/api/helpers.rb'
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 1
Style/CaseEquality:
@ -169,7 +169,7 @@ Style/For:
Style/HashSyntax:
Exclude:
- 'lib/vmpooler/api/helpers.rb'
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 4
# Cop supports --auto-correct.
@ -177,7 +177,7 @@ Style/HashSyntax:
Style/IfUnlessModifier:
Exclude:
- 'lib/vmpooler/api/helpers.rb'
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 3
# Cop supports --auto-correct.
@ -185,13 +185,13 @@ Style/IfUnlessModifier:
# SupportedStyles: both, prefix, postfix
Style/NegatedIf:
Exclude:
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 3
# Cop supports --auto-correct.
Style/Not:
Exclude:
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 1
# Cop supports --auto-correct.
@ -200,26 +200,26 @@ Style/Not:
Style/NumericPredicate:
Exclude:
- 'spec/**/*'
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 2
# Cop supports --auto-correct.
Style/ParallelAssignment:
Exclude:
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 1
# Cop supports --auto-correct.
# Configuration parameters: AllowSafeAssignment.
Style/ParenthesesAroundCondition:
Exclude:
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 2
# Cop supports --auto-correct.
Style/PerlBackrefs:
Exclude:
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 1
# Configuration parameters: NamePrefix, NamePrefixBlacklist, NameWhitelist.
@ -235,7 +235,7 @@ Naming/PredicateName:
# Cop supports --auto-correct.
Style/RedundantParentheses:
Exclude:
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 2
# Cop supports --auto-correct.
@ -256,7 +256,7 @@ Style/RedundantSelf:
# SupportedStyles: single_quotes, double_quotes
Style/StringLiterals:
Exclude:
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 1
# Cop supports --auto-correct.
@ -271,7 +271,7 @@ Style/TernaryParentheses:
# SupportedStyles: snake_case, camelCase
Naming/VariableName:
Exclude:
- 'lib/vmpooler/api/v3.rb'
- 'lib/vmpooler/api/v1.rb'
# Offense count: 1
# Cop supports --auto-correct.

File diff suppressed because it is too large.


@ -1,10 +1,10 @@
# This will cause RE to be assigned review of any opened PRs against
# This will cause DIO to be assigned review of any opened PRs against
# the branches containing this file.
# See https://help.github.com/en/articles/about-code-owners for info on how to
# take ownership of parts of the code base that should be reviewed by another
# team.
# RE will be the default owners for everything in the repo.
* @puppetlabs/release-engineering
# DIO will be the default owners for everything in the repo.
* @puppetlabs/dio


@ -64,7 +64,7 @@ the new documentation or comments added.
* Submit a pull request to the repository in the puppetlabs organization.
* Update your Jira ticket to mark that you have submitted code and are ready for it to be reviewed (Status: Ready for Merge).
* Include a link to the pull request in the ticket.
* The Puppet Release Engineering team looks at Pull Requests on a regular basis.
* The Puppet DIO team looks at Pull Requests on a regular basis.
* After feedback has been given we expect responses within two weeks. After two weeks we may close the pull request if it isn't showing any activity.
## Additional Resources


@ -3,11 +3,11 @@ source ENV['GEM_SOURCE'] || 'https://rubygems.org'
gemspec
# Evaluate Gemfile.local if it exists
if File.exist? "#{__FILE__}.local"
if File.exists? "#{__FILE__}.local"
instance_eval(File.read("#{__FILE__}.local"))
end
# Evaluate ~/.gemfile if it exists
if File.exist?(File.join(Dir.home, '.gemfile'))
if File.exists?(File.join(Dir.home, '.gemfile'))
instance_eval(File.read(File.join(Dir.home, '.gemfile')))
end


@ -1,219 +0,0 @@
PATH
remote: .
specs:
vmpooler (3.8.1)
concurrent-ruby (~> 1.1)
connection_pool (~> 2.4)
deep_merge (~> 1.2)
net-ldap (~> 0.16)
opentelemetry-exporter-jaeger (= 0.23.0)
opentelemetry-instrumentation-concurrent_ruby (= 0.21.1)
opentelemetry-instrumentation-http_client (= 0.22.2)
opentelemetry-instrumentation-rack (= 0.23.4)
opentelemetry-instrumentation-redis (= 0.25.3)
opentelemetry-instrumentation-sinatra (= 0.23.2)
opentelemetry-resource_detectors (= 0.24.2)
opentelemetry-sdk (~> 1.8)
pickup (~> 0.0.11)
prometheus-client (>= 2, < 5)
puma (>= 5.0.4, < 7)
rack (>= 2.2, < 4.0)
rake (~> 13.0)
redis (~> 5.0)
sinatra (>= 2, < 4)
spicy-proton (~> 2.1)
statsd-ruby (~> 1.4)
GEM
remote: https://rubygems.org/
specs:
ast (2.4.3)
base64 (0.1.2)
bindata (2.5.1)
builder (3.3.0)
climate_control (1.2.0)
coderay (1.1.3)
concurrent-ruby (1.3.5)
connection_pool (2.5.3)
deep_merge (1.2.2)
diff-lcs (1.6.2)
docile (1.4.1)
faraday (2.13.1)
faraday-net_http (>= 2.0, < 3.5)
json
logger
faraday-net_http (3.4.0)
net-http (>= 0.5.0)
ffi (1.17.2-java)
google-cloud-env (2.2.1)
faraday (>= 1.0, < 3.a)
json (2.12.2)
json (2.12.2-java)
language_server-protocol (3.17.0.5)
logger (1.7.0)
method_source (1.1.0)
mock_redis (0.37.0)
mustermann (3.0.3)
ruby2_keywords (~> 0.0.1)
net-http (0.6.0)
uri
net-ldap (0.19.0)
nio4r (2.7.4)
nio4r (2.7.4-java)
opentelemetry-api (1.5.0)
opentelemetry-common (0.20.1)
opentelemetry-api (~> 1.0)
opentelemetry-exporter-jaeger (0.23.0)
opentelemetry-api (~> 1.1)
opentelemetry-common (~> 0.20)
opentelemetry-sdk (~> 1.2)
opentelemetry-semantic_conventions
thrift
opentelemetry-instrumentation-base (0.22.3)
opentelemetry-api (~> 1.0)
opentelemetry-registry (~> 0.1)
opentelemetry-instrumentation-concurrent_ruby (0.21.1)
opentelemetry-api (~> 1.0)
opentelemetry-instrumentation-base (~> 0.22.1)
opentelemetry-instrumentation-http_client (0.22.2)
opentelemetry-api (~> 1.0)
opentelemetry-common (~> 0.20.0)
opentelemetry-instrumentation-base (~> 0.22.1)
opentelemetry-instrumentation-rack (0.23.4)
opentelemetry-api (~> 1.0)
opentelemetry-common (~> 0.20.0)
opentelemetry-instrumentation-base (~> 0.22.1)
opentelemetry-instrumentation-redis (0.25.3)
opentelemetry-api (~> 1.0)
opentelemetry-common (~> 0.20.0)
opentelemetry-instrumentation-base (~> 0.22.1)
opentelemetry-instrumentation-sinatra (0.23.2)
opentelemetry-api (~> 1.0)
opentelemetry-common (~> 0.20.0)
opentelemetry-instrumentation-base (~> 0.22.1)
opentelemetry-instrumentation-rack (~> 0.21)
opentelemetry-registry (0.4.0)
opentelemetry-api (~> 1.1)
opentelemetry-resource_detectors (0.24.2)
google-cloud-env
opentelemetry-sdk (~> 1.0)
opentelemetry-sdk (1.8.0)
opentelemetry-api (~> 1.1)
opentelemetry-common (~> 0.20)
opentelemetry-registry (~> 0.2)
opentelemetry-semantic_conventions
opentelemetry-semantic_conventions (1.11.0)
opentelemetry-api (~> 1.0)
parallel (1.27.0)
parser (3.3.8.0)
ast (~> 2.4.1)
racc
pickup (0.0.11)
prism (1.4.0)
prometheus-client (4.2.4)
base64
pry (0.15.2)
coderay (~> 1.1)
method_source (~> 1.0)
pry (0.15.2-java)
coderay (~> 1.1)
method_source (~> 1.0)
spoon (~> 0.0)
puma (6.6.0)
nio4r (~> 2.0)
puma (6.6.0-java)
nio4r (~> 2.0)
racc (1.8.1)
racc (1.8.1-java)
rack (2.2.17)
rack-protection (3.2.0)
base64 (>= 0.1.0)
rack (~> 2.2, >= 2.2.4)
rack-test (2.2.0)
rack (>= 1.3)
rainbow (3.1.1)
rake (13.3.0)
redis (5.4.0)
redis-client (>= 0.22.0)
redis-client (0.24.0)
connection_pool
regexp_parser (2.10.0)
rexml (3.4.1)
rspec (3.13.1)
rspec-core (~> 3.13.0)
rspec-expectations (~> 3.13.0)
rspec-mocks (~> 3.13.0)
rspec-core (3.13.4)
rspec-support (~> 3.13.0)
rspec-expectations (3.13.5)
diff-lcs (>= 1.2.0, < 2.0)
rspec-support (~> 3.13.0)
rspec-mocks (3.13.5)
diff-lcs (>= 1.2.0, < 2.0)
rspec-support (~> 3.13.0)
rspec-support (3.13.4)
rubocop (1.56.4)
base64 (~> 0.1.1)
json (~> 2.3)
language_server-protocol (>= 3.17.0)
parallel (~> 1.10)
parser (>= 3.2.2.3)
rainbow (>= 2.2.2, < 4.0)
regexp_parser (>= 1.8, < 3.0)
rexml (>= 3.2.5, < 4.0)
rubocop-ast (>= 1.28.1, < 2.0)
ruby-progressbar (~> 1.7)
unicode-display_width (>= 2.4.0, < 3.0)
rubocop-ast (1.44.1)
parser (>= 3.3.7.2)
prism (~> 1.4)
ruby-progressbar (1.13.0)
ruby2_keywords (0.0.5)
simplecov (0.22.0)
docile (~> 1.1)
simplecov-html (~> 0.11)
simplecov_json_formatter (~> 0.1)
simplecov-html (0.13.1)
simplecov_json_formatter (0.1.4)
sinatra (3.2.0)
mustermann (~> 3.0)
rack (~> 2.2, >= 2.2.4)
rack-protection (= 3.2.0)
tilt (~> 2.0)
spicy-proton (2.1.15)
bindata (~> 2.3)
spoon (0.0.6)
ffi
statsd-ruby (1.5.0)
thor (1.3.2)
thrift (0.22.0)
tilt (2.6.0)
unicode-display_width (2.6.0)
uri (1.0.3)
yarjuf (2.0.0)
builder
rspec (~> 3)
PLATFORMS
arm64-darwin-22
arm64-darwin-23
arm64-darwin-25
universal-java-11
universal-java-17
x86_64-darwin-22
x86_64-linux
DEPENDENCIES
climate_control (>= 0.2.0)
mock_redis (= 0.37.0)
pry
rack-test (>= 0.6)
rspec (>= 3.2)
rubocop (~> 1.56.0)
simplecov (>= 0.11.2)
thor (~> 1.0, >= 1.0.1)
vmpooler!
yarjuf (>= 2.0)
BUNDLED WITH
2.4.18

README.md

@ -2,91 +2,34 @@
# VMPooler
- [VMPooler](#vmpooler)
- [Usage](#usage)
- [Migrating to v3](#migrating-to-v3)
- [v2.0.0 note](#v200-note)
- [Installation](#installation)
- [Dependencies](#dependencies)
- [Redis](#redis)
- [Other gems](#other-gems)
- [Configuration](#configuration)
- [Components](#components)
- [API](#api)
- [Dashboard](#dashboard)
- [Related tools and resources](#related-tools-and-resources)
- [Command-line Utility](#command-line-utility)
- [Vagrant plugin](#vagrant-plugin)
- [Development](#development)
- [docker-compose](#docker-compose)
- [Running docker-compose inside Vagrant](#running-docker-compose-inside-vagrant)
- [URLs when using docker-compose](#urls-when-using-docker-compose)
- [Update the Gemfile Lock](#update-the-gemfile-lock)
- [Releasing](#releasing)
- [License](#license)
VMPooler provides configurable 'pools' of instantly-available (pre-provisioned) and/or on-demand (provisioned on request) virtual machines.
## Usage
At [Puppet, Inc.](http://puppet.com) we run acceptance tests on thousands of disposable VMs every day. VMPooler manages the life cycle of these VMs from request through deletion, with options available to pool ready instances, and provision on demand.
The recommended method for deploying VMPooler is via [vmpooler-deployment](https://github.com/puppetlabs/vmpooler-deployment).
### Migrating to v3
Starting with the v3.x release, management of DNS records is implemented as DNS plugins, similar to compute providers. This means each pool configuration should point to a configuration object in `:dns_configs` to determine its method of record management.
For those using the global `DOMAIN` environment variable or global `:config.domain` key, this means records were not previously being managed by VMPooler (presumably managed via dynamic DNS), so its value should be moved to `:dns_configs:<INSERT_YOUR_OWN_SYMBOL>:domain` with the value for `dns_class` for the config set to `dynamic-dns`.
For example, the following < v3.x configuration:
```yaml
:config:
domain: 'example.com'
```
becomes:
```yaml
:dns_configs:
:example:
dns_class: dynamic-dns
domain: 'example.com'
```
Then any pools that should have records created via the dns config above should now reference the named dns config in the `dns_plugin` key:
```yaml
:pools:
- name: 'debian-8-x86_64'
dns_plugin: 'example'
```
For those using the GCE provider, [vmpooler-provider-gce](https://github.com/puppetlabs/vmpooler-provider-gce), as of version 1.x the DNS management has been decoupled. See <https://github.com/puppetlabs/vmpooler-provider-gce#migrating-to-v1>
### v2.0.0 note
As of version 2.0.0, all providers other than the dummy one are now separate gems. Historically the vSphere provider was included within VMPooler itself. That code has been moved to the [puppetlabs/vmpooler-provider-vsphere](https://github.com/puppetlabs/vmpooler-provider-vsphere) repository and the `vmpooler-provider-vsphere` gem. To migrate from VMPooler 1.x to 2.0 you will need to ensure that `vmpooler-provider-vsphere` is installed alongside the `vmpooler` gem. See the [Provider API](docs/PROVIDER_API.md) docs for more information.
## Installation
The recommended method of installation is via the Helm chart located in [puppetlabs/vmpooler-deployment](https://github.com/puppetlabs/vmpooler-deployment). That repository also provides Docker images of VMPooler.
### Prerequisites
VMPooler is available as a gem. To use the gem run `gem install vmpooler` or add it to your Gemfile and install via bundler. You will also need to install any needed providers in the same manner.
### Dependencies
#### Redis
VMPooler requires a [Redis](http://redis.io/) server. This is the data store used for VMPooler's inventory and queuing services.
#### Other gems
VMPooler itself and the dev environment described below require additional Ruby gems to function. You can update the currently required ones for VMPooler by running `./update-gemfile-lock.sh`. The gems for the dev environment can be updated by running `./docker/update-gemfile-lock.sh`. These scripts use the container on the FROM line of the Dockerfile to update the Gemfile.lock in the root of this repo and in the docker folder, respectively.
## Configuration
### Configuration
Configuration for VMPooler may be provided via environment variables, or a configuration file.
#### Note on JRuby 9.2.11.x
We have found that when running VMPooler on JRuby 9.2.11.x we occasionally encounter a stack overflow error that causes the pool\_manager application component to fail and stop doing work. To address this issue on JRuby 9.2.11.x we recommend setting the JRuby option `invokedynamic.yield=false`. With JRuby 9.2.11.1 you can do this by setting the environment variable `JRUBY_OPTS` to the value `-Xinvokedynamic.yield=false`.
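For example, this can be exported in the shell (or container environment) used to launch VMPooler, matching the setting used in the docker-compose file in this repository:
```bash
export JRUBY_OPTS=-Xinvokedynamic.yield=false
```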
The provided configuration defaults are reasonable for small VMPooler instances with a few pools. If you plan to run a large VMPooler instance, it is important to choose configuration values appropriate for an instance of that size in order to avoid starving the provider, or Redis, of connections.
VMPooler uses a connection pool for Redis to improve efficiency and ensure thread-safe usage. At Puppet, we run an instance with about 100 pools at any given time. We provide it with a Redis connection pool of 200 connections and a connection timeout of 40 seconds to avoid timeouts. Because metrics are generated for connections available and waited, your metrics provider will need to cope with this volume; Prometheus or StatsD is recommended to ensure metrics are delivered reliably.
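A minimal sketch of such a Redis configuration is shown below; the key names are assumed to follow the Redis section of [vmpooler.yaml.example](vmpooler.yaml.example), so verify them against that file before use:
```yaml
:redis:
  server: 'redis.example.com'
  # Sized for a large instance (~100 pools); tune for your deployment
  connection_pool_size: 200
  connection_pool_timeout: 40
```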
@ -128,7 +71,39 @@ The following YAML configuration sets up two pools, `debian-7-i386` and `debian-
See the provided YAML configuration example, [vmpooler.yaml.example](vmpooler.yaml.example), for additional configuration options and parameters or for supporting multiple providers.
## Components
### Running via Docker
A [Dockerfile](/docker/Dockerfile) is included in this repository to allow running VMPooler inside a Docker container. A configuration file can be provided via volume mapping, with its in-container path passed via the configuration file environment variable, or the application can be configured with environment variables alone. The Dockerfile provides an entrypoint so you may choose whether to run the API or manager service; the default behavior runs both. To build and run:
```bash
docker build -t vmpooler . && docker run -e VMPOOLER_CONFIG -p 80:4567 -it vmpooler
```
To run only the API and dashboard:
```bash
docker run -p 80:4567 -it vmpooler api
```
To run only the manager component:
```bash
docker run -it vmpooler manager
```
### docker-compose
A docker-compose file is provided to support running VMPooler easily via docker-compose. This is useful for development because your local code is used to build the gem used in the docker-compose environment.
```bash
docker-compose -f docker/docker-compose.yml up
```
### Running Docker inside Vagrant
A Vagrantfile is included in this repository. Please see [vagrant instructions](docs/vagrant.md) for details.
## API and Dashboard
VMPooler provides an API and web front-end (dashboard) on port `:4567`. See the provided YAML configuration example, [vmpooler.yaml.example](vmpooler.yaml.example), to specify an alternative port to listen on.
@ -154,60 +129,13 @@ A dashboard is provided to offer real-time statistics and historical graphs. It
- [vagrant-vmpooler](https://github.com/briancain/vagrant-vmpooler): Use Vagrant to create and manage your VMPooler instances.
## Development
### docker-compose
A docker-compose file is provided to support running VMPooler and associated tools locally. This is useful for development because your local code is used to build the gem used in the docker-compose environment. The compose environment also pulls in the latest providers via git. Details of this setup are stored in the `docker/` folder.
```bash
docker-compose -f docker/docker-compose.yml build && \
docker-compose -f docker/docker-compose.yml up
```
### Running docker-compose inside Vagrant
A Vagrantfile is included in this repository to provide a reproducible development environment.
```bash
vagrant up
vagrant ssh
cd /vagrant
docker-compose -f docker/docker-compose.yml build && \
docker-compose -f docker/docker-compose.yml up
```
The Vagrant environment also contains multiple Rubies you can use for spec tests and the like. A list of the pre-installed versions is shown in the message of the day when you log in.
## Development and further documentation
For more information about setting up a development instance of VMPooler or other subjects, see the [docs/](docs) directory.
### URLs when using docker-compose
### Build status
| Endpoint | URL |
|-------------------|-----------------------------------------------------------------------|
| Redis Commander | [http://localhost:8079](http://localhost:8079) |
| API               | [http://localhost:8080/api/v1](http://localhost:8080/api/v1)          |
| Dashboard         | [http://localhost:8080/dashboard/](http://localhost:8080/dashboard/)  |
| Metrics (API)     | [http://localhost:8080/prometheus](http://localhost:8080/prometheus)  |
| Metrics (Manager) | [http://localhost:8081/prometheus](http://localhost:8081/prometheus)  |
| Jaeger | [http://localhost:8082](http://localhost:8082) |
Additionally, the Redis instance can be accessed at `localhost:6379`.
## Update the Gemfile Lock
To update the `Gemfile.lock` run `./update-gemfile-lock`.
Verify, and update if needed, that the docker tag in the script and GitHub action workflows matches what is used in the [vmpooler-deployment Dockerfile](https://github.com/puppetlabs/vmpooler-deployment/blob/main/docker/Dockerfile).
## Releasing
Follow these steps to publish a new GitHub release, and build and push the gem to <https://rubygems.org>.
1. Bump the "VERSION" in `lib/vmpooler/version.rb` appropriately based on changes in `CHANGELOG.md` since the last release.
2. Run `./release-prep` to update `Gemfile.lock` and `CHANGELOG.md`.
3. Commit and push changes to a new branch, then open a pull request against `main` and be sure to add the "maintenance" label.
4. After the pull request is approved and merged, then navigate to Actions --> Release Gem --> run workflow --> Branch: main --> Run workflow.
[![Testing](https://github.com/puppetlabs/vmpooler/actions/workflows/testing.yml/badge.svg)](https://github.com/puppetlabs/vmpooler/actions/workflows/testing.yml)
## License

Vagrantfile vendored

@ -2,8 +2,6 @@
Vagrant.configure("2") do |config|
config.vm.box = "genebean/centos-7-rvm-multi"
config.vm.network "forwarded_port", guest: 4567, host: 4567 # for when not running docker-compose
config.vm.network "forwarded_port", guest: 6379, host: 6379 # Redis
config.vm.network "forwarded_port", guest: 8079, host: 8079 # Redis Commander
config.vm.network "forwarded_port", guest: 8080, host: 8080 # VMPooler api in docker-compose
config.vm.network "forwarded_port", guest: 8081, host: 8081 # VMPooler manager in docker-compose
config.vm.network "forwarded_port", guest: 8082, host: 8082 # Jaeger in docker-compose

docker/Dockerfile Normal file

@ -0,0 +1,29 @@
# Run vmpooler in a Docker container! Configuration can either be embedded
# and built within the current working directory, or stored in a
# VMPOOLER_CONFIG environment value and passed to the Docker daemon.
#
# BUILD:
# docker build -t vmpooler .
#
# RUN:
# docker run -e VMPOOLER_CONFIG -p 80:4567 -it vmpooler
FROM jruby:9.2-jdk
ARG vmpooler_version=0.11.3
COPY docker/docker-entrypoint.sh /usr/local/bin/
ENV LOGFILE=/dev/stdout \
RACK_ENV=production
RUN apt-get update -qq && \
apt-get install -y --no-install-recommends make && \
apt-get clean autoclean && \
apt-get autoremove -y && \
rm -rf /var/lib/apt/lists/*
RUN gem install vmpooler -v ${vmpooler_version} && \
chmod +x /usr/local/bin/docker-entrypoint.sh
ENTRYPOINT ["docker-entrypoint.sh"]

docker/Dockerfile-aio Normal file

@ -0,0 +1,39 @@
# Run vmpooler in a Docker container! Configuration can either be embedded
# and built within the current working directory, or stored in a
# VMPOOLER_CONFIG environment value and passed to the Docker daemon.
#
# BUILD:
# docker build -t vmpooler .
#
# RUN:
# docker run -e VMPOOLER_CONFIG -p 80:4567 -it vmpooler
FROM jruby:9.2.9-jdk
RUN mkdir -p /var/lib/vmpooler
WORKDIR /var/lib/vmpooler
RUN echo "deb http://httpredir.debian.org/debian jessie main" >/etc/apt/sources.list.d/jessie-main.list
RUN apt-get update -qq && \
apt-get install -y --no-install-recommends make redis-server && \
apt-get clean autoclean && \
apt-get autoremove -y && \
rm -rf /var/lib/apt/lists/*
ADD Gemfile* /var/lib/vmpooler/
RUN bundle install --system
RUN ln -s /opt/jruby/bin/jruby /usr/bin/jruby
COPY . /var/lib/vmpooler
ENV VMPOOLER_LOG /var/log/vmpooler.log
CMD \
/etc/init.d/redis-server start \
&& /var/lib/vmpooler/scripts/vmpooler_init.sh start \
&& while [ ! -f ${VMPOOLER_LOG} ]; do sleep 1; done ; \
tail -f ${VMPOOLER_LOG}

docker/Dockerfile_local Normal file

@ -0,0 +1,36 @@
# Run vmpooler in a Docker container! Configuration can either be embedded
# and built within the current working directory, or stored in a
# VMPOOLER_CONFIG environment value and passed to the Docker daemon.
#
# BUILD:
# docker build -t vmpooler .
#
# RUN:
# docker run -e VMPOOLER_CONFIG -p 80:4567 -it vmpooler
FROM jruby:9.2-jdk
ENV RACK_ENV=production
RUN apt-get update -qq && \
apt-get install -y --no-install-recommends make && \
apt-get clean autoclean && \
apt-get autoremove -y && \
rm -rf /var/lib/apt/lists/*
COPY docker/docker-entrypoint.sh /usr/local/bin/
COPY ./Gemfile ./
COPY ./vmpooler.gemspec ./
COPY ./lib/vmpooler/version.rb ./lib/vmpooler/version.rb
RUN gem install bundler && \
bundle config set --local jobs 3 && \
bundle install
COPY ./ ./
RUN gem build vmpooler.gemspec && \
gem install vmpooler*.gem && \
chmod +x /usr/local/bin/docker-entrypoint.sh
ENTRYPOINT ["docker-entrypoint.sh"]

docker/docker-compose.yml Normal file

@ -0,0 +1,73 @@
# For local development run with a dummy provider
version: '3.8'
services:
vmpooler-api:
build:
context: ../
dockerfile: docker/Dockerfile_local
volumes:
- type: bind
source: ${PWD}/vmpooler.yaml
target: /etc/vmpooler/vmpooler.yaml
ports:
- "8080:4567"
networks:
- redis-net
environment:
- VMPOOLER_DEBUG=true # for use of dummy auth
- VMPOOLER_CONFIG_FILE=/etc/vmpooler/vmpooler.yaml
- REDIS_SERVER=redislocal
- LOGFILE=/dev/null
- JRUBY_OPTS=-Xinvokedynamic.yield=false
- VMPOOLER_TRACING_ENABLED=true
- VMPOOLER_TRACING_JAEGER_HOST=http://jaeger-aio:14268/api/traces
image: vmpooler-local
command: api
depends_on:
- redislocal
vmpooler-manager:
build:
context: ../
dockerfile: docker/Dockerfile_local
volumes:
- type: bind
source: ${PWD}/vmpooler.yaml
target: /etc/vmpooler/vmpooler.yaml
ports:
- "8081:4567"
networks:
- redis-net
environment:
- VMPOOLER_DEBUG=true # for use of dummy auth
- VMPOOLER_CONFIG_FILE=/etc/vmpooler/vmpooler.yaml
- REDIS_SERVER=redislocal
- LOGFILE=/dev/null
- JRUBY_OPTS=-Xinvokedynamic.yield=false
- VMPOOLER_TRACING_ENABLED=true
- VMPOOLER_TRACING_JAEGER_HOST=http://jaeger-aio:14268/api/traces
image: vmpooler-local
command: manager
depends_on:
- redislocal
redislocal:
image: redis
# Uncomment this if you don't want the redis data to persist
#command: "redis-server --save '' --appendonly no"
ports:
- "6379:6379"
networks:
- redis-net
jaeger-aio:
image: jaegertracing/all-in-one:1.18
ports:
- "14250:14250"
- "8082:16686"
networks:
- redis-net
user: '1001'
read_only: true
cap_drop:
- ALL
networks:
redis-net:


@ -0,0 +1,6 @@
#!/bin/sh
set -e
set -- vmpooler "$@"
exec "$@"


@ -12,30 +12,6 @@
vmpooler provides a REST API for VM management. The following examples use `curl` for communication.
## Major change in V3 versus V2
The api/v1 and api/v2 endpoints have been removed. Additionally, the generic api endpoint that reroutes to a versioned endpoint has been removed.
The api/v3 endpoint removes the deprecated "domain" key returned in some of the operations like getting a VM, etc. If there is a "domain" configured in the top level configuration or for a specific provider,
the hostname now returns an FQDN including that domain. That is to say, we can now have multiple, different domains for each pool instead of only a single domain for all pools, or a domain restricted to a particular provider.
Clients using some of the direct API paths (without specifying api/v1 or api/v2) will now need to specify the versioned endpoint (api/v3).
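For example, querying the list of available VM pools now requires the explicit versioned path:
```
$ curl --url vmpooler.example.com/api/v3/vm
```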
## Major change in V2 versus V1
The api/v2 endpoint removes a separate "domain" key returned in some of the operations like getting a VM, etc. If there is a "domain" configured in the top level configuration or for a specific provider,
the hostname now returns an FQDN including that domain. That is to say, we can now have multiple, different domains for each provider instead of only one.
Clients using some of the direct API paths (without specifying api/v1 or api/v2) will still be redirected to v1, but this behavior is now deprecated and will be changed to v2 in the next major version.
### Updating clients from v1 to v2
Clients need to update their paths to use api/v2 instead of api/v1. Please note that API responses which used to return a "domain" key no longer have a separate "domain" key and now return the FQDN (the domain is included in the hostname).
One way to support both v1 and v2 in the client logic is to look for a "domain" key and append it to the hostname if it exists (existing v1 behavior). If the "domain" key does not exist, you can use the hostname as-is since it is already an FQDN (v2 behavior).
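A minimal client-side sketch of that approach is shown below; the helper name and structure are illustrative only and not part of the vmpooler API:
```ruby
require 'json'

# Given the JSON body of a VM request and a pool name, return FQDNs whether the
# response is v1-style (separate "domain" key) or v2-style (hostname is already an FQDN).
def hostnames_for(response_body, pool)
  data   = JSON.parse(response_body)
  hosts  = Array(data[pool]['hostname'])
  domain = data['domain'] # present in v1 responses, absent in v2
  hosts.map { |host| domain ? "#{host}.#{domain}" : host }
end
```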
#### Token operations <a name="token"></a>
Token-based authentication can be used when requesting or modifying VMs. The `/token` route can be used to create, query, or delete tokens. See the provided YAML configuration example, [vmpooler.yaml.example](vmpooler.yaml.example), for information on configuring an authentication store to use when performing token operations.
@ -50,7 +26,7 @@ Return codes:
* 404 when config:auth not found or other error
```
$ curl -u jdoe --url vmpooler.example.com/api/v3/token
$ curl -u jdoe --url vmpooler.example.com/api/v1/token
Enter host password for user 'jdoe':
```
```json
@ -72,7 +48,7 @@ Return codes:
* 404 when config:auth not found
```
$ curl -X POST -u jdoe --url vmpooler.example.com/api/v3/token
$ curl -X POST -u jdoe --url vmpooler.example.com/api/v1/token
Enter host password for user 'jdoe':
```
```json
@ -91,7 +67,7 @@ Return codes:
* 404 when config:auth or token not found
```
$ curl --url vmpooler.example.com/api/v3/token/utpg2i2xswor6h8ttjhu3d47z53yy47y
$ curl --url vmpooler.example.com/api/v1/token/utpg2i2xswor6h8ttjhu3d47z53yy47y
```
```json
{
@ -120,7 +96,7 @@ Return codes:
* 404 when config:auth not found
```
$ curl -X DELETE -u jdoe --url vmpooler.example.com/api/v3/token/utpg2i2xswor6h8ttjhu3d47z53yy47y
$ curl -X DELETE -u jdoe --url vmpooler.example.com/api/v1/token/utpg2i2xswor6h8ttjhu3d47z53yy47y
Enter host password for user 'jdoe':
```
```json
@ -139,7 +115,7 @@ Return codes:
* 200 OK
```
$ curl --url vmpooler.example.com/api/v3/vm
$ curl --url vmpooler.example.com/api/v1/vm
```
```json
[
@ -160,20 +136,21 @@ Return codes:
* 503 when the vm failed to allocate a vm, or the pool is empty
```
$ curl -d '{"debian-7-i386":"2","debian-7-x86_64":"1"}' --url vmpooler.example.com/api/v3/vm
$ curl -d '{"debian-7-i386":"2","debian-7-x86_64":"1"}' --url vmpooler.example.com/api/v1/vm
```
```json
{
"ok": true,
"debian-7-i386": {
"hostname": [
"o41xtodlvnvu5cw.example.com",
"khirruvwfjlmx3y.example.com"
"o41xtodlvnvu5cw",
"khirruvwfjlmx3y"
]
},
"debian-7-x86_64": {
"hostname": "y91qbrpbfj6d13q.example.com"
}
"hostname": "y91qbrpbfj6d13q"
},
"domain": "example.com"
}
```
@ -189,21 +166,22 @@ Return codes:
* 503 when the vm failed to allocate a vm, or the pool is empty
```
$ curl -d --url vmpooler.example.com/api/v3/vm/debian-7-i386
$ curl -d --url vmpooler.example.com/api/v1/vm/debian-7-i386
```
```json
{
"ok": true,
"debian-7-i386": {
"hostname": "fq6qlpjlsskycq6.example.com"
}
"hostname": "fq6qlpjlsskycq6"
},
"domain": "example.com"
}
```
Multiple VMs can be requested by using multiple query parameters in the URL:
```
$ curl -d --url vmpooler.example.com/api/v3/vm/debian-7-i386+debian-7-i386+debian-7-x86_64
$ curl -d --url vmpooler.example.com/api/v1/vm/debian-7-i386+debian-7-i386+debian-7-x86_64
```
```json
@ -211,13 +189,14 @@ $ curl -d --url vmpooler.example.com/api/v3/vm/debian-7-i386+debian-7-i386+debia
"ok": true,
"debian-7-i386": {
"hostname": [
"sc0o4xqtodlul5w.example.com",
"4m4dkhqiufnjmxy.example.com"
"sc0o4xqtodlul5w",
"4m4dkhqiufnjmxy"
]
},
"debian-7-x86_64": {
"hostname": "zb91y9qbrbf6d3q.example.com"
}
"hostname": "zb91y9qbrbf6d3q"
},
"domain": "example.com"
}
```
@ -232,7 +211,7 @@ Return codes:
* 404 when requesting an invalid VM hostname
```
$ curl --url vmpooler.example.com/api/v3/vm/pxpmtoonx7fiqg6
$ curl --url vmpooler.example.com/api/v1/vm/pxpmtoonx7fiqg6
```
```json
{
@ -248,6 +227,7 @@ $ curl --url vmpooler.example.com/api/v3/vm/pxpmtoonx7fiqg6
"user": "jdoe"
},
"ip": "192.168.0.1",
"domain": "example.com",
"host": "host1.example.com",
"migrated": "true"
}
@ -276,7 +256,7 @@ Return codes:
* 400 when supplied PUT parameters fail validation
```
$ curl -X PUT -d '{"lifetime":"2"}' --url vmpooler.example.com/api/v3/vm/fq6qlpjlsskycq6
$ curl -X PUT -d '{"lifetime":"2"}' --url vmpooler.example.com/api/v1/vm/fq6qlpjlsskycq6
```
```json
{
@ -285,7 +265,7 @@ $ curl -X PUT -d '{"lifetime":"2"}' --url vmpooler.example.com/api/v3/vm/fq6qlpj
```
```
$ curl -X PUT -d '{"tags":{"department":"engineering","user":"jdoe"}}' --url vmpooler.example.com/api/v3/vm/fq6qlpjlsskycq6
$ curl -X PUT -d '{"tags":{"department":"engineering","user":"jdoe"}}' --url vmpooler.example.com/api/v1/vm/fq6qlpjlsskycq6
```
```json
{
@ -303,7 +283,7 @@ Return codes:
* 404 when requesting an invalid VM hostname
```
$ curl -X DELETE --url vmpooler.example.com/api/v3/vm/fq6qlpjlsskycq6
$ curl -X DELETE --url vmpooler.example.com/api/v1/vm/fq6qlpjlsskycq6
```
```json
{
@ -323,7 +303,7 @@ Return codes:
* 404 when requesting an invalid VM hostname or size is not an integer
````
$ curl -X POST -H X-AUTH-TOKEN:a9znth9dn01t416hrguu56ze37t790bl --url vmpooler.example.com/api/v3/vm/fq6qlpjlsskycq6/disk/8
$ curl -X POST -H X-AUTH-TOKEN:a9znth9dn01t416hrguu56ze37t790bl --url vmpooler.example.com/api/v1/vm/fq6qlpjlsskycq6/disk/8
````
````json
{
@ -337,7 +317,7 @@ $ curl -X POST -H X-AUTH-TOKEN:a9znth9dn01t416hrguu56ze37t790bl --url vmpooler.e
Provisioning and attaching disks can take a moment, but once the task completes it will be reflected in a `GET /vm/<hostname>` query:
````
$ curl --url vmpooler.example.com/api/v3/vm/fq6qlpjlsskycq6
$ curl --url vmpooler.example.com/api/v1/vm/fq6qlpjlsskycq6
````
````json
{
@ -349,7 +329,8 @@ $ curl --url vmpooler.example.com/api/v3/vm/fq6qlpjlsskycq6
"state": "running",
"disk": [
"+8gb"
]
],
"domain": "delivery.puppetlabs.net"
}
}
@ -367,7 +348,7 @@ Return codes:
* 404 when requesting an invalid VM hostname
````
$ curl -X POST -H X-AUTH-TOKEN:a9znth9dn01t416hrguu56ze37t790bl --url vmpooler.example.com/api/v3/vm/fq6qlpjlsskycq6/snapshot
$ curl -X POST -H X-AUTH-TOKEN:a9znth9dn01t416hrguu56ze37t790bl --url vmpooler.example.com/api/v1/vm/fq6qlpjlsskycq6/snapshot
````
````json
{
@ -381,7 +362,7 @@ $ curl -X POST -H X-AUTH-TOKEN:a9znth9dn01t416hrguu56ze37t790bl --url vmpooler.e
Snapshotting a live VM can take a moment, but once the snapshot task completes it will be reflected in a `GET /vm/<hostname>` query:
````
$ curl --url vmpooler.example.com/api/v3/vm/fq6qlpjlsskycq6
$ curl --url vmpooler.example.com/api/v1/vm/fq6qlpjlsskycq6
````
````json
{
@ -393,7 +374,8 @@ $ curl --url vmpooler.example.com/api/v3/vm/fq6qlpjlsskycq6
"state": "running",
"snapshots": [
"n4eb4kdtp7rwv4x158366vd9jhac8btq"
]
],
"domain": "delivery.puppetlabs.net"
}
}
````
@ -408,7 +390,7 @@ Return codes:
* 404 when requesting an invalid VM hostname or snapshot is not valid
````
$ curl -X POST -H X-AUTH-TOKEN:a9znth9dn01t416hrguu56ze37t790bl --url vmpooler.example.com/api/v3/vm/fq6qlpjlsskycq6/snapshot/n4eb4kdtp7rwv4x158366vd9jhac8btq
$ curl -X POST -H X-AUTH-TOKEN:a9znth9dn01t416hrguu56ze37t790bl --url vmpooler.example.com/api/v1/vm/fq6qlpjlsskycq6/snapshot/n4eb4kdtp7rwv4x158366vd9jhac8btq
````
````json
{
@ -423,7 +405,7 @@ $ curl X POST -H X-AUTH-TOKEN:a9znth9dn01t416hrguu56ze37t790bl --url vmpooler.ex
A "live" status endpoint, representing the current state of the service.
```
$ curl --url vmpooler.example.com/api/v3/status
$ curl --url vmpooler.example.com/api/v1/status
```
```json
{
@ -475,7 +457,7 @@ The top level sections are: "capacity", "queue", "clone", "boot", "pools" and "s
If the query parameter 'view' is provided, it will be used to select which top level
element to compute and return. Select them by specifying which one you want in a comma
separated list.
For example `vmpooler.example.com/api/v3/status?view=capacity,boot`
For example `vmpooler.example.com/api/v1/status?view=capacity,boot`
##### GET /summary[?from=YYYY-MM-DD[&to=YYYY-MM-DD]]
@ -492,7 +474,7 @@ Return codes:
```
$ curl --url vmpooler.example.com/api/v3/summary
$ curl --url vmpooler.example.com/api/v1/summary
```
```json
{
@ -584,7 +566,7 @@ $ curl --url vmpooler.example.com/api/v3/summary
```
$ curl -G -d 'from=2015-03-10' -d 'to=2015-03-11' --url vmpooler.example.com/api/v3/summary
$ curl -G -d 'from=2015-03-10' -d 'to=2015-03-11' --url vmpooler.example.com/api/v1/summary
```
```json
{
@ -648,9 +630,9 @@ $ curl -G -d 'from=2015-03-10' -d 'to=2015-03-11' --url vmpooler.example.com/api
```
You can also query only the specific top level section you want by including it after `summary/`.
The valid sections are "boot", "clone" or "tag" eg. `vmpooler.example.com/api/v3/summary/boot/`.
The valid sections are "boot", "clone" or "tag" eg. `vmpooler.example.com/api/v1/summary/boot/`.
You can further drill-down the data by specifying the second level parameter to query eg
`vmpooler.example.com/api/v3/summary/tag/created_by`
`vmpooler.example.com/api/v1/summary/tag/created_by`
##### GET /poolstat?pool=FOO
@ -662,7 +644,7 @@ Return codes
* 200 OK
```
$ curl https://vmpooler.example.com/api/v3/poolstat?pool=centos-6-x86_64
$ curl https://vmpooler.example.com/api/v1/poolstat?pool=centos-6-x86_64
```
```json
{
@ -687,7 +669,7 @@ Return codes
* 200 OK
```
$ curl https://vmpooler.example.com/api/v3/totalrunning
$ curl https://vmpooler.example.com/api/v1/totalrunning
```
```json
@ -709,7 +691,7 @@ Return codes
* 400 No configuration found
```
$ curl https://vmpooler.example.com/api/v3/config
$ curl https://vmpooler.example.com/api/v1/config
```
```json
{
@ -753,7 +735,7 @@ Responses:
* 404 - An unknown error occurred
* 405 - The endpoint is disabled because experimental features are disabled
```
$ curl -X POST -H "Content-Type: application/json" -d '{"debian-7-i386":"2","debian-7-x86_64":"1"}' --url https://vmpooler.example.com/api/v3/config/poolsize
$ curl -X POST -H "Content-Type: application/json" -d '{"debian-7-i386":"2","debian-7-x86_64":"1"}' --url https://vmpooler.example.com/api/v1/config/poolsize
```
```json
{
@ -773,7 +755,7 @@ Return codes:
* 405 - The endpoint is disabled because experimental features are disabled
```
$ curl -X DELETE -u jdoe --url vmpooler.example.com/api/v3/poolsize/almalinux-8-x86_64
$ curl -X DELETE -u jdoe --url vmpooler.example.com/api/v1/poolsize/almalinux-8-x86_64
```
```json
{
@ -807,7 +789,7 @@ Responses:
* 404 - An unknown error occurred
* 405 - The endpoint is disabled because experimental features are disabled
```
$ curl -X POST -H "Content-Type: application/json" -d '{"debian-7-i386":"templates/debian-7-i386"}' --url https://vmpooler.example.com/api/v3/config/pooltemplate
$ curl -X POST -H "Content-Type: application/json" -d '{"debian-7-i386":"templates/debian-7-i386"}' --url https://vmpooler.example.com/api/v1/config/pooltemplate
```
```json
{
@ -827,7 +809,7 @@ Return codes:
* 405 - The endpoint is disabled because experimental features are disabled
```
$ curl -X DELETE -u jdoe --url vmpooler.example.com/api/v3/pooltemplate/almalinux-8-x86_64
$ curl -X DELETE -u jdoe --url vmpooler.example.com/api/v1/pooltemplate/almalinux-8-x86_64
```
```json
{
@ -855,7 +837,7 @@ Responses:
* 404 - An unknown error occurred
* 405 - The endpoint is disabled because experimental features are disabled
```
$ curl -X POST -H "Content-Type: application/json" -d '{"debian-7-i386":"1"}' --url https://vmpooler.example.com/api/v3/poolreset
$ curl -X POST -H "Content-Type: application/json" -d '{"debian-7-i386":"1"}' --url https://vmpooler.example.com/api/v1/poolreset
```
```json
{
@ -883,7 +865,7 @@ Responses:
* 404 - A pool was requested, which is not available in the running configuration, or an unknown error occurred.
* 409 - A request of the matching ID has already been created
```
$ curl -X POST -H "Content-Type: application/json" -d '{"debian-7-i386":"4"}' --url https://vmpooler.example.com/api/v3/ondemandvm
$ curl -X POST -H "Content-Type: application/json" -d '{"debian-7-i386":"4"}' --url https://vmpooler.example.com/api/v1/ondemandvm
```
```json
{
@ -905,7 +887,7 @@ Responses:
* 202 - The request is not ready yet
* 404 - The request can not be found, or an unknown error occurred
```
$ curl https://vmpooler.example.com/api/v3/ondemandvm/e3ff6271-d201-4f31-a315-d17f4e15863a
$ curl https://vmpooler.example.com/api/v1/ondemandvm/e3ff6271-d201-4f31-a315-d17f4e15863a
```
```json
{
@ -945,7 +927,7 @@ Responses:
* 401 - No auth token provided, or provided auth token is not valid, and auth is enabled
* 404 - The request can not be found, or an unknown error occurred.
```
$ curl -X DELETE https://vmpooler.example.com/api/v3/ondemandvm/e3ff6271-d201-4f31-a315-d17f4e15863a
$ curl -X DELETE https://vmpooler.example.com/api/v1/ondemandvm/e3ff6271-d201-4f31-a315-d17f4e15863a
```
```json
{


@ -19,6 +19,11 @@ Provide the entire configuration as a blob of yaml. Individual parameters passed
Path to the file to use when loading the vmpooler configuration. This is only evaluated if `VMPOOLER_CONFIG` has not been specified.
### DOMAIN
If set, returns a top-level 'domain' JSON key in POST requests
(optional)
### REDIS\_SERVER
The redis server to use for vmpooler.
@ -246,18 +251,6 @@ This can be a string providing a single DN. For multiple DNs please specify the
The LDAP object-type used to designate a user object.
(optional)
### LDAP\_SERVICE_ACCOUNT\_HASH
A hash containing the following parameters for a service account used to perform the initial bind. After the initial bind, a search query is performed using the 'base' and 'user_object', and then a re-bind is performed as the returned user.
- :user_dn: The full distinguished name (DN) of the service account used to bind.
- :password: The password for the service account used to bind.
(optional)
### SITE\_NAME
The name of your deployment.

docs/dev-setup.md Normal file

@ -0,0 +1,236 @@
# Setting up a vmpooler development environment
## Docker is the preferred development environment
The docker compose file is the easiest way to get vmpooler running with any local code changes. It expects to find a vmpooler.yaml configuration file in the root vmpooler directory, which is mapped into the running container for the vmpooler application. This file primarily contains the pools configuration; nearly all other configuration can be supplied with environment variables. A quick-start example is shown below.
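For example, assuming the `vmpooler.yaml.dummy-example` file referenced in the Vagrant docs provides a usable starting point:
```bash
cp vmpooler.yaml.dummy-example vmpooler.yaml
docker-compose -f docker/docker-compose.yml build && \
docker-compose -f docker/docker-compose.yml up
```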
## Requirements for local installation directly on your system (not recommended)
* Supported on OSX, Windows and Linux
* Ruby or JRuby
Note: it is recommended to use Bundler instead of installing gems into the system gem repository
* A local Redis server
Either a containerized instance of Redis or a local version is fine.
## Setup source and ruby
* Clone repository, either from your own fork or the original source
* Perform a bundle install
```
~/ > git clone https://github.com/puppetlabs/vmpooler.git
Cloning into 'vmpooler'...
remote: Counting objects: 3411, done.
...
~/ > cd vmpooler
~/vmpooler/ > bundle install
Fetching gem metadata from https://rubygems.org/.........
Fetching version metadata from https://rubygems.org/..
Resolving dependencies...
Installing rake 12.0.0
...
Bundle complete! 16 Gemfile dependencies, 37 gems now installed.
```
## Setup environment variables
### `VMPOOLER_DEBUG`
Setting the `VMPOOLER_DEBUG` environment variable will instruct vmpooler to:
* Output log messages to STDOUT
* Allow the use of the dummy authentication method
* Add interrupt traps so you can stop vmpooler when run interactively
Linux, OSX
```bash
~/vmpooler/ > export VMPOOLER_DEBUG=true
```
Windows (PowerShell)
```powershell
C:\vmpooler > $ENV:VMPOOLER_DEBUG = 'true'
```
### `VMPOOLER_CONFIG`
When `VMPOOLER_CONFIG` is set, vmpooler will read its configuration from the content of the environment variable.
Note that this variable does not point to a different configuration file, but stores the contents of a configuration file. You may use `VMPOOLER_CONFIG_FILE` instead to specify a filename.
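For example, one (illustrative) way to populate it from an existing config file:
```bash
export VMPOOLER_CONFIG="$(cat vmpooler.yaml)"
```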
### `VMPOOLER_CONFIG_FILE`
When `VMPOOLER_CONFIG_FILE` is set, vmpooler will read its configuration from the file specified in the environment variable.
Note that, unlike `VMPOOLER_CONFIG`, this variable points to a configuration file rather than containing the configuration itself.
## Setup vmpooler Configuration
You can create a `vmpooler.yaml` file, set the `VMPOOLER_CONFIG` environment variable with the equivalent content, or set the `VMPOOLER_CONFIG_FILE` environment variable with the name of another configuration file to use. `VMPOOLER_CONFIG` takes precedence over `VMPOOLER_CONFIG_FILE`.
Example minimal configuration file:
```yaml
---
:providers:
:dummy:
:redis:
server: 'localhost'
:auth:
provider: dummy
:tagfilter:
url: '(.*)\/'
:config:
site_name: 'vmpooler'
# Need to change this on Windows
logfile: '/var/log/vmpooler.log'
task_limit: 10
timeout: 15
vm_lifetime: 12
vm_lifetime_auth: 24
allowed_tags:
- 'created_by'
- 'project'
domain: 'example.com'
prefix: 'poolvm-'
# Uncomment the lines below to suppress metrics to STDOUT
# :statsd:
# server: 'localhost'
# prefix: 'vmpooler'
# port: 8125
:pools:
- name: 'pool01'
size: 5
provider: dummy
- name: 'pool02'
size: 5
provider: dummy
```
## Running vmpooler locally
* Run `bundle exec ruby vmpooler`
If using JRuby, you may need to use `bundle exec jruby vmpooler`
You should see output similar to:
```
~/vmpooler/ > bundle exec ruby vmpooler
[2017-06-16 14:50:31] starting vmpooler
[2017-06-16 14:50:31] [!] Creating provider 'dummy'
[2017-06-16 14:50:31] [dummy] ConnPool - Creating a connection pool of size 1 with timeout 10
[2017-06-16 14:50:31] [*] [disk_manager] starting worker thread
[2017-06-16 14:50:31] [*] [snapshot_manager] starting worker thread
[2017-06-16 14:50:31] [*] [pool01] starting worker thread
[2017-06-16 14:50:31] [*] [pool02] starting worker thread
[2017-06-16 14:50:31] [dummy] ConnPool - Creating a connection object ID 1784
== Sinatra (v1.4.8) has taken the stage on 4567 for production with backup from Puma
*** SIGUSR2 not implemented, signal based restart unavailable!
*** SIGUSR1 not implemented, signal based restart unavailable!
*** SIGHUP not implemented, signal based logs reopening unavailable!
Puma starting in single mode...
* Version 3.9.1 (ruby 2.3.1-p112), codename: Private Caller
* Min threads: 0, max threads: 16
* Environment: development
* Listening on tcp://0.0.0.0:4567
Use Ctrl-C to stop
[2017-06-16 14:50:31] [!] [pool02] is empty
[2017-06-16 14:50:31] [!] [pool01] is empty
[2017-06-16 14:50:31] [ ] [pool02] Starting to clone 'poolvm-nexs1w50m4djap5'
[2017-06-16 14:50:31] [ ] [pool01] Starting to clone 'poolvm-r543eibo4b6tjer'
[2017-06-16 14:50:31] [ ] [pool01] Starting to clone 'poolvm-neqmu7wj7aukyjy'
[2017-06-16 14:50:31] [ ] [pool02] Starting to clone 'poolvm-nsdnrhhy22lnemo'
[2017-06-16 14:50:31] [ ] [pool01] 'poolvm-r543eibo4b6tjer' is being cloned from ''
[2017-06-16 14:50:31] [ ] [pool01] 'poolvm-neqmu7wj7aukyjy' is being cloned from ''
[2017-06-16 14:50:31] [ ] [pool02] 'poolvm-nexs1w50m4djap5' is being cloned from ''
[2017-06-16 14:50:31] [ ] [pool01] Starting to clone 'poolvm-edzlp954lyiozli'
[2017-06-16 14:50:31] [ ] [pool01] Starting to clone 'poolvm-nb0uci0yrwbxr6x'
[2017-06-16 14:50:31] [ ] [pool02] Starting to clone 'poolvm-y2yxgnovaneymvy'
[2017-06-16 14:50:31] [ ] [pool01] Starting to clone 'poolvm-nur59d25s1y8jko'
...
```
### Common Errors
* Forgetting to set the VMPOOLER_DEBUG environment variable
vmpooler will fail to start with an error similar to the one below:
```
~/vmpooler/ > bundle exec ruby vmpooler
~/vmpooler/lib/vmpooler.rb:44:in `config': Dummy authentication should not be used outside of debug mode; please set environment variable VMPOOLER_DEBUG to 'true' if you want to use dummy authentication (RuntimeError)
from vmpooler:8:in `<main>'
...
```
* Error in vmpooler configuration
If there is an error in the vmpooler configuration file, or any other fatal error in the Pool Manager, vmpooler will appear to be running but no log information is displayed. The error is not shown until you press `Ctrl-C`, at which point the cause of the issue suddenly becomes visible.
For example, when running vmpooler on Windows with a Unix-style filename for the vmpooler log:
```powershell
C:\vmpooler > bundle exec ruby vmpooler
[2017-06-16 14:49:57] starting vmpooler
== Sinatra (v1.4.8) has taken the stage on 4567 for production with backup from Puma
*** SIGUSR2 not implemented, signal based restart unavailable!
*** SIGUSR1 not implemented, signal based restart unavailable!
*** SIGHUP not implemented, signal based logs reopening unavailable!
Puma starting in single mode...
* Version 3.9.1 (ruby 2.3.1-p112), codename: Private Caller
* Min threads: 0, max threads: 16
* Environment: development
* Listening on tcp://0.0.0.0:4567
Use Ctrl-C to stop
# [ NOTHING ELSE IS LOGGED ]
```
Once `Ctrl-C` is pressed, the error is shown:
```powershell
...
== Sinatra has ended his set (crowd applauds)
Shutting down.
C:/tools/ruby2.3.1x64/lib/ruby/2.3.0/open-uri.rb:37:in `initialize': No such file or directory @ rb_sysopen - /var/log/vmpooler.log (Errno::ENOENT)
from C:/tools/ruby2.3.1x64/lib/ruby/2.3.0/open-uri.rb:37:in `open'
from C:/tools/ruby2.3.1x64/lib/ruby/2.3.0/open-uri.rb:37:in `open'
from C:/vmpooler/lib/vmpooler/logger.rb:17:in `log'
from C:/vmpooler/lib/vmpooler/pool_manager.rb:709:in `execute!'
from vmpooler:26:in `block in <main>'
```
## Default vmpooler URLs
| Endpoint | URL |
|-----------|----------------------------------------------------------------------|
| Dashboard | [http://localhost:4567/dashboard/](http://localhost:4567/dashboard/) |
| API       | [http://localhost:4567/api/v1](http://localhost:4567/api/v1)         |
## Use the vmpooler API locally
Once a local vmpooler instance is running you can use any tool you need to interact with the API. The dummy authentication provider will allow a user to connect if the username and password are not the same:
* Authentication is successful for username `Alice` with password `foo`
* Authentication will fail for username `Alice` with password `Alice`
As with a normal vmpooler instance, tokens will be created for the user and can be used for regular vmpooler operations.
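For example, following the token examples in the API docs (the path shown is the v1 endpoint used by this local setup), any password other than the username will be accepted by the dummy provider:
```
$ curl -X POST -u Alice --url localhost:4567/api/v1/token
Enter host password for user 'Alice':
```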

docs/vagrant.md Normal file

@ -0,0 +1,45 @@
A [Vagrantfile](Vagrantfile) is also included in this repository so that you don't have to run Docker on your local computer.
To use it run:
```
vagrant up
vagrant ssh
docker run -p 8080:4567 -v /vagrant/vmpooler.yaml.example:/var/lib/vmpooler/vmpooler.yaml -it --rm --name pooler vmpooler
```
To run vmpooler with the example dummy provider you can replace the above docker command with this:
```
docker run -e VMPOOLER_DEBUG=true -p 8080:4567 -v /vagrant/vmpooler.yaml.dummy-example:/var/lib/vmpooler/vmpooler.yaml -e VMPOOLER_LOG='/var/log/vmpooler/vmpooler.log' -it --rm --name pooler vmpooler
```
Either variation will allow you to access the dashboard from [localhost:8080](http://localhost:8080/).
### Running directly in Vagrant
You can also run vmpooler directly in the Vagrant box. To do so run this:
```
vagrant up
vagrant ssh
cd /vagrant
# Do this if using the dummy provider
export VMPOOLER_DEBUG=true
cp vmpooler.yaml.dummy-example vmpooler.yaml
# vmpooler needs a redis server.
sudo yum -y install redis
sudo systemctl start redis
# Optional: Choose your ruby version or use jruby
# ruby 2.4.x is used by default
rvm list
rvm use jruby-9.1.7.0
gem install bundler
bundle install
bundle exec ruby vmpooler
```
When run this way you can access vmpooler from your local computer via [localhost:4567](http://localhost:4567/).
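As a quick check that the API is reachable from your local computer (assuming the default port shown above), you can query the status endpoint:
```
curl http://localhost:4567/api/v1/status
```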

View file

@ -17,20 +17,15 @@
logfile: '/Users/samuel/workspace/vmpooler/vmpooler.log'
task_limit: 10
timeout: 15
timeout_notification: 5
vm_checktime: 1
vm_lifetime: 12
vm_lifetime_auth: 24
allowed_tags:
- 'created_by'
- 'project'
domain: 'example.com'
prefix: 'poolvm-'
:dns_configs:
:example:
dns_class: dynamic-dns
domain: 'example.com'
:pools:
- name: 'debian-7-i386'
alias: [ 'debian-7-32' ]
@ -39,10 +34,8 @@
datastore: 'vmstorage'
size: 5
timeout: 15
timeout_notification: 5
ready_ttl: 1440
provider: dummy
dns_plugin: 'example'
- name: 'debian-7-i386-stringalias'
alias: 'debian-7-32-stringalias'
template: 'Templates/debian-7-i386'
@ -50,10 +43,8 @@
datastore: 'vmstorage'
size: 5
timeout: 15
timeout_notification: 5
ready_ttl: 1440
provider: dummy
dns_plugin: 'example'
- name: 'debian-7-x86_64'
alias: [ 'debian-7-64', 'debian-7-amd64' ]
template: 'Templates/debian-7-x86_64'
@ -61,20 +52,16 @@
datastore: 'vmstorage'
size: 5
timeout: 15
timeout_notification: 5
ready_ttl: 1440
provider: dummy
dns_plugin: 'example'
- name: 'debian-7-i386-noalias'
template: 'Templates/debian-7-i386'
folder: 'Pooled VMs/debian-7-i386'
datastore: 'vmstorage'
size: 5
timeout: 15
timeout_notification: 5
ready_ttl: 1440
provider: dummy
dns_plugin: 'example'
- name: 'debian-7-x86_64-alias-otherpool-extended'
alias: [ 'debian-7-x86_64' ]
template: 'Templates/debian-7-x86_64'
@ -82,7 +69,6 @@
datastore: 'other-vmstorage'
size: 5
timeout: 15
timeout_notification: 5
ready_ttl: 1440
provider: dummy
dns_plugin: 'example'

View file

@ -3,7 +3,6 @@
module Vmpooler
require 'concurrent'
require 'date'
require 'deep_merge'
require 'json'
require 'net/ldap'
require 'open-uri'
@ -17,7 +16,6 @@ module Vmpooler
# Dependencies for tracing
require 'opentelemetry-instrumentation-concurrent_ruby'
require 'opentelemetry-instrumentation-http_client'
require 'opentelemetry-instrumentation-redis'
require 'opentelemetry-instrumentation-sinatra'
require 'opentelemetry-sdk'
@ -34,7 +32,7 @@ module Vmpooler
config_string = ENV['VMPOOLER_CONFIG']
# Parse the YAML config into a Hash
# Allow the Symbol class
parsed_config = YAML.safe_load(config_string, permitted_classes: [Symbol])
parsed_config = YAML.safe_load(config_string, [Symbol])
else
# Take the name of the config file either from an ENV variable or from the filepath argument
config_file = ENV['VMPOOLER_CONFIG_FILE'] || filepath
@ -43,9 +41,8 @@ module Vmpooler
if parsed_config[:config]['extra_config']
extra_configs = parsed_config[:config]['extra_config'].split(',')
extra_configs.each do |config|
puts "loading extra_config file #{config}"
extra_config = YAML.load_file(config)
parsed_config.deep_merge(extra_config)
parsed_config.merge!(extra_config)
end
end
end
@ -76,13 +73,9 @@ module Vmpooler
parsed_config[:config]['prefix'] = ENV['PREFIX'] || parsed_config[:config]['prefix'] || ''
parsed_config[:config]['logfile'] = ENV['LOGFILE'] if ENV['LOGFILE']
parsed_config[:config]['site_name'] = ENV['SITE_NAME'] if ENV['SITE_NAME']
if !parsed_config[:config]['domain'].nil? || !ENV['DOMAIN'].nil?
puts '[!] [error] The "domain" config setting has been removed in v3. Please see the docs for migrating the domain config to use a dns plugin at https://github.com/puppetlabs/vmpooler/blob/main/README.md#migrating-to-v3'
exit 1
end
parsed_config[:config]['domain'] = ENV['DOMAIN'] if ENV['DOMAIN']
parsed_config[:config]['clone_target'] = ENV['CLONE_TARGET'] if ENV['CLONE_TARGET']
parsed_config[:config]['timeout'] = string_to_int(ENV['TIMEOUT']) if ENV['TIMEOUT']
parsed_config[:config]['timeout_notification'] = string_to_int(ENV['TIMEOUT_NOTIFICATION']) if ENV['TIMEOUT_NOTIFICATION']
parsed_config[:config]['vm_lifetime_auth'] = string_to_int(ENV['VM_LIFETIME_AUTH']) if ENV['VM_LIFETIME_AUTH']
parsed_config[:config]['max_tries'] = string_to_int(ENV['MAX_TRIES']) if ENV['MAX_TRIES']
parsed_config[:config]['retry_factor'] = string_to_int(ENV['RETRY_FACTOR']) if ENV['RETRY_FACTOR']
@ -104,7 +97,7 @@ module Vmpooler
parsed_config[:redis]['data_ttl'] = string_to_int(ENV['REDIS_DATA_TTL']) || parsed_config[:redis]['data_ttl'] || 168
parsed_config[:redis]['connection_pool_size'] = string_to_int(ENV['REDIS_CONNECTION_POOL_SIZE']) || parsed_config[:redis]['connection_pool_size'] || 10
parsed_config[:redis]['connection_pool_timeout'] = string_to_int(ENV['REDIS_CONNECTION_POOL_TIMEOUT']) || parsed_config[:redis]['connection_pool_timeout'] || 5
parsed_config[:redis]['reconnect_attempts'] = string_array_to_array(ENV['REDIS_RECONNECT_ATTEMPTS']) || parsed_config[:redis]['reconnect_attempts'] || 10
parsed_config[:redis]['reconnect_attempts'] = string_to_int(ENV['REDIS_RECONNECT_ATTEMPTS']) || parsed_config[:redis]['reconnect_attempts'] || 10
parsed_config[:statsd] = parsed_config[:statsd] || {} if ENV['STATSD_SERVER']
parsed_config[:statsd]['server'] = ENV['STATSD_SERVER'] if ENV['STATSD_SERVER']
@ -138,7 +131,6 @@ module Vmpooler
# Create an index of pool aliases
parsed_config[:pool_names] = Set.new
unless parsed_config[:pools]
puts 'loading pools configuration from redis, since the config[:pools] is empty'
redis = new_redis(parsed_config[:redis]['server'], parsed_config[:redis]['port'], parsed_config[:redis]['password'])
parsed_config[:pools] = load_pools_from_redis(redis)
end
@ -210,13 +202,8 @@ module Vmpooler
end
def self.new_redis(host = 'localhost', port = nil, password = nil, redis_reconnect_attempts = 10)
Redis.new(
host: host,
port: port,
password: password,
reconnect_attempts: redis_reconnect_attempts,
connect_timeout: 300
)
Redis.new(host: host, port: port, password: password, reconnect_attempts: redis_reconnect_attempts, reconnect_delay: 1.5,
reconnect_delay_max: 10.0)
end
def self.pools(conf)
@ -241,13 +228,6 @@ module Vmpooler
Integer(s)
end
def self.string_array_to_array(s)
# Returns an array from an array like string
return if s.nil?
JSON.parse(s)
end
def self.true?(obj)
obj.to_s.downcase == 'true'
end
@ -284,7 +264,6 @@ module Vmpooler
OpenTelemetry::SDK.configure do |c|
c.use 'OpenTelemetry::Instrumentation::Sinatra'
c.use 'OpenTelemetry::Instrumentation::ConcurrentRuby'
c.use 'OpenTelemetry::Instrumentation::HttpClient'
c.use 'OpenTelemetry::Instrumentation::Redis'
c.add_span_processor(span_processor)

View file

@ -3,7 +3,7 @@
module Vmpooler
class API < Sinatra::Base
# Load API components
%w[helpers dashboard v3 request_logger healthcheck].each do |lib|
%w[helpers dashboard reroute v1 request_logger healthcheck].each do |lib|
require "vmpooler/api/#{lib}"
end
# Load dashboard components
@ -52,7 +52,8 @@ module Vmpooler
use Vmpooler::Dashboard
use Vmpooler::API::Dashboard
use Vmpooler::API::V3
use Vmpooler::API::Reroute
use Vmpooler::API::V1
end
# Get thee started O WebServer

View file

@ -1,198 +1,141 @@
# frozen_string_literal: true
require 'vmpooler/api/input_validator'
module Vmpooler
class API
module Helpers
include InputValidator
def tracer
@tracer ||= OpenTelemetry.tracer_provider.tracer('api', Vmpooler::VERSION)
end
def has_token?
request.env['HTTP_X_AUTH_TOKEN'].nil? ? false : true
end
def valid_token?(backend)
tracer.in_span("Vmpooler::API::Helpers.#{__method__}") do
return false unless has_token?
return false unless has_token?
backend.exists?("vmpooler__token__#{request.env['HTTP_X_AUTH_TOKEN']}") ? true : false
end
backend.exists?("vmpooler__token__#{request.env['HTTP_X_AUTH_TOKEN']}") ? true : false
end
def validate_token(backend)
tracer.in_span("Vmpooler::API::Helpers.#{__method__}") do
if valid_token?(backend)
backend.hset("vmpooler__token__#{request.env['HTTP_X_AUTH_TOKEN']}", 'last', Time.now.to_s)
if valid_token?(backend)
backend.hset("vmpooler__token__#{request.env['HTTP_X_AUTH_TOKEN']}", 'last', Time.now)
return true
end
content_type :json
result = { 'ok' => false }
headers['WWW-Authenticate'] = 'Basic realm="Authentication required"'
halt 401, JSON.pretty_generate(result)
return true
end
content_type :json
result = { 'ok' => false }
headers['WWW-Authenticate'] = 'Basic realm="Authentication required"'
halt 401, JSON.pretty_generate(result)
end
def validate_auth(backend)
tracer.in_span("Vmpooler::API::Helpers.#{__method__}") do
return if authorized?
return if authorized?
content_type :json
content_type :json
result = { 'ok' => false }
result = { 'ok' => false }
headers['WWW-Authenticate'] = 'Basic realm="Authentication required"'
halt 401, JSON.pretty_generate(result)
end
headers['WWW-Authenticate'] = 'Basic realm="Authentication required"'
halt 401, JSON.pretty_generate(result)
end
def authorized?
tracer.in_span("Vmpooler::API::Helpers.#{__method__}") do
@auth ||= Rack::Auth::Basic::Request.new(request.env)
@auth ||= Rack::Auth::Basic::Request.new(request.env)
if @auth.provided? and @auth.basic? and @auth.credentials
username, password = @auth.credentials
if @auth.provided? and @auth.basic? and @auth.credentials
username, password = @auth.credentials
if authenticate(Vmpooler::API.settings.config[:auth], username, password)
return true
end
if authenticate(Vmpooler::API.settings.config[:auth], username, password)
return true
end
return false
end
return false
end
def authenticate_ldap(port, host, encryption_hash, user_object, base, username_str, password_str, service_account_hash = nil)
tracer.in_span(
"Vmpooler::API::Helpers.#{__method__}",
attributes: {
'net.peer.name' => host,
'net.peer.port' => port,
'net.transport' => 'ip_tcp',
'enduser.id' => username_str
},
kind: :client
) do
if service_account_hash
username = service_account_hash[:user_dn]
password = service_account_hash[:password]
else
username = "#{user_object}=#{username_str},#{base}"
password = password_str
end
def authenticate_ldap(port, host, encryption_hash, user_object, base, username_str, password_str)
ldap = Net::LDAP.new(
:host => host,
:port => port,
:encryption => encryption_hash,
:base => base,
:auth => {
:method => :simple,
:username => "#{user_object}=#{username_str},#{base}",
:password => password_str
}
)
ldap = Net::LDAP.new(
:host => host,
:port => port,
:encryption => encryption_hash,
:base => base,
:auth => {
:method => :simple,
:username => username,
:password => password
}
)
return true if ldap.bind
if service_account_hash
return true if ldap.bind_as(
:base => base,
:filter => "(#{user_object}=#{username_str})",
:password => password_str
)
elsif ldap.bind
return true
else
return false
end
return false
end
return false
end
def authenticate(auth, username_str, password_str)
tracer.in_span(
"Vmpooler::API::Helpers.#{__method__}",
attributes: {
'enduser.id' => username_str
case auth['provider']
when 'dummy'
return (username_str != password_str)
when 'ldap'
ldap_base = auth[:ldap]['base']
ldap_port = auth[:ldap]['port'] || 389
ldap_user_obj = auth[:ldap]['user_object']
ldap_host = auth[:ldap]['host']
ldap_encryption_hash = auth[:ldap]['encryption'] || {
:method => :start_tls,
:tls_options => { :ssl_version => 'TLSv1' }
}
) do
case auth['provider']
when 'dummy'
return (username_str != password_str)
when 'ldap'
ldap_base = auth[:ldap]['base']
ldap_port = auth[:ldap]['port'] || 389
ldap_user_obj = auth[:ldap]['user_object']
ldap_host = auth[:ldap]['host']
ldap_encryption_hash = auth[:ldap]['encryption'] || {
:method => :start_tls,
:tls_options => { :ssl_version => 'TLSv1' }
}
service_account_hash = auth[:ldap]['service_account_hash']
unless ldap_base.is_a? Array
ldap_base = ldap_base.split
end
unless ldap_user_obj.is_a? Array
ldap_user_obj = ldap_user_obj.split
end
ldap_base.each do |search_base|
ldap_user_obj.each do |search_user_obj|
result = authenticate_ldap(
ldap_port,
ldap_host,
ldap_encryption_hash,
search_user_obj,
search_base,
username_str,
password_str,
service_account_hash
)
return true if result
end
end
return false
unless ldap_base.is_a? Array
ldap_base = ldap_base.split
end
unless ldap_user_obj.is_a? Array
ldap_user_obj = ldap_user_obj.split
end
ldap_base.each do |search_base|
ldap_user_obj.each do |search_user_obj|
result = authenticate_ldap(
ldap_port,
ldap_host,
ldap_encryption_hash,
search_user_obj,
search_base,
username_str,
password_str
)
return true if result
end
end
return false
end
end
def export_tags(backend, hostname, tags)
tracer.in_span("Vmpooler::API::Helpers.#{__method__}") do
backend.pipelined do |pipeline|
tags.each_pair do |tag, value|
next if value.nil? or value.empty?
backend.pipelined do
tags.each_pair do |tag, value|
next if value.nil? or value.empty?
pipeline.hset("vmpooler__vm__#{hostname}", "tag:#{tag}", value)
pipeline.hset("vmpooler__tag__#{Date.today}", "#{hostname}:#{tag}", value)
end
backend.hset("vmpooler__vm__#{hostname}", "tag:#{tag}", value)
backend.hset("vmpooler__tag__#{Date.today}", "#{hostname}:#{tag}", value)
end
end
end
def filter_tags(tags)
tracer.in_span("Vmpooler::API::Helpers.#{__method__}") do
return unless Vmpooler::API.settings.config[:tagfilter]
return unless Vmpooler::API.settings.config[:tagfilter]
tags.each_pair do |tag, value|
next unless filter = Vmpooler::API.settings.config[:tagfilter][tag]
tags.each_pair do |tag, value|
next unless filter = Vmpooler::API.settings.config[:tagfilter][tag]
tags[tag] = value.match(filter).captures.join if value.match(filter)
end
tags
tags[tag] = value.match(filter).captures.join if value.match(filter)
end
tags
end
def mean(list)
@ -204,347 +147,310 @@ module Vmpooler
/^\d{4}-\d{2}-\d{2}$/ === date_str
end
def hostname_shorten(hostname)
hostname[/[^.]+/]
def hostname_shorten(hostname, domain=nil)
if domain && hostname =~ /^[\w-]+\.#{domain}$/
hostname = hostname[/[^.]+/]
end
hostname
end
def get_task_times(backend, task, date_str)
tracer.in_span("Vmpooler::API::Helpers.#{__method__}") do
backend.hvals("vmpooler__#{task}__" + date_str).map(&:to_f)
end
backend.hvals("vmpooler__#{task}__" + date_str).map(&:to_f)
end
# Takes the pools and a key to run scard on
# returns an integer for the total count
def get_total_across_pools_redis_scard(pools, key, backend)
tracer.in_span("Vmpooler::API::Helpers.#{__method__}") do
# using pipelined is much faster than querying each of the pools and adding them
# as we get the result.
res = backend.pipelined do |pipeline|
pools.each do |pool|
pipeline.scard(key + pool['name'])
end
# using pipelined is much faster than querying each of the pools and adding them
# as we get the result.
res = backend.pipelined do
pools.each do |pool|
backend.scard(key + pool['name'])
end
res.inject(0) { |m, x| m + x }.to_i
end
res.inject(0) { |m, x| m + x }.to_i
end
# Takes the pools and a key to run scard on
# returns a hash with each pool name as key and the value being the count as integer
def get_list_across_pools_redis_scard(pools, key, backend)
tracer.in_span("Vmpooler::API::Helpers.#{__method__}") do
# using pipelined is much faster than querying each of the pools and adding them
# as we get the result.
temp_hash = {}
res = backend.pipelined do |pipeline|
pools.each do |pool|
pipeline.scard(key + pool['name'])
end
# using pipelined is much faster than querying each of the pools and adding them
# as we get the result.
temp_hash = {}
res = backend.pipelined do
pools.each do |pool|
backend.scard(key + pool['name'])
end
pools.each_with_index do |pool, i|
temp_hash[pool['name']] = res[i].to_i
end
temp_hash
end
pools.each_with_index do |pool, i|
temp_hash[pool['name']] = res[i].to_i
end
temp_hash
end
# Takes the pools and a key to run hget on
# returns a hash with each pool name as key and the value as string
def get_list_across_pools_redis_hget(pools, key, backend)
tracer.in_span("Vmpooler::API::Helpers.#{__method__}") do
# using pipelined is much faster than querying each of the pools and adding them
# as we get the result.
temp_hash = {}
res = backend.pipelined do |pipeline|
pools.each do |pool|
pipeline.hget(key, pool['name'])
end
# using pipelined is much faster than querying each of the pools and adding them
# as we get the result.
temp_hash = {}
res = backend.pipelined do
pools.each do |pool|
backend.hget(key, pool['name'])
end
pools.each_with_index do |pool, i|
temp_hash[pool['name']] = res[i].to_s
end
temp_hash
end
pools.each_with_index do |pool, i|
temp_hash[pool['name']] = res[i].to_s
end
temp_hash
end
def get_capacity_metrics(pools, backend)
tracer.in_span("Vmpooler::API::Helpers.#{__method__}") do
capacity = {
current: 0,
total: 0,
percent: 0
}
capacity = {
current: 0,
total: 0,
percent: 0
}
pools.each do |pool|
capacity[:total] += pool['size'].to_i
end
capacity[:current] = get_total_across_pools_redis_scard(pools, 'vmpooler__ready__', backend)
if capacity[:total] > 0
capacity[:percent] = (capacity[:current].fdiv(capacity[:total]) * 100.0).round(1)
end
capacity
pools.each do |pool|
capacity[:total] += pool['size'].to_i
end
capacity[:current] = get_total_across_pools_redis_scard(pools, 'vmpooler__ready__', backend)
if capacity[:total] > 0
capacity[:percent] = (capacity[:current].fdiv(capacity[:total]) * 100.0).round(1)
end
capacity
end
def get_queue_metrics(pools, backend)
tracer.in_span("Vmpooler::API::Helpers.#{__method__}") do
queue = {
requested: 0,
pending: 0,
cloning: 0,
booting: 0,
ready: 0,
running: 0,
completed: 0,
total: 0
}
queue = {
pending: 0,
cloning: 0,
booting: 0,
ready: 0,
running: 0,
completed: 0,
total: 0
}
# Use a single pipeline to fetch all queue counts at once for better performance
results = backend.pipelined do |pipeline|
# Order matters - we'll use indices to extract values
pools.each do |pool|
pipeline.scard("vmpooler__provisioning__request#{pool['name']}") # 0..n-1
pipeline.scard("vmpooler__provisioning__processing#{pool['name']}") # n..2n-1
pipeline.scard("vmpooler__odcreate__task#{pool['name']}") # 2n..3n-1
pipeline.scard("vmpooler__pending__#{pool['name']}") # 3n..4n-1
pipeline.scard("vmpooler__ready__#{pool['name']}") # 4n..5n-1
pipeline.scard("vmpooler__running__#{pool['name']}") # 5n..6n-1
pipeline.scard("vmpooler__completed__#{pool['name']}") # 6n..7n-1
end
pipeline.get('vmpooler__tasks__clone') # 7n
pipeline.get('vmpooler__tasks__ondemandclone') # 7n+1
end
queue[:pending] = get_total_across_pools_redis_scard(pools, 'vmpooler__pending__', backend)
queue[:ready] = get_total_across_pools_redis_scard(pools, 'vmpooler__ready__', backend)
queue[:running] = get_total_across_pools_redis_scard(pools, 'vmpooler__running__', backend)
queue[:completed] = get_total_across_pools_redis_scard(pools, 'vmpooler__completed__', backend)
n = pools.length
# Safely extract results with default to empty array if slice returns nil
queue[:requested] = (results[0...n] || []).sum(&:to_i) +
(results[n...(2 * n)] || []).sum(&:to_i) +
(results[(2 * n)...(3 * n)] || []).sum(&:to_i)
queue[:pending] = (results[(3 * n)...(4 * n)] || []).sum(&:to_i)
queue[:ready] = (results[(4 * n)...(5 * n)] || []).sum(&:to_i)
queue[:running] = (results[(5 * n)...(6 * n)] || []).sum(&:to_i)
queue[:completed] = (results[(6 * n)...(7 * n)] || []).sum(&:to_i)
queue[:cloning] = (results[7 * n] || 0).to_i + (results[7 * n + 1] || 0).to_i
queue[:booting] = queue[:pending].to_i - queue[:cloning].to_i
queue[:booting] = 0 if queue[:booting] < 0
queue[:total] = queue[:requested] + queue[:pending].to_i + queue[:ready].to_i + queue[:running].to_i + queue[:completed].to_i
queue[:cloning] = backend.get('vmpooler__tasks__clone').to_i + backend.get('vmpooler__tasks__ondemandclone').to_i
queue[:booting] = queue[:pending].to_i - queue[:cloning].to_i
queue[:booting] = 0 if queue[:booting] < 0
queue[:total] = queue[:pending].to_i + queue[:ready].to_i + queue[:running].to_i + queue[:completed].to_i
queue
end
queue
end
def get_tag_metrics(backend, date_str, opts = {})
tracer.in_span("Vmpooler::API::Helpers.#{__method__}") do
opts = {:only => false}.merge(opts)
opts = {:only => false}.merge(opts)
tags = {}
tags = {}
backend.hgetall("vmpooler__tag__#{date_str}").each do |key, value|
hostname = 'unknown'
tag = 'unknown'
backend.hgetall("vmpooler__tag__#{date_str}").each do |key, value|
hostname = 'unknown'
tag = 'unknown'
if key =~ /:/
hostname, tag = key.split(':', 2)
end
next if opts[:only] && tag != opts[:only]
tags[tag] ||= {}
tags[tag][value] ||= 0
tags[tag][value] += 1
tags[tag]['total'] ||= 0
tags[tag]['total'] += 1
if key =~ /:/
hostname, tag = key.split(':', 2)
end
tags
next if opts[:only] && tag != opts[:only]
tags[tag] ||= {}
tags[tag][value] ||= 0
tags[tag][value] += 1
tags[tag]['total'] ||= 0
tags[tag]['total'] += 1
end
tags
end
def get_tag_summary(backend, from_date, to_date, opts = {})
tracer.in_span("Vmpooler::API::Helpers.#{__method__}") do
opts = {:only => false}.merge(opts)
opts = {:only => false}.merge(opts)
result = {
tag: {},
daily: []
result = {
tag: {},
daily: []
}
(from_date..to_date).each do |date|
daily = {
date: date.to_s,
tag: get_tag_metrics(backend, date.to_s, opts)
}
result[:daily].push(daily)
end
(from_date..to_date).each do |date|
daily = {
date: date.to_s,
tag: get_tag_metrics(backend, date.to_s, opts)
}
result[:daily].push(daily)
end
result[:daily].each do |daily|
daily[:tag].each_key do |tag|
result[:tag][tag] ||= {}
result[:daily].each do |daily|
daily[:tag].each_key do |tag|
result[:tag][tag] ||= {}
daily[:tag][tag].each do |key, value|
result[:tag][tag][key] ||= 0
result[:tag][tag][key] += value
end
daily[:tag][tag].each do |key, value|
result[:tag][tag][key] ||= 0
result[:tag][tag][key] += value
end
end
result
end
result
end
def get_task_metrics(backend, task_str, date_str, opts = {})
tracer.in_span("Vmpooler::API::Helpers.#{__method__}") do
opts = {:bypool => false, :only => false}.merge(opts)
opts = {:bypool => false, :only => false}.merge(opts)
task = {
duration: {
average: 0,
min: 0,
max: 0,
total: 0
},
count: {
total: 0
}
}
task = {
duration: {
average: 0,
min: 0,
max: 0,
total: 0
},
count: {
total: 0
}
}
task[:count][:total] = backend.hlen("vmpooler__#{task_str}__#{date_str}").to_i
task[:count][:total] = backend.hlen("vmpooler__#{task_str}__#{date_str}").to_i
if task[:count][:total] > 0
if opts[:bypool] == true
task_times_bypool = {}
if task[:count][:total] > 0
if opts[:bypool] == true
task_times_bypool = {}
task[:count][:pool] = {}
task[:duration][:pool] = {}
task[:count][:pool] = {}
task[:duration][:pool] = {}
backend.hgetall("vmpooler__#{task_str}__#{date_str}").each do |key, value|
pool = 'unknown'
hostname = 'unknown'
backend.hgetall("vmpooler__#{task_str}__#{date_str}").each do |key, value|
pool = 'unknown'
hostname = 'unknown'
if key =~ /:/
pool, hostname = key.split(':')
else
hostname = key
end
task[:count][:pool][pool] ||= {}
task[:duration][:pool][pool] ||= {}
task_times_bypool[pool] ||= []
task_times_bypool[pool].push(value.to_f)
if key =~ /:/
pool, hostname = key.split(':')
else
hostname = key
end
task_times_bypool.each_key do |pool|
task[:count][:pool][pool][:total] = task_times_bypool[pool].length
task[:count][:pool][pool] ||= {}
task[:duration][:pool][pool] ||= {}
task[:duration][:pool][pool][:total] = task_times_bypool[pool].reduce(:+).to_f
task[:duration][:pool][pool][:average] = (task[:duration][:pool][pool][:total] / task[:count][:pool][pool][:total]).round(1)
task[:duration][:pool][pool][:min], task[:duration][:pool][pool][:max] = task_times_bypool[pool].minmax
end
task_times_bypool[pool] ||= []
task_times_bypool[pool].push(value.to_f)
end
task_times = get_task_times(backend, task_str, date_str)
task_times_bypool.each_key do |pool|
task[:count][:pool][pool][:total] = task_times_bypool[pool].length
task[:duration][:total] = task_times.reduce(:+).to_f
task[:duration][:average] = (task[:duration][:total] / task[:count][:total]).round(1)
task[:duration][:min], task[:duration][:max] = task_times.minmax
end
if opts[:only]
task.each_key do |key|
task.delete(key) unless key.to_s == opts[:only]
task[:duration][:pool][pool][:total] = task_times_bypool[pool].reduce(:+).to_f
task[:duration][:pool][pool][:average] = (task[:duration][:pool][pool][:total] / task[:count][:pool][pool][:total]).round(1)
task[:duration][:pool][pool][:min], task[:duration][:pool][pool][:max] = task_times_bypool[pool].minmax
end
end
task
task_times = get_task_times(backend, task_str, date_str)
task[:duration][:total] = task_times.reduce(:+).to_f
task[:duration][:average] = (task[:duration][:total] / task[:count][:total]).round(1)
task[:duration][:min], task[:duration][:max] = task_times.minmax
end
if opts[:only]
task.each_key do |key|
task.delete(key) unless key.to_s == opts[:only]
end
end
task
end
def get_task_summary(backend, task_str, from_date, to_date, opts = {})
tracer.in_span("Vmpooler::API::Helpers.#{__method__}") do
opts = {:bypool => false, :only => false}.merge(opts)
opts = {:bypool => false, :only => false}.merge(opts)
task_sym = task_str.to_sym
task_sym = task_str.to_sym
result = {
task_sym => {},
daily: []
result = {
task_sym => {},
daily: []
}
(from_date..to_date).each do |date|
daily = {
date: date.to_s,
task_sym => get_task_metrics(backend, task_str, date.to_s, opts)
}
result[:daily].push(daily)
end
(from_date..to_date).each do |date|
daily = {
date: date.to_s,
task_sym => get_task_metrics(backend, task_str, date.to_s, opts)
}
result[:daily].push(daily)
end
daily_task = {}
daily_task_bypool = {} if opts[:bypool] == true
daily_task = {}
daily_task_bypool = {} if opts[:bypool] == true
result[:daily].each do |daily|
daily[task_sym].each_key do |type|
result[task_sym][type] ||= {}
daily_task[type] ||= {}
result[:daily].each do |daily|
daily[task_sym].each_key do |type|
result[task_sym][type] ||= {}
daily_task[type] ||= {}
['min', 'max'].each do |key|
if daily[task_sym][type][key]
daily_task[type][:data] ||= []
daily_task[type][:data].push(daily[task_sym][type][key])
end
end
result[task_sym][type][:total] ||= 0
result[task_sym][type][:total] += daily[task_sym][type][:total]
if opts[:bypool] == true
result[task_sym][type][:pool] ||= {}
daily_task_bypool[type] ||= {}
next unless daily[task_sym][type][:pool]
daily[task_sym][type][:pool].each_key do |pool|
result[task_sym][type][:pool][pool] ||= {}
daily_task_bypool[type][pool] ||= {}
['min', 'max'].each do |key|
if daily[task_sym][type][:pool][pool][key.to_sym]
daily_task_bypool[type][pool][:data] ||= []
daily_task_bypool[type][pool][:data].push(daily[task_sym][type][:pool][pool][key.to_sym])
end
end
result[task_sym][type][:pool][pool][:total] ||= 0
result[task_sym][type][:pool][pool][:total] += daily[task_sym][type][:pool][pool][:total]
end
['min', 'max'].each do |key|
if daily[task_sym][type][key]
daily_task[type][:data] ||= []
daily_task[type][:data].push(daily[task_sym][type][key])
end
end
end
result[task_sym].each_key do |type|
if daily_task[type][:data]
result[task_sym][type][:min], result[task_sym][type][:max] = daily_task[type][:data].minmax
result[task_sym][type][:average] = mean(daily_task[type][:data])
end
result[task_sym][type][:total] ||= 0
result[task_sym][type][:total] += daily[task_sym][type][:total]
if opts[:bypool] == true
result[task_sym].each_key do |type|
result[task_sym][type][:pool].each_key do |pool|
if daily_task_bypool[type][pool][:data]
result[task_sym][type][:pool][pool][:min], result[task_sym][type][:pool][pool][:max] = daily_task_bypool[type][pool][:data].minmax
result[task_sym][type][:pool][pool][:average] = mean(daily_task_bypool[type][pool][:data])
result[task_sym][type][:pool] ||= {}
daily_task_bypool[type] ||= {}
next unless daily[task_sym][type][:pool]
daily[task_sym][type][:pool].each_key do |pool|
result[task_sym][type][:pool][pool] ||= {}
daily_task_bypool[type][pool] ||= {}
['min', 'max'].each do |key|
if daily[task_sym][type][:pool][pool][key.to_sym]
daily_task_bypool[type][pool][:data] ||= []
daily_task_bypool[type][pool][:data].push(daily[task_sym][type][:pool][pool][key.to_sym])
end
end
result[task_sym][type][:pool][pool][:total] ||= 0
result[task_sym][type][:pool][pool][:total] += daily[task_sym][type][:pool][pool][:total]
end
end
end
end
result[task_sym].each_key do |type|
if daily_task[type][:data]
result[task_sym][type][:min], result[task_sym][type][:max] = daily_task[type][:data].minmax
result[task_sym][type][:average] = mean(daily_task[type][:data])
end
if opts[:bypool] == true
result[task_sym].each_key do |type|
result[task_sym][type][:pool].each_key do |pool|
if daily_task_bypool[type][pool][:data]
result[task_sym][type][:pool][pool][:min], result[task_sym][type][:pool][pool][:max] = daily_task_bypool[type][pool][:data].minmax
result[task_sym][type][:pool][pool][:average] = mean(daily_task_bypool[type][pool][:data])
end
end
end
end
result
end
result
end
def pool_index(pools)
@ -558,13 +464,11 @@ module Vmpooler
end
def template_ready?(pool, backend)
tracer.in_span("Vmpooler::API::Helpers.#{__method__}") do
prepared_template = backend.hget('vmpooler__template__prepared', pool['name'])
return false if prepared_template.nil?
return true if pool['template'] == prepared_template
prepared_template = backend.hget('vmpooler__template__prepared', pool['name'])
return false if prepared_template.nil?
return true if pool['template'] == prepared_template
return false
end
return false
end
def is_integer?(x)
@ -575,19 +479,10 @@ module Vmpooler
end
def open_socket(host, domain = nil, timeout = 1, port = 22, &_block)
tracer.in_span(
"Vmpooler::API::Helpers.#{__method__}",
attributes: {
'net.peer.port' => port,
'net.transport' => 'ip_tcp'
},
kind: :client
) do
Timeout.timeout(timeout) do
target_host = host
target_host = "#{host}.#{domain}" if domain
span = OpenTelemetry::Trace.current_span
span.set_attribute('net.peer.name', target_host)
sock = TCPSocket.new(target_host, port, connect_timeout: timeout)
sock = TCPSocket.new target_host, port
begin
yield sock if block_given?
ensure
@ -595,6 +490,16 @@ module Vmpooler
end
end
end
def vm_ready?(vm_name, domain = nil)
begin
open_socket(vm_name, domain)
rescue StandardError => _e
return false
end
true
end
end
end
end

View file

@ -1,159 +0,0 @@
# frozen_string_literal: true
module Vmpooler
class API
# Input validation helpers to enhance security
module InputValidator
# Maximum lengths to prevent abuse
MAX_HOSTNAME_LENGTH = 253
MAX_TAG_KEY_LENGTH = 50
MAX_TAG_VALUE_LENGTH = 255
MAX_REASON_LENGTH = 500
MAX_POOL_NAME_LENGTH = 100
MAX_TOKEN_LENGTH = 64
# Valid patterns
HOSTNAME_PATTERN = /\A[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?(\.[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?)* \z/ix.freeze
POOL_NAME_PATTERN = /\A[a-zA-Z0-9_-]+\z/.freeze
TAG_KEY_PATTERN = /\A[a-zA-Z0-9_\-.]+\z/.freeze
TOKEN_PATTERN = /\A[a-zA-Z0-9\-_]+\z/.freeze
INTEGER_PATTERN = /\A\d+\z/.freeze
class ValidationError < StandardError; end
# Validate hostname format and length
def validate_hostname(hostname)
return error_response('Hostname is required') if hostname.nil? || hostname.empty?
return error_response('Hostname too long') if hostname.length > MAX_HOSTNAME_LENGTH
return error_response('Invalid hostname format') unless hostname.match?(HOSTNAME_PATTERN)
true
end
# Validate pool/template name
def validate_pool_name(pool_name)
return error_response('Pool name is required') if pool_name.nil? || pool_name.empty?
return error_response('Pool name too long') if pool_name.length > MAX_POOL_NAME_LENGTH
return error_response('Invalid pool name format') unless pool_name.match?(POOL_NAME_PATTERN)
true
end
# Validate tag key and value
def validate_tag(key, value)
return error_response('Tag key is required') if key.nil? || key.empty?
return error_response('Tag key too long') if key.length > MAX_TAG_KEY_LENGTH
return error_response('Invalid tag key format') unless key.match?(TAG_KEY_PATTERN)
if value
return error_response('Tag value too long') if value.length > MAX_TAG_VALUE_LENGTH
# Sanitize value to prevent injection attacks
sanitized_value = value.gsub(/[^\w\s\-.@:\/]/, '')
return error_response('Tag value contains invalid characters') if sanitized_value != value
end
true
end
# Validate token format
def validate_token_format(token)
return error_response('Token is required') if token.nil? || token.empty?
return error_response('Token too long') if token.length > MAX_TOKEN_LENGTH
return error_response('Invalid token format') unless token.match?(TOKEN_PATTERN)
true
end
# Validate integer parameter
def validate_integer(value, name = 'value', min: nil, max: nil)
return error_response("#{name} is required") if value.nil?
value_str = value.to_s
return error_response("#{name} must be a valid integer") unless value_str.match?(INTEGER_PATTERN)
int_value = value.to_i
return error_response("#{name} must be at least #{min}") if min && int_value < min
return error_response("#{name} must be at most #{max}") if max && int_value > max
int_value
end
# Validate VM request count
def validate_vm_count(count)
validated = validate_integer(count, 'VM count', min: 1, max: 100)
return validated if validated.is_a?(Hash) # error response
validated
end
# Validate disk size
def validate_disk_size(size)
validated = validate_integer(size, 'Disk size', min: 1, max: 2048)
return validated if validated.is_a?(Hash) # error response
validated
end
# Validate lifetime (TTL) in hours
def validate_lifetime(lifetime)
validated = validate_integer(lifetime, 'Lifetime', min: 1, max: 168) # max 1 week
return validated if validated.is_a?(Hash) # error response
validated
end
# Validate reason text
def validate_reason(reason)
return true if reason.nil? || reason.empty?
return error_response('Reason too long') if reason.length > MAX_REASON_LENGTH
# Sanitize to prevent XSS/injection
sanitized = reason.gsub(/[<>"']/, '')
return error_response('Reason contains invalid characters') if sanitized != reason
true
end
# Sanitize JSON body to prevent injection
def sanitize_json_body(body)
return {} if body.nil? || body.empty?
begin
parsed = JSON.parse(body)
return error_response('Request body must be a JSON object') unless parsed.is_a?(Hash)
# Limit depth and size to prevent DoS
return error_response('Request body too complex') if json_depth(parsed) > 5
return error_response('Request body too large') if body.length > 10_240 # 10KB max
parsed
rescue JSON::ParserError => e
error_response("Invalid JSON: #{e.message}")
end
end
# Check if validation result is an error
def validation_error?(result)
result.is_a?(Hash) && result['ok'] == false
end
private
def error_response(message)
{ 'ok' => false, 'error' => message }
end
def json_depth(obj, depth = 0)
return depth unless obj.is_a?(Hash) || obj.is_a?(Array)
return depth + 1 if obj.empty?
if obj.is_a?(Hash)
depth + 1 + obj.values.map { |v| json_depth(v, 0) }.max
else
depth + 1 + obj.map { |v| json_depth(v, 0) }.max
end
end
end
end
end

View file

@ -1,116 +0,0 @@
# frozen_string_literal: true
module Vmpooler
class API
# Rate limiter middleware to protect against abuse
# Uses Redis to track request counts per IP and token
class RateLimiter
DEFAULT_LIMITS = {
global_per_ip: { limit: 100, period: 60 }, # 100 requests per minute per IP
authenticated: { limit: 500, period: 60 }, # 500 requests per minute with token
vm_creation: { limit: 20, period: 60 }, # 20 VM creations per minute
vm_deletion: { limit: 50, period: 60 } # 50 VM deletions per minute
}.freeze
def initialize(app, redis, config = {})
@app = app
@redis = redis
@config = DEFAULT_LIMITS.merge(config[:rate_limits] || {})
@enabled = config.fetch(:rate_limiting_enabled, true)
end
def call(env)
return @app.call(env) unless @enabled
request = Rack::Request.new(env)
client_id = identify_client(request)
endpoint_type = classify_endpoint(request)
# Check rate limits
return rate_limit_response(client_id, endpoint_type) if rate_limit_exceeded?(client_id, endpoint_type, request)
# Track the request
increment_request_count(client_id, endpoint_type)
@app.call(env)
end
private
def identify_client(request)
# Prioritize token-based identification for authenticated requests
token = request.env['HTTP_X_AUTH_TOKEN']
return "token:#{token}" if token && !token.empty?
# Fall back to IP address
ip = request.ip || request.env['REMOTE_ADDR'] || 'unknown'
"ip:#{ip}"
end
def classify_endpoint(request)
path = request.path
method = request.request_method
return :vm_creation if method == 'POST' && path.include?('/vm')
return :vm_deletion if method == 'DELETE' && path.include?('/vm')
return :authenticated if request.env['HTTP_X_AUTH_TOKEN']
:global_per_ip
end
def rate_limit_exceeded?(client_id, endpoint_type, _request)
limit_config = @config[endpoint_type] || @config[:global_per_ip]
key = "vmpooler__ratelimit__#{endpoint_type}__#{client_id}"
current_count = @redis.get(key).to_i
current_count >= limit_config[:limit]
rescue StandardError => e
# If Redis fails, allow the request through (fail open)
warn "Rate limiter Redis error: #{e.message}"
false
end
def increment_request_count(client_id, endpoint_type)
limit_config = @config[endpoint_type] || @config[:global_per_ip]
key = "vmpooler__ratelimit__#{endpoint_type}__#{client_id}"
@redis.pipelined do |pipeline|
pipeline.incr(key)
pipeline.expire(key, limit_config[:period])
end
rescue StandardError => e
# Log error but don't fail the request
warn "Rate limiter increment error: #{e.message}"
end
def rate_limit_response(client_id, endpoint_type)
limit_config = @config[endpoint_type] || @config[:global_per_ip]
key = "vmpooler__ratelimit__#{endpoint_type}__#{client_id}"
begin
ttl = @redis.ttl(key)
rescue StandardError
ttl = limit_config[:period]
end
headers = {
'Content-Type' => 'application/json',
'X-RateLimit-Limit' => limit_config[:limit].to_s,
'X-RateLimit-Remaining' => '0',
'X-RateLimit-Reset' => (Time.now.to_i + ttl).to_s,
'Retry-After' => ttl.to_s
}
body = JSON.pretty_generate({
'ok' => false,
'error' => 'Rate limit exceeded',
'limit' => limit_config[:limit],
'period' => limit_config[:period],
'retry_after' => ttl
})
[429, headers, [body]]
end
end
end
end

View file

@ -0,0 +1,73 @@
# frozen_string_literal: true
module Vmpooler
class API
class Reroute < Sinatra::Base
api_version = '1'
get '/status/?' do
call env.merge('PATH_INFO' => "/api/v#{api_version}/status")
end
get '/summary/?' do
call env.merge('PATH_INFO' => "/api/v#{api_version}/summary")
end
get '/summary/:route/?:key?/?' do
call env.merge('PATH_INFO' => "/api/v#{api_version}/summary/#{params[:route]}/#{params[:key]}")
end
get '/token/?' do
call env.merge('PATH_INFO' => "/api/v#{api_version}/token")
end
post '/token/?' do
call env.merge('PATH_INFO' => "/api/v#{api_version}/token")
end
get '/token/:token/?' do
call env.merge('PATH_INFO' => "/api/v#{api_version}/token/#{params[:token]}")
end
delete '/token/:token/?' do
call env.merge('PATH_INFO' => "/api/v#{api_version}/token/#{params[:token]}")
end
get '/vm/?' do
call env.merge('PATH_INFO' => "/api/v#{api_version}/vm")
end
post '/vm/?' do
call env.merge('PATH_INFO' => "/api/v#{api_version}/vm")
end
post '/vm/:template/?' do
call env.merge('PATH_INFO' => "/api/v#{api_version}/vm/#{params[:template]}")
end
get '/vm/:hostname/?' do
call env.merge('PATH_INFO' => "/api/v#{api_version}/vm/#{params[:hostname]}")
end
delete '/vm/:hostname/?' do
call env.merge('PATH_INFO' => "/api/v#{api_version}/vm/#{params[:hostname]}")
end
put '/vm/:hostname/?' do
call env.merge('PATH_INFO' => "/api/v#{api_version}/vm/#{params[:hostname]}")
end
post '/vm/:hostname/snapshot/?' do
call env.merge('PATH_INFO' => "/api/v#{api_version}/vm/#{params[:hostname]}/snapshot")
end
post '/vm/:hostname/snapshot/:snapshot/?' do
call env.merge('PATH_INFO' => "/api/v#{api_version}/vm/#{params[:hostname]}/snapshot/#{params[:snapshot]}")
end
put '/vm/:hostname/disk/:size/?' do
call env.merge('PATH_INFO' => "/api/v#{api_version}/vm/#{params[:hostname]}/disk/#{params[:size]}")
end
end
end
end

1634
lib/vmpooler/api/v1.rb Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -1,91 +0,0 @@
# frozen_string_literal: true
require 'pathname'
module Vmpooler
class Dns
# Load one or more VMPooler DNS plugin gems by name
#
# @param names [Array<String>] The list of gem names to load
def self.load_by_name(names)
names = Array(names)
instance = new
names.map { |name| instance.load_from_gems(name) }.flatten
end
# Returns the plugin class for the specified dns config by name
#
# @param config [Object] The entire VMPooler config object
# @param name [Symbol] The name of the dns config key to get the dns class
# @return [String] The plugin class for the specified dns config
def self.get_dns_plugin_class_by_name(config, name)
dns_configs = config[:dns_configs].keys
plugin_class = ''
dns_configs.map do |dns_config_name|
plugin_class = config[:dns_configs][dns_config_name]['dns_class'] if dns_config_name.to_s == name
end
plugin_class
end
# Returns the domain for the specified pool
#
# @param config [String] - the full config structure
# @param pool_name [String] - the name of the pool
# @return [String] - domain name for pool, which is set via reference to the dns_configs block
def self.get_domain_for_pool(config, pool_name)
pool = config[:pools].find { |p| p['name'] == pool_name }
pool_dns_config = pool['dns_plugin']
dns_configs = config[:dns_configs].keys
dns_configs.map do |dns_config_name|
return config[:dns_configs][dns_config_name]['domain'] if dns_config_name.to_s == pool_dns_config
end
end
# Returns the plugin domain for the specified dns config by name
#
# @param config [Object] The entire VMPooler config object
# @param name [Symbol] The name of the dns config key to get the dns domain
# @return [String] The domain for the specified dns config
def self.get_dns_plugin_domain_by_name(config, name)
dns_configs = config[:dns_configs].keys
dns_configs.map do |dns_config_name|
return config[:dns_configs][dns_config_name]['domain'] if dns_config_name.to_s == name
end
end
# Returns a list of DNS plugin classes specified in the vmpooler configuration
#
# @param config [Object] The entire VMPooler config object
# @return nil || [Array<String>] A list of DNS plugin classes
def self.get_dns_plugin_config_classes(config)
return nil unless config[:dns_configs]
dns_configs = config[:dns_configs].keys
dns_plugins = dns_configs.map do |dns_config_name|
if config[:dns_configs][dns_config_name] && config[:dns_configs][dns_config_name]['dns_class']
config[:dns_configs][dns_config_name]['dns_class'].to_s
else
dns_config_name.to_s
end
end.compact.uniq
# dynamic-dns is not actually a class, it's just used as a value to denote
# that dynamic dns is used so no loading or record management is needed
dns_plugins.delete('dynamic-dns')
dns_plugins
end
# Load a single DNS plugin gem by name
#
# @param name [String] The name of the DNS plugin gem to load
# @return [String] The full require path to the specified gem
def load_from_gems(name = nil)
require_path = "vmpooler/dns/#{name.gsub('-', '/')}"
require require_path
require_path
end
end
end

View file

@ -1,81 +0,0 @@
# frozen_string_literal: true
module Vmpooler
class PoolManager
class Dns
class Base
# These defs must be overridden in child classes
# Helper Methods
# Global Logger object
attr_reader :logger
# Global Metrics object
attr_reader :metrics
# Provider options passed in during initialization
attr_reader :dns_options
def initialize(config, logger, metrics, redis_connection_pool, name, options)
@config = config
@logger = logger
@metrics = metrics
@redis = redis_connection_pool
@dns_plugin_name = name
@dns_options = options
logger.log('s', "[!] Creating dns plugin '#{name}'")
end
def pool_config(pool_name)
# Get the configuration of a specific pool
@config[:pools].each do |pool|
return pool if pool['name'] == pool_name
end
nil
end
# Returns this dns plugin's configuration
#
# @returns [Hashtable] This dns plugin's configuration from the config file. Returns nil if the dns plugin config does not exist
def dns_config
@config[:dns_configs].each do |dns|
# Convert the symbol from the config into a string for comparison
return (dns[1].nil? ? {} : dns[1]) if dns[0].to_s == @dns_plugin_name
end
nil
end
def global_config
# This entire VM Pooler config
@config
end
def name
@dns_plugin_name
end
def get_ip(vm_name)
@redis.with_metrics do |redis|
redis.hget("vmpooler__vm__#{vm_name}", 'ip')
end
end
# returns
# Array[String] : Array of pool names this provider services
def provided_pools
@config[:pools].select { |pool| pool['dns_config'] == name }.map { |pool| pool['name'] }
end
def create_or_replace_record(hostname)
raise("#{self.class.name} does not implement create_or_replace_record #{hostname}")
end
def delete_record(hostname)
raise("#{self.class.name} does not implement delete_record for #{hostname}")
end
end
end
end
end

View file

@ -329,30 +329,6 @@ module Vmpooler
buckets: REDIS_CONNECT_BUCKETS,
docstring: 'vmpooler redis connection wait time',
param_labels: %i[type provider]
},
vmpooler_health: {
mtype: M_GAUGE,
torun: %i[manager],
docstring: 'vmpooler health check metrics',
param_labels: %i[metric_path]
},
vmpooler_purge: {
mtype: M_GAUGE,
torun: %i[manager],
docstring: 'vmpooler purge metrics',
param_labels: %i[metric_path]
},
vmpooler_destroy: {
mtype: M_GAUGE,
torun: %i[manager],
docstring: 'vmpooler destroy metrics',
param_labels: %i[poolname]
},
vmpooler_clone: {
mtype: M_GAUGE,
torun: %i[manager],
docstring: 'vmpooler clone metrics',
param_labels: %i[poolname]
}
}
end

File diff suppressed because it is too large

View file

@ -58,10 +58,6 @@ module Vmpooler
nil
end
def dns_config(dns_config_name)
Vmpooler::Dns.get_dns_plugin_domain_by_name(@config, dns_config_name)
end
# returns
# [Hashtable] : The entire VMPooler configuration
def global_config
@ -246,7 +242,7 @@ module Vmpooler
# returns
# nil when successful. Raises error when encountered
def create_template_delta_disks(_pool)
puts("#{self.class.name} does not implement create_template_delta_disks")
raise("#{self.class.name} does not implement create_template_delta_disks")
end
# inputs
@ -261,10 +257,6 @@ module Vmpooler
raise("#{self.class.name} does not implement purge_unconfigured_resources")
end
def get_vm_ip_address(vm_name, pool_name)
raise("#{self.class.name} does not implement get_vm_ip_address for vm #{vm_name} in pool #{pool_name}")
end
# DEPRECATED if a provider does not implement the new method, it will hit this base class method
# and return a deprecation message
def purge_unconfigured_folders(_deprecated, _deprecated2, allowlist)

View file

@ -29,10 +29,10 @@ Date.prototype.yyyymmdd = function() {
var data_url = {
'capacity': '/dashboard/stats/vmpooler/pool',
'pools' : '/api/v3/vm',
'pools' : '/api/v1/vm',
'running' : '/dashboard/stats/vmpooler/running',
'status' : '/api/v3/status',
'summary' : '/api/v3/summary'
'status' : '/api/v1/status',
'summary' : '/api/v1/summary'
};

View file

@ -1,6 +1,6 @@
# frozen_string_literal: true
# utility class shared between apps api and pool_manager
# utility class shared between apps
module Vmpooler
class Parsing
def self.get_platform_pool_count(requested, &_block)

View file

@ -1,5 +1,5 @@
# frozen_string_literal: true
module Vmpooler
VERSION = '3.8.1'
VERSION = '2.1.0'
end

View file

@ -1,15 +0,0 @@
## [3.8.1](https://github.com/puppetlabs/vmpooler/tree/3.8.1) (2026-01-14)
[Full Changelog](https://github.com/puppetlabs/vmpooler/compare/3.7.0...3.8.1)
**Implemented enhancements:**
- \(P4DEVOPS-9434\) Add rate limiting and input validation security enhancements [\#690](https://github.com/puppetlabs/vmpooler/pull/690) ([mahima-singh](https://github.com/mahima-singh))
- \(P4DEVOPS-8570\) Add Phase 2 optimizations: status API caching and improved Redis pipelining [\#689](https://github.com/puppetlabs/vmpooler/pull/689) ([mahima-singh](https://github.com/mahima-singh))
- \(P4DEVOPS-8567\) Add DLQ, auto-purge, and health checks for Redis queues [\#688](https://github.com/puppetlabs/vmpooler/pull/688) ([mahima-singh](https://github.com/mahima-singh))
- Add retry logic for immediate clone failures [\#687](https://github.com/puppetlabs/vmpooler/pull/687) ([mahima-singh](https://github.com/mahima-singh))
**Fixed bugs:**
- \(P4DEVOPS-8567\) Prevent VM allocation for already-deleted request-ids [\#688](https://github.com/puppetlabs/vmpooler/pull/688) ([mahima-singh](https://github.com/mahima-singh))
- Prevent re-queueing requests already marked as failed [\#687](https://github.com/puppetlabs/vmpooler/pull/687) ([mahima-singh](https://github.com/mahima-singh))

View file

@ -1,16 +0,0 @@
#!/usr/bin/env bash
# The container tag should closely match what is used in `docker/Dockerfile` in vmpooler-deployment
#
# Update Gemfile.lock
docker run -t --rm \
-v $(pwd):/app \
jruby:9.4.12.1-jdk11 \
/bin/bash -c 'apt-get update -qq && apt-get install -y --no-install-recommends git make netbase && cd /app && gem install bundler && bundle install --jobs 3; echo "LOCK_FILE_UPDATE_EXIT_CODE=$?"'
# Update Changelog
docker run -t --rm -e CHANGELOG_GITHUB_TOKEN -v $(pwd):/usr/local/src/your-app \
githubchangeloggenerator/github-changelog-generator:1.16.4 \
github_changelog_generator --future-release $(grep VERSION lib/vmpooler/version.rb |rev |cut -d "'" -f2 |rev) \
--token $CHANGELOG_GITHUB_TOKEN --release-branch main

View file

@ -1,4 +0,0 @@
---
:providers:
:alice:
foo: "foo"

View file

@ -1,12 +0,0 @@
---
:providers:
:bob:
foo: "foo_bob"
bar: "bar"
:pools:
- name: 'pool05'
size: 5
provider: dummy
dns_plugin: 'example'
ready_ttl: 5

View file

@ -23,13 +23,9 @@
allowed_tags:
- 'created_by'
- 'project'
domain: 'example.com'
prefix: 'poolvm-'
:dns_configs:
:example:
dns_class: dynamic-dns
domain: 'example.com'
# Uncomment the lines below to suppress metrics to STDOUT
# :statsd:
# server: 'localhost'
@ -40,10 +36,8 @@
- name: 'pool01'
size: 5
provider: dummy
dns_plugin: 'example'
ready_ttl: 5
- name: 'pool02'
size: 5
provider: dummy
dns_plugin: 'example'
ready_ttl: 5

View file

@ -23,13 +23,9 @@
allowed_tags:
- 'created_by'
- 'project'
domain: 'example.com'
prefix: 'poolvm-'
:dns_configs:
:example:
dns_class: dynamic-dns
domain: 'example.com'
# Uncomment the lines below to suppress metrics to STDOUT
# :statsd:
# server: 'localhost'
@ -40,10 +36,8 @@
- name: 'pool03'
size: 5
provider: dummy
dns_plugin: 'example'
ready_ttl: 5
- name: 'pool04'
size: 5
provider: dummy
dns_plugin: 'example'
ready_ttl: 5

View file

@ -1,45 +0,0 @@
---
:providers:
:dummy:
:redis:
server: 'localhost'
:auth:
provider: dummy
:tagfilter:
url: '(.*)\/'
:config:
site_name: 'vmpooler'
# Need to change this on Windows
logfile: '/var/log/vmpooler.log'
task_limit: 10
timeout: 15
vm_checktime: 1
vm_lifetime: 12
vm_lifetime_auth: 24
allowed_tags:
- 'created_by'
- 'project'
domain: 'example.com'
prefix: 'poolvm-'
# Uncomment the lines below to suppress metrics to STDOUT
# :statsd:
# server: 'localhost'
# prefix: 'vmpooler'
# port: 8125
:pools:
- name: 'pool01'
size: 5
provider: dummy
dns_plugin: 'example'
ready_ttl: 5
- name: 'pool02'
size: 5
provider: dummy
dns_plugin: 'example'
ready_ttl: 5

View file

@ -14,16 +14,6 @@ class MockLogger
end
end
class MockPoolManagerDnsBase
def delete_record(hostname)
end
def create_or_replace_record(hostname)
end
end
def expect_json(ok = true, http = 200)
expect(last_response.header['Content-Type']).to eq('application/json')

View file

@ -1,7 +1,7 @@
require 'spec_helper'
require 'rack/test'
describe Vmpooler::API::V3 do
describe Vmpooler::API::V1 do
include Rack::Test::Methods
def app()
@ -37,7 +37,7 @@ describe Vmpooler::API::V3 do
}
describe '/config/pooltemplate' do
let(:prefix) { '/api/v3' }
let(:prefix) { '/api/v1' }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:current_time) { Time.now }
@ -388,7 +388,7 @@ describe Vmpooler::API::V3 do
end
describe 'GET /config' do
let(:prefix) { '/api/v3' }
let(:prefix) { '/api/v1' }
it 'returns pool configuration when set' do
get "#{prefix}/config"

View file

@ -1,7 +1,7 @@
require 'spec_helper'
require 'rack/test'
describe Vmpooler::API::V3 do
describe Vmpooler::API::V1 do
include Rack::Test::Methods
def app()
@ -15,7 +15,7 @@ describe Vmpooler::API::V3 do
end
describe '/ondemandvm' do
let(:prefix) { '/api/v3' }
let(:prefix) { '/api/v1' }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:config) {
{
@ -28,25 +28,16 @@ describe Vmpooler::API::V3 do
'compute2' => 0
}
},
dns_configs: {
:mock => {
'dns_class' => 'mock',
'domain' => 'example.com'
}
},
pools: [
{'name' => 'pool1', 'size' => 0, 'clone_target' => 'compute1', 'dns_plugin' => 'mock'},
{'name' => 'pool2', 'size' => 0, 'clone_target' => 'compute2', 'dns_plugin' => 'mock'},
{'name' => 'pool3', 'size' => 0, 'clone_target' => 'compute1', 'dns_plugin' => 'mock'}
{'name' => 'pool1', 'size' => 0, 'clone_target' => 'compute1'},
{'name' => 'pool2', 'size' => 0, 'clone_target' => 'compute2'},
{'name' => 'pool3', 'size' => 0, 'clone_target' => 'compute1'}
],
alias: {
'poolone' => ['pool1'],
'pool2' => ['pool1']
},
pool_names: [ 'pool1', 'pool2', 'pool3', 'poolone' ],
providers: {
:dummy => {},
}
pool_names: [ 'pool1', 'pool2', 'pool3', 'poolone' ]
}
}
let(:current_time) { Time.now }
@ -120,6 +111,24 @@ describe Vmpooler::API::V3 do
expect(redis).to receive(:hset).with("vmpooler__odrequest__#{uuid}", 'requested', 'pool2:pool1:1')
post "#{prefix}/ondemandvm", '{"pool2":"1"}'
end
context 'with domain set in the config' do
let(:domain) { 'example.com' }
before(:each) do
config[:config]['domain'] = domain
end
it 'should include domain in the return reply' do
post "#{prefix}/ondemandvm", '{"poolone":"1"}'
expect_json(true, 201)
expected = {
"ok": true,
"request_id": uuid,
"domain": domain
}
expect(last_response.body).to eq(JSON.pretty_generate(expected))
end
end
end
context 'with a resource request that exceeds the specified limit' do
@ -253,12 +262,35 @@ describe Vmpooler::API::V3 do
"ready": true,
"pool1": {
"hostname": [
"#{vmname}.example.com"
vmname
]
}
}
expect(last_response.body).to eq(JSON.pretty_generate(expected))
end
context 'with domain set' do
let(:domain) { 'example.com' }
before(:each) do
config[:config]['domain'] = domain
end
it 'should include the domain in the result' do
get "#{prefix}/ondemandvm/#{uuid}"
expected = {
"ok": true,
"request_id": uuid,
"ready": true,
"pool1": {
"hostname": [
vmname
]
},
"domain": domain
}
expect(last_response.body).to eq(JSON.pretty_generate(expected))
end
end
end
context 'with a deleted request' do

View file

@ -1,7 +1,7 @@
require 'spec_helper'
require 'rack/test'
describe Vmpooler::API::V3 do
describe Vmpooler::API::V1 do
include Rack::Test::Methods
def app()
@ -30,7 +30,7 @@ describe Vmpooler::API::V3 do
}
describe '/poolreset' do
let(:prefix) { '/api/v3' }
let(:prefix) { '/api/v1' }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:current_time) { Time.now }

View file

@ -5,7 +5,7 @@ def has_set_tag?(vm, tag, value)
value == redis.hget("vmpooler__vm__#{vm}", "tag:#{tag}")
end
describe Vmpooler::API::V3 do
describe Vmpooler::API::V1 do
include Rack::Test::Methods
def app()
@ -17,12 +17,10 @@ describe Vmpooler::API::V3 do
# https://rubydoc.info/gems/sinatra/Sinatra/Base#reset!-class_method
before(:each) do
app.reset!
# Clear status cache to prevent test interference
Vmpooler::API::V3.clear_status_cache
end
describe 'status and metrics endpoints' do
let(:prefix) { '/api/v3' }
let(:prefix) { '/api/v1' }
let(:config) {
{

View file

@ -1,7 +1,7 @@
require 'spec_helper'
require 'rack/test'
describe Vmpooler::API::V3 do
describe Vmpooler::API::V1 do
include Rack::Test::Methods
def app()
@ -16,7 +16,7 @@ describe Vmpooler::API::V3 do
end
describe '/token' do
let(:prefix) { '/api/v3' }
let(:prefix) { '/api/v1' }
let(:current_time) { Time.now }
let(:config) { {
config: {}
@ -111,7 +111,7 @@ describe Vmpooler::API::V3 do
end
describe '/token/:token' do
let(:prefix) { '/api/v3' }
let(:prefix) { '/api/v1' }
let(:current_time) { Time.now }
before(:each) do

View file

@ -5,7 +5,7 @@ def has_set_tag?(vm, tag, value)
value == redis.hget("vmpooler__vm__#{vm}", "tag:#{tag}")
end
describe Vmpooler::API::V3 do
describe Vmpooler::API::V1 do
include Rack::Test::Methods
def app()
@ -20,7 +20,7 @@ describe Vmpooler::API::V3 do
end
describe '/vm/:hostname' do
let(:prefix) { '/api/v3' }
let(:prefix) { '/api/v1' }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:config) {

View file

@ -1,7 +1,7 @@
require 'spec_helper'
require 'rack/test'
describe Vmpooler::API::V3 do
describe Vmpooler::API::V1 do
include Rack::Test::Methods
def app()
@ -16,7 +16,7 @@ describe Vmpooler::API::V3 do
end
describe '/vm' do
let(:prefix) { '/api/v3' }
let(:prefix) { '/api/v1' }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:config) {
{
@ -24,29 +24,10 @@ describe Vmpooler::API::V3 do
'site_name' => 'test pooler',
'vm_lifetime_auth' => 2
},
providers: {
vsphere: {},
gce: {},
foo: {}
},
dns_configs: {
:one => {
'dns_class' => 'mock',
'domain' => 'one.example.com'
},
:two => {
'dns_class' => 'mock',
'domain' => 'two.example.com'
},
:three => {
'dns_class' => 'mock',
'domain' => 'three.example.com'
}
},
pools: [
{'name' => 'pool1', 'size' => 5, 'provider' => 'vsphere', 'dns_plugin' => 'one'},
{'name' => 'pool2', 'size' => 10, 'provider' => 'gce', 'dns_plugin' => 'two'},
{'name' => 'pool3', 'size' => 10, 'provider' => 'foo', 'dns_plugin' => 'three'}
{'name' => 'pool1', 'size' => 5},
{'name' => 'pool2', 'size' => 10},
{'name' => 'pool3', 'size' => 10}
],
statsd: { 'prefix' => 'stats_prefix'},
alias: { 'poolone' => ['pool1'] },
@ -98,7 +79,7 @@ describe Vmpooler::API::V3 do
expected = {
ok: true,
pool1: {
hostname: "#{vmname}.one.example.com"
hostname: vmname
}
}
@ -116,7 +97,7 @@ describe Vmpooler::API::V3 do
expected = {
ok: true,
poolone: {
hostname: "#{vmname}.one.example.com"
hostname: vmname
}
}
@ -143,7 +124,8 @@ describe Vmpooler::API::V3 do
end
it 'returns 503 for empty pool referenced by alias' do
create_ready_vm 'pool2', vmname, redis
create_ready_vm 'pool1', vmname, redis
post "#{prefix}/vm/poolone"
post "#{prefix}/vm/poolone"
expected = { ok: false }
@ -164,10 +146,10 @@ describe Vmpooler::API::V3 do
expected = {
ok: true,
pool1: {
hostname: "#{vmname}.one.example.com"
hostname: vmname
},
pool2: {
hostname: 'qrstuvwxyz012345.two.example.com'
hostname: 'qrstuvwxyz012345'
}
}
@ -195,8 +177,8 @@ describe Vmpooler::API::V3 do
result = JSON.parse(last_response.body)
expect(result['ok']).to eq(true)
expect(result['pool1']['hostname']).to include('1abcdefghijklmnop.one.example.com', '2abcdefghijklmnop.one.example.com')
expect(result['pool2']['hostname']).to eq('qrstuvwxyz012345.two.example.com')
expect(result['pool1']['hostname']).to include('1abcdefghijklmnop', '2abcdefghijklmnop')
expect(result['pool2']['hostname']).to eq('qrstuvwxyz012345')
expect_json(ok = true, http = 200)
end
@ -224,8 +206,8 @@ describe Vmpooler::API::V3 do
result = JSON.parse(last_response.body)
expect(result['ok']).to eq(true)
expect(result['pool1']['hostname']).to include('1abcdefghijklmnop.one.example.com', '2abcdefghijklmnop.one.example.com')
expect(result['pool2']['hostname']).to include('1qrstuvwxyz012345.two.example.com', '2qrstuvwxyz012345.two.example.com', '3qrstuvwxyz012345.two.example.com')
expect(result['pool1']['hostname']).to include('1abcdefghijklmnop', '2abcdefghijklmnop')
expect(result['pool2']['hostname']).to include('1qrstuvwxyz012345', '2qrstuvwxyz012345', '3qrstuvwxyz012345')
expect_json(ok = true, http = 200)
end
@ -250,7 +232,7 @@ describe Vmpooler::API::V3 do
result = JSON.parse(last_response.body)
expect(result['ok']).to eq(true)
expect(result['genericpool']['hostname']).to include('1abcdefghijklmnop.one.example.com', '2abcdefghijklmnop.two.example.com', '1qrstuvwxyz012345.three.example.com')
expect(result['genericpool']['hostname']).to include('1abcdefghijklmnop', '2abcdefghijklmnop', '1qrstuvwxyz012345')
expect_json(ok = true, http = 200)
end
@ -267,7 +249,7 @@ describe Vmpooler::API::V3 do
expected = {
ok: true,
"pool1": {
"hostname": "1abcdefghijklmnop.one.example.com"
"hostname": "1abcdefghijklmnop"
}
}
@ -356,7 +338,7 @@ describe Vmpooler::API::V3 do
end
it 'returns the second VM when the first fails to respond' do
create_running_vm 'pool1', vmname, redis
create_ready_vm 'pool1', vmname, redis
create_ready_vm 'pool1', "2#{vmname}", redis
allow_any_instance_of(Vmpooler::API::Helpers).to receive(:open_socket).with(vmname, nil).and_raise('mockerror')
@ -368,7 +350,7 @@ describe Vmpooler::API::V3 do
expected = {
ok: true,
pool1: {
hostname: "2#{vmname}.one.example.com"
hostname: "2#{vmname}"
}
}
@ -393,7 +375,7 @@ describe Vmpooler::API::V3 do
expected = {
ok: true,
pool1: {
hostname: 'abcdefghijklmnop.one.example.com'
hostname: 'abcdefghijklmnop'
}
}
expect(last_response.body).to eq(JSON.pretty_generate(expected))
@ -419,7 +401,7 @@ describe Vmpooler::API::V3 do
expected = {
ok: true,
pool1: {
hostname: 'abcdefghijklmnop.one.example.com'
hostname: 'abcdefghijklmnop'
}
}
expect(last_response.body).to eq(JSON.pretty_generate(expected))
@ -440,7 +422,7 @@ describe Vmpooler::API::V3 do
expected = {
ok: true,
pool1: {
hostname: 'abcdefghijklmnop.one.example.com'
hostname: 'abcdefghijklmnop'
}
}
expect(last_response.body).to eq(JSON.pretty_generate(expected))

View file

@ -1,7 +1,7 @@
require 'spec_helper'
require 'rack/test'
describe Vmpooler::API::V3 do
describe Vmpooler::API::V1 do
include Rack::Test::Methods
def app()
@ -16,7 +16,7 @@ describe Vmpooler::API::V3 do
end
describe '/vm/:template' do
let(:prefix) { '/api/v3' }
let(:prefix) { '/api/v1' }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:config) {
{
@ -24,17 +24,10 @@ describe Vmpooler::API::V3 do
'site_name' => 'test pooler',
'vm_lifetime_auth' => 2,
},
dns_configs: {
:example => {
'dns_class' => 'mock',
'domain' => 'example.com'
}
},
providers: { vsphere: {} },
pools: [
{'name' => 'pool1', 'size' => 5, 'provider' => 'vsphere', 'dns_plugin' => 'example'},
{'name' => 'pool2', 'size' => 10, 'provider' => 'vsphere', 'dns_plugin' => 'example'},
{'name' => 'poolone', 'size' => 1, 'provider' => 'vsphere', 'dns_plugin' => 'example'}
{'name' => 'pool1', 'size' => 5},
{'name' => 'pool2', 'size' => 10},
{'name' => 'poolone', 'size' => 0}
],
statsd: { 'prefix' => 'stats_prefix'},
alias: { 'poolone' => 'pool1' },
@ -66,7 +59,7 @@ describe Vmpooler::API::V3 do
expected = {
ok: true,
pool1: {
hostname: 'abcdefghijklmnop.example.com'
hostname: 'abcdefghijklmnop'
}
}
@ -83,7 +76,7 @@ describe Vmpooler::API::V3 do
expected = {
ok: true,
poolone: {
hostname: 'abcdefghijklmnop.example.com'
hostname: 'abcdefghijklmnop'
}
}
expect_json(ok = true, http = 200)
@ -111,7 +104,7 @@ describe Vmpooler::API::V3 do
end
it 'returns 503 for empty pool referenced by alias' do
create_ready_vm 'pool2', 'abcdefghijklmnop', redis
create_ready_vm 'pool1', 'abcdefghijklmnop', redis
post "#{prefix}/vm/poolone"
expected = { ok: false }
@ -132,10 +125,10 @@ describe Vmpooler::API::V3 do
expected = {
ok: true,
pool1: {
hostname: 'abcdefghijklmnop.example.com'
hostname: 'abcdefghijklmnop'
},
pool2: {
hostname: 'qrstuvwxyz012345.example.com'
hostname: 'qrstuvwxyz012345'
}
}
@ -157,17 +150,17 @@ describe Vmpooler::API::V3 do
expected = {
ok: true,
pool1: {
hostname: [ '1abcdefghijklmnop.example.com', '2abcdefghijklmnop.example.com' ]
hostname: [ '1abcdefghijklmnop', '2abcdefghijklmnop' ]
},
pool2: {
hostname: [ '1qrstuvwxyz012345.example.com', '2qrstuvwxyz012345.example.com', '3qrstuvwxyz012345.example.com' ]
hostname: [ '1qrstuvwxyz012345', '2qrstuvwxyz012345', '3qrstuvwxyz012345' ]
}
}
result = JSON.parse(last_response.body)
expect(result['ok']).to eq(true)
expect(result['pool1']['hostname']).to include('1abcdefghijklmnop.example.com', '2abcdefghijklmnop.example.com')
expect(result['pool2']['hostname']).to include('1qrstuvwxyz012345.example.com', '2qrstuvwxyz012345.example.com', '3qrstuvwxyz012345.example.com')
expect(result['pool1']['hostname']).to include('1abcdefghijklmnop', '2abcdefghijklmnop')
expect(result['pool2']['hostname']).to include('1qrstuvwxyz012345', '2qrstuvwxyz012345', '3qrstuvwxyz012345')
expect_json(ok = true, http = 200)
end
@ -271,7 +264,7 @@ describe Vmpooler::API::V3 do
expected = {
ok: true,
pool1: {
hostname: 'abcdefghijklmnop.example.com'
hostname: 'abcdefghijklmnop'
}
}
@ -297,7 +290,7 @@ describe Vmpooler::API::V3 do
expected = {
ok: true,
pool1: {
hostname: 'abcdefghijklmnop.example.com'
hostname: 'abcdefghijklmnop'
}
}
expect(last_response.body).to eq(JSON.pretty_generate(expected))
@ -317,7 +310,7 @@ describe Vmpooler::API::V3 do
expected = {
ok: true,
pool1: {
hostname: 'abcdefghijklmnop.example.com'
hostname: 'abcdefghijklmnop'
}
}
expect_json(ok = true, http = 200)

View file

@ -16,12 +16,14 @@ describe Vmpooler::API::Helpers do
describe '#hostname_shorten' do
[
['example.com', 'example'],
['sub.example.com', 'sub'],
['adjective-noun.example.com', 'adjective-noun'],
['abc123.example.com', 'abc123']
].each do |hostname, expected|
it { expect(subject.hostname_shorten(hostname)).to eq expected }
['example.com', 'not-example.com', 'example.com'],
['example.com', 'example.com', 'example.com'],
['sub.example.com', 'example.com', 'sub'],
['adjective-noun.example.com', 'example.com', 'adjective-noun'],
['abc123.example.com', 'example.com', 'abc123'],
['example.com', nil, 'example.com']
].each do |hostname, domain, expected|
it { expect(subject.hostname_shorten(hostname, domain)).to eq expected }
end
end
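The table above pins down the intended contract for hostname_shorten with an explicit domain argument: the domain suffix is stripped only when the hostname is actually a host under that domain, and the name comes back untouched when the domain is nil, does not match, or equals the hostname itself. A minimal Ruby sketch that satisfies those rows (the method name and arity come from the spec; the body is an illustrative assumption, not the shipped helper):

def hostname_shorten(hostname, domain = nil)
  # No domain configured, or the hostname is not a sub-name of it
  # (including the bare domain itself): hand it back unchanged.
  return hostname if domain.nil? || !hostname.end_with?(".#{domain}")

  # Otherwise drop the trailing ".<domain>" suffix.
  hostname.delete_suffix(".#{domain}")
end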
@ -116,7 +118,7 @@ describe Vmpooler::API::Helpers do
allow(redis).to receive(:pipelined).with(no_args).and_return [0]
allow(redis).to receive(:get).and_return 0
expect(subject.get_queue_metrics([], redis)).to eq({requested: 0, pending: 0, cloning: 0, booting: 0, ready: 0, running: 0, completed: 0, total: 0})
expect(subject.get_queue_metrics([], redis)).to eq({pending: 0, cloning: 0, booting: 0, ready: 0, running: 0, completed: 0, total: 0})
end
it 'adds pool queues correctly' do
@ -125,14 +127,10 @@ describe Vmpooler::API::Helpers do
{'name' => 'p2'}
]
# Mock returns 7*2 + 2 = 16 results (7 queue types for 2 pools + 2 global counters)
# For each pool: [request, processing, odcreate, pending, ready, running, completed]
# Plus 2 global counters: clone (1), ondemandclone (0)
# Results array: [1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1,1, 1, 0]
# [req, proc, odc, pend, rdy, run, comp, clone, odc]
allow(redis).to receive(:pipelined).with(no_args).and_return [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0]
allow(redis).to receive(:pipelined).with(no_args).and_return [1,1]
allow(redis).to receive(:get).and_return(1,0)
expect(subject.get_queue_metrics(pools, redis)).to eq({requested: 6, pending: 2, cloning: 1, booting: 1, ready: 2, running: 2, completed: 2, total: 14})
expect(subject.get_queue_metrics(pools, redis)).to eq({pending: 2, cloning: 1, booting: 1, ready: 2, running: 2, completed: 2, total: 8})
end
it 'sets booting to 0 when negative calculation' do
@ -141,10 +139,10 @@ describe Vmpooler::API::Helpers do
{'name' => 'p2'}
]
# Mock returns 7*2 + 2 = 16 results with clone=5 to cause negative booting
allow(redis).to receive(:pipelined).with(no_args).and_return [1,1,1,1,1,1,1,1,1,1,1,1,1,1,5,0]
allow(redis).to receive(:pipelined).with(no_args).and_return [1,1]
allow(redis).to receive(:get).and_return(5,0)
expect(subject.get_queue_metrics(pools, redis)).to eq({requested: 6, pending: 2, cloning: 5, booting: 0, ready: 2, running: 2, completed: 2, total: 14})
expect(subject.get_queue_metrics(pools, redis)).to eq({pending: 2, cloning: 5, booting: 0, ready: 2, running: 2, completed: 2, total: 8})
end
end
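The mocked pipelined arrays above encode the arithmetic the main-branch helper is expected to perform: seven per-pool counters (request, processing, odcreate, pending, ready, running, completed) for each pool, followed by the global clone and ondemandclone counters. A rough sketch of that aggregation, inferred from the comments and the expected hashes; the name aggregate_queue_metrics and its signature are illustrative stand-ins, not the real get_queue_metrics:

# Aggregates a flat pipelined result shaped like
# [req, proc, odc, pend, rdy, run, comp] * pool_count + [clone, ondemandclone]
def aggregate_queue_metrics(results, pool_count)
  per_pool = results.first(pool_count * 7).each_slice(7).to_a
  request, processing, odcreate, pending, ready, running, completed =
    per_pool.transpose.map(&:sum)
  cloning = results[pool_count * 7]

  requested = request + processing + odcreate
  booting   = [pending - cloning, 0].max          # clamped at zero, as the last test asserts
  total     = requested + pending + ready + running + completed

  { requested: requested, pending: pending, cloning: cloning, booting: booting,
    ready: ready, running: running, completed: completed, total: total }
end

# With the mock above: aggregate_queue_metrics([1] * 14 + [1, 0], 2)
# => { requested: 6, pending: 2, cloning: 1, booting: 1,
#      ready: 2, running: 2, completed: 2, total: 14 }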
@ -272,62 +270,22 @@ describe Vmpooler::API::Helpers do
:tls_options => { :ssl_version => 'TLSv1' }
}
end
it 'should attempt ldap authentication' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base, username_str, password_str)
context 'without a service account' do
it 'should attempt ldap authentication' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base, username_str, password_str, nil)
subject.authenticate(auth, username_str, password_str)
end
it 'should return true when authentication is successful' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base, username_str, password_str, nil).and_return(true)
expect(subject.authenticate(auth, username_str, password_str)).to be true
end
it 'should return false when authentication fails' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base, username_str, password_str, nil).and_return(false)
expect(subject.authenticate(auth, username_str, password_str)).to be false
end
subject.authenticate(auth, username_str, password_str)
end
context 'with a service account' do
let(:service_account_hash) do
{
:user_dn => 'cn=Service Account,ou=users,dc=example,dc=com',
:password => 's3cr3t'
}
end
let(:auth) {
{
'provider' => 'ldap',
ldap: {
'host' => host,
'base' => base,
'user_object' => user_object,
'service_account_hash' => service_account_hash
}
}
}
it 'should attempt ldap authentication' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base, username_str, password_str, service_account_hash)
it 'should return true when authentication is successful' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base, username_str, password_str).and_return(true)
subject.authenticate(auth, username_str, password_str)
end
expect(subject.authenticate(auth, username_str, password_str)).to be true
end
it 'should return true when authentication is successful' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base, username_str, password_str, service_account_hash).and_return(true)
it 'should return false when authentication fails' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base, username_str, password_str).and_return(false)
expect(subject.authenticate(auth, username_str, password_str)).to be true
end
it 'should return false when authentication fails' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base, username_str, password_str, service_account_hash).and_return(false)
expect(subject.authenticate(auth, username_str, password_str)).to be false
end
expect(subject.authenticate(auth, username_str, password_str)).to be false
end
context 'with an alternate ssl_version' do
@ -342,7 +300,7 @@ describe Vmpooler::API::Helpers do
end
it 'should specify the alternate ssl_version when authenticating' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, secure_encryption, user_object, base, username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, secure_encryption, user_object, base, username_str, password_str)
subject.authenticate(auth, username_str, password_str)
end
@ -355,7 +313,7 @@ describe Vmpooler::API::Helpers do
end
it 'should specify the alternate port when authenticating' do
expect(subject).to receive(:authenticate_ldap).with(alternate_port, host, default_encryption, user_object, base, username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(alternate_port, host, default_encryption, user_object, base, username_str, password_str)
subject.authenticate(auth, username_str, password_str)
end
@ -375,7 +333,7 @@ describe Vmpooler::API::Helpers do
end
it 'should specify the secure port and encryption options when authenticating' do
expect(subject).to receive(:authenticate_ldap).with(secure_port, host, secure_encryption, user_object, base, username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(secure_port, host, secure_encryption, user_object, base, username_str, password_str)
subject.authenticate(auth, username_str, password_str)
end
@ -393,36 +351,36 @@ describe Vmpooler::API::Helpers do
end
it 'should attempt to bind with each base' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[0], username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[1], username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[0], username_str, password_str)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[1], username_str, password_str)
subject.authenticate(auth, username_str, password_str)
end
it 'should not search the second base when the first binds' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[0], username_str, password_str, nil).and_return(true)
expect(subject).to_not receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[1], username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[0], username_str, password_str).and_return(true)
expect(subject).to_not receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[1], username_str, password_str)
subject.authenticate(auth, username_str, password_str)
end
it 'should search the second base when the first bind fails' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[0], username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[1], username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[0], username_str, password_str).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[1], username_str, password_str)
subject.authenticate(auth, username_str, password_str)
end
it 'should return true when any bind succeeds' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[0], username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[1], username_str, password_str, nil).and_return(true)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[0], username_str, password_str).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[1], username_str, password_str).and_return(true)
expect(subject.authenticate(auth, username_str, password_str)).to be true
end
it 'should return false when all bind attempts fail' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[0], username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[1], username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[0], username_str, password_str).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object, base[1], username_str, password_str).and_return(false)
expect(subject.authenticate(auth, username_str, password_str)).to be false
end
@ -440,36 +398,36 @@ describe Vmpooler::API::Helpers do
end
it 'should attempt to bind with each user object' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base, username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base, username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base, username_str, password_str)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base, username_str, password_str)
subject.authenticate(auth, username_str, password_str)
end
it 'should not search the second user object when the first binds' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base, username_str, password_str, nil).and_return(true)
expect(subject).to_not receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base, username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base, username_str, password_str).and_return(true)
expect(subject).to_not receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base, username_str, password_str)
subject.authenticate(auth, username_str, password_str)
end
it 'should search the second user object when the first bind fails' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base, username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base, username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base, username_str, password_str).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base, username_str, password_str)
subject.authenticate(auth, username_str, password_str)
end
it 'should return true when any bind succeeds' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base, username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base, username_str, password_str, nil).and_return(true)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base, username_str, password_str).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base, username_str, password_str).and_return(true)
expect(subject.authenticate(auth, username_str, password_str)).to be true
end
it 'should return false when all bind attempts fail' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base, username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base, username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base, username_str, password_str).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base, username_str, password_str).and_return(false)
expect(subject.authenticate(auth, username_str, password_str)).to be false
end
@ -494,64 +452,64 @@ describe Vmpooler::API::Helpers do
end
it 'should attempt to bind with each user object and base' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[0], username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[0], username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[1], username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[1], username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[0], username_str, password_str)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[0], username_str, password_str)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[1], username_str, password_str)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[1], username_str, password_str)
subject.authenticate(auth, username_str, password_str)
end
it 'should not continue searching when the first combination binds' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[0], username_str, password_str, nil).and_return(true)
expect(subject).to_not receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[0], username_str, password_str, nil)
expect(subject).to_not receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[1], username_str, password_str, nil)
expect(subject).to_not receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[1], username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[0], username_str, password_str).and_return(true)
expect(subject).to_not receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[0], username_str, password_str)
expect(subject).to_not receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[1], username_str, password_str)
expect(subject).to_not receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[1], username_str, password_str)
subject.authenticate(auth, username_str, password_str)
end
it 'should search the remaining combinations when the first bind fails' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[0], username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[0], username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[1], username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[1], username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[0], username_str, password_str).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[0], username_str, password_str)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[1], username_str, password_str)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[1], username_str, password_str)
subject.authenticate(auth, username_str, password_str)
end
it 'should search the remaining combinations when the first two binds fail' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[0], username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[0], username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[1], username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[1], username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[0], username_str, password_str).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[0], username_str, password_str).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[1], username_str, password_str)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[1], username_str, password_str)
subject.authenticate(auth, username_str, password_str)
end
it 'should search the remaining combination when the first three binds fail' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[0], username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[0], username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[1], username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[1], username_str, password_str, nil)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[0], username_str, password_str).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[0], username_str, password_str).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[1], username_str, password_str).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[1], username_str, password_str)
subject.authenticate(auth, username_str, password_str)
end
it 'should return true when any bind succeeds' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[0], username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[0], username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[1], username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[1], username_str, password_str, nil).and_return(true)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[0], username_str, password_str).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[0], username_str, password_str).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[1], username_str, password_str).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[1], username_str, password_str).and_return(true)
expect(subject.authenticate(auth, username_str, password_str)).to be true
end
it 'should return false when all bind attempts fail' do
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[0], username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[0], username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[1], username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[1], username_str, password_str, nil).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[0], username_str, password_str).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[0], username_str, password_str).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[0], base[1], username_str, password_str).and_return(false)
expect(subject).to receive(:authenticate_ldap).with(default_port, host, default_encryption, user_object[1], base[1], username_str, password_str).and_return(false)
expect(subject.authenticate(auth, username_str, password_str)).to be false
end
@ -585,12 +543,6 @@ describe Vmpooler::API::Helpers do
:tls_options => { :ssl_version => 'TLSv1' }
}
end
let(:service_account_hash) do
{
:user_dn => 'cn=Service Account,ou=users,dc=example,dc=com',
:password => 's3cr3t'
}
end
let(:ldap) { double('ldap') }
it 'should create a new ldap connection' do
allow(ldap).to receive(:bind)
@ -622,20 +574,6 @@ describe Vmpooler::API::Helpers do
expect(subject.authenticate_ldap(port, host, encryption, user_object, base, username_str, password_str)).to be false
end
it 'should return true when a bind_as is successful' do
expect(Net::LDAP).to receive(:new).and_return(ldap)
expect(ldap).to receive(:bind_as).and_return(true)
expect(subject.authenticate_ldap(port, host, encryption, user_object, base, username_str, password_str, service_account_hash)).to be true
end
it 'should return false when a bind_as fails' do
expect(Net::LDAP).to receive(:new).and_return(ldap)
expect(ldap).to receive(:bind_as).and_return(false)
expect(subject.authenticate_ldap(port, host, encryption, user_object, base, username_str, password_str, service_account_hash)).to be false
end
end
end
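The expectations above outline the behavioural change under test: authenticate_ldap gains an optional service-account argument, and when one is supplied the user is authenticated via Net::LDAP#bind_as (bind as the service account, search for the user, re-bind as the located entry) instead of a direct simple bind. A hedged sketch of both branches, based only on the calls stubbed in this spec; the connection options and the search filter are illustrative assumptions rather than vmpooler's actual implementation:

require 'net/ldap'

def authenticate_ldap(port, host, encryption, user_object, base, username, password, service_account_hash = nil)
  if service_account_hash
    # Service-account flow: bind with the service account, then bind_as the
    # located user entry using the password supplied by the caller.
    ldap = Net::LDAP.new(
      host: host, port: port, encryption: encryption, base: base,
      auth: { method: :simple,
              username: service_account_hash[:user_dn],
              password: service_account_hash[:password] }
    )
    !!ldap.bind_as(
      base: base,
      filter: Net::LDAP::Filter.eq(user_object, username),
      password: password
    )
  else
    # Direct flow: simple bind as "<user_object>=<username>,<base>".
    ldap = Net::LDAP.new(
      host: host, port: port, encryption: encryption, base: base,
      auth: { method: :simple,
              username: "#{user_object}=#{username},#{base}",
              password: password }
    )
    ldap.bind
  end
end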

View file

@ -1,184 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
require 'rack/test'
require 'vmpooler/api/input_validator'
describe Vmpooler::API::InputValidator do
let(:test_class) do
Class.new do
include Vmpooler::API::InputValidator
end
end
let(:validator) { test_class.new }
describe '#validate_hostname' do
it 'accepts valid hostnames' do
expect(validator.validate_hostname('test-host.example.com')).to be true
expect(validator.validate_hostname('host123')).to be true
end
it 'rejects invalid hostnames' do
result = validator.validate_hostname('invalid_host!')
expect(result['ok']).to be false
expect(result['error']).to include('Invalid hostname format')
end
it 'rejects hostnames that are too long' do
long_hostname = 'a' * 300
result = validator.validate_hostname(long_hostname)
expect(result['ok']).to be false
expect(result['error']).to include('too long')
end
it 'rejects empty hostnames' do
result = validator.validate_hostname('')
expect(result['ok']).to be false
expect(result['error']).to include('required')
end
end
describe '#validate_pool_name' do
it 'accepts valid pool names' do
expect(validator.validate_pool_name('centos-7-x86_64')).to be true
expect(validator.validate_pool_name('ubuntu-2204')).to be true
end
it 'rejects invalid pool names' do
result = validator.validate_pool_name('invalid pool!')
expect(result['ok']).to be false
expect(result['error']).to include('Invalid pool name format')
end
it 'rejects pool names that are too long' do
result = validator.validate_pool_name('a' * 150)
expect(result['ok']).to be false
expect(result['error']).to include('too long')
end
end
describe '#validate_tag' do
it 'accepts valid tags' do
expect(validator.validate_tag('project', 'test-123')).to be true
expect(validator.validate_tag('owner', 'user@example.com')).to be true
end
it 'rejects tags with invalid keys' do
result = validator.validate_tag('invalid key!', 'value')
expect(result['ok']).to be false
expect(result['error']).to include('Invalid tag key format')
end
it 'rejects tags with invalid characters in value' do
result = validator.validate_tag('key', 'value<script>')
expect(result['ok']).to be false
expect(result['error']).to include('invalid characters')
end
it 'rejects tags that are too long' do
result = validator.validate_tag('key', 'a' * 300)
expect(result['ok']).to be false
expect(result['error']).to include('too long')
end
end
describe '#validate_vm_count' do
it 'accepts valid VM counts' do
expect(validator.validate_vm_count(5)).to eq(5)
expect(validator.validate_vm_count('10')).to eq(10)
end
it 'rejects counts less than 1' do
result = validator.validate_vm_count(0)
expect(result['ok']).to be false
expect(result['error']).to include('at least 1')
end
it 'rejects counts greater than 100' do
result = validator.validate_vm_count(150)
expect(result['ok']).to be false
expect(result['error']).to include('at most 100')
end
it 'rejects non-integer values' do
result = validator.validate_vm_count('abc')
expect(result['ok']).to be false
expect(result['error']).to include('valid integer')
end
end
describe '#validate_disk_size' do
it 'accepts valid disk sizes' do
expect(validator.validate_disk_size(50)).to eq(50)
expect(validator.validate_disk_size('100')).to eq(100)
end
it 'rejects sizes less than 1' do
result = validator.validate_disk_size(0)
expect(result['ok']).to be false
end
it 'rejects sizes greater than 2048' do
result = validator.validate_disk_size(3000)
expect(result['ok']).to be false
end
end
describe '#validate_lifetime' do
it 'accepts valid lifetimes' do
expect(validator.validate_lifetime(24)).to eq(24)
expect(validator.validate_lifetime('48')).to eq(48)
end
it 'rejects lifetimes greater than 168 hours (1 week)' do
result = validator.validate_lifetime(200)
expect(result['ok']).to be false
expect(result['error']).to include('at most 168')
end
end
describe '#sanitize_json_body' do
it 'parses valid JSON' do
result = validator.sanitize_json_body('{"key": "value"}')
expect(result).to eq('key' => 'value')
end
it 'rejects invalid JSON' do
result = validator.sanitize_json_body('{invalid}')
expect(result['ok']).to be false
expect(result['error']).to include('Invalid JSON')
end
it 'rejects non-object JSON' do
result = validator.sanitize_json_body('["array"]')
expect(result['ok']).to be false
expect(result['error']).to include('must be a JSON object')
end
it 'rejects deeply nested JSON' do
deep_json = '{"a":{"b":{"c":{"d":{"e":{"f":"too deep"}}}}}}'
result = validator.sanitize_json_body(deep_json)
expect(result['ok']).to be false
expect(result['error']).to include('too complex')
end
it 'rejects bodies that are too large' do
large_json = '{"data":"' + ('a' * 20000) + '"}'
result = validator.sanitize_json_body(large_json)
expect(result['ok']).to be false
expect(result['error']).to include('too large')
end
end
describe '#validation_error?' do
it 'returns true for error responses' do
error = { 'ok' => false, 'error' => 'test error' }
expect(validator.validation_error?(error)).to be true
end
it 'returns false for successful responses' do
expect(validator.validation_error?(true)).to be false
expect(validator.validation_error?(5)).to be false
end
end
end
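The deleted spec spells out the validator contract: a successful validation returns the parsed value, while a failure returns a hash with 'ok' => false and a human-readable 'error'. A minimal sketch of one of those validators, using the bounds and error wording asserted above (everything beyond that, including the rescue strategy, is an assumption):

def validate_vm_count(value)
  count = Integer(value)
  return { 'ok' => false, 'error' => 'count must be at least 1' } if count < 1
  return { 'ok' => false, 'error' => 'count must be at most 100' } if count > 100

  count
rescue ArgumentError, TypeError
  # Non-numeric input such as 'abc' lands here.
  { 'ok' => false, 'error' => 'count must be a valid integer' }
end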

View file

@ -1,172 +0,0 @@
require 'spec_helper'
require 'vmpooler/dns/base'
# This spec does not really exercise code paths but is merely used
# to enforce that certain methods are defined in the base classes
describe 'Vmpooler::PoolManager::Dns::Base' do
let(:logger) { MockLogger.new }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:config) { {} }
let(:dns_plugin_name) { 'base' }
let(:dns_options) { { 'param' => 'value' } }
let(:fake_vm) {
fake_vm = {}
fake_vm['name'] = 'vm1'
fake_vm['hostname'] = 'vm1'
fake_vm['template'] = 'pool1'
fake_vm['boottime'] = Time.now
fake_vm['powerstate'] = 'PoweredOn'
fake_vm
}
let(:redis_connection_pool) { Vmpooler::PoolManager::GenericConnectionPool.new(
metrics: metrics,
connpool_type: 'redis_connection_pool',
connpool_provider: 'testprovider',
size: 1,
timeout: 5
) { MockRedis.new }
}
subject { Vmpooler::PoolManager::Dns::Base.new(config, logger, metrics, redis_connection_pool, dns_plugin_name, dns_options) }
# Helper attr_reader methods
describe '#logger' do
it 'should come from the provider initialization' do
expect(subject.logger).to be(logger)
end
end
describe '#metrics' do
it 'should come from the provider initialization' do
expect(subject.metrics).to be(metrics)
end
end
describe '#dns_options' do
it 'should come from the provider initialization' do
expect(subject.dns_options).to be(dns_options)
end
end
describe '#pool_config' do
let(:poolname) { 'pool1' }
let(:config) { YAML.load(<<-EOT
---
:pools:
- name: '#{poolname}'
alias: [ 'mockpool' ]
template: 'Templates/pool1'
folder: 'Pooler/pool1'
datastore: 'datastore0'
size: 5
timeout: 10
ready_ttl: 1440
clone_target: 'cluster1'
EOT
)
}
context 'Given a pool that does not exist' do
it 'should return nil' do
expect(subject.pool_config('missing_pool')).to be_nil
end
end
context 'Given a pool that does exist' do
it 'should return the pool\'s configuration' do
result = subject.pool_config(poolname)
expect(result['name']).to eq(poolname)
end
end
end
describe '#dns_config' do
let(:poolname) { 'pool1' }
let(:config) { YAML.load(<<-EOT
---
:dns_configs:
:#{dns_plugin_name}:
option1: 'value1'
EOT
)
}
context 'Given a dns plugin with no configuration' do
let(:config) { YAML.load(<<-EOT
---
:dns_configs:
:bad_dns:
option1: 'value1'
option2: 'value1'
EOT
)
}
it 'should return nil' do
expect(subject.dns_config).to be_nil
end
end
context 'Given a correct dns config name' do
it 'should return the dns\'s configuration' do
result = subject.dns_config
expect(result['option1']).to eq('value1')
end
end
end
describe '#global_config' do
it 'should come from the dns initialization' do
expect(subject.global_config).to be(config)
end
end
describe '#name' do
it "should come from the dns initialization" do
expect(subject.name).to eq(dns_plugin_name)
end
end
describe '#get_ip' do
it 'calls redis hget with vm name and ip' do
redis_connection_pool.with do |redis|
expect(redis).to receive(:hget).with("vmpooler__vm__vm1", 'ip')
end
subject.get_ip(fake_vm['name'])
end
end
describe '#provided_pools' do
let(:config) { YAML.load(<<-EOT
---
:pools:
- name: 'pool1'
dns_config: 'base'
- name: 'pool2'
dns_config: 'base'
- name: 'otherpool'
dns_config: 'other provider'
- name: 'no name'
EOT
)
}
it "should return pools serviced by this provider" do
expect(subject.provided_pools).to eq(['pool1','pool2'])
end
end
describe '#create_or_replace_record' do
it 'should raise error' do
expect{subject.create_or_replace_record('pool')}.to raise_error(/does not implement create_or_replace_record/)
end
end
describe '#delete_record' do
it 'should raise error' do
expect{subject.delete_record('pool')}.to raise_error(/does not implement delete_record/)
end
end
end

View file

@ -1,62 +0,0 @@
require 'spec_helper'
describe 'Vmpooler::Dns' do
let(:dns_class) { 'mock-dnsservice' }
let(:dns_config_name) { 'mock' }
let(:pool) { 'pool1' }
let(:config) { YAML.load(<<~EOT
---
:dns_configs:
:mock:
dns_class: 'mock'
domain: 'example.com'
:pools:
- name: 'pool1'
dns_plugin: 'mock'
EOT
)}
subject { Vmpooler::Dns.new }
describe '.get_dns_plugin_class_by_name' do
it 'returns the plugin class for the specified config' do
result = Vmpooler::Dns.get_dns_plugin_class_by_name(config, dns_config_name)
expect(result).to eq('mock')
end
end
describe '.get_domain_for_pool' do
it 'returns the domain for the specified pool' do
result = Vmpooler::Dns.get_domain_for_pool(config, pool)
expect(result).to eq('example.com')
end
end
describe '.get_dns_plugin_domain_by_name' do
it 'returns the domain for the specified config' do
result = Vmpooler::Dns.get_dns_plugin_domain_by_name(config, dns_config_name)
expect(result).to eq('example.com')
end
end
describe '.get_dns_plugin_config_classes' do
it 'returns the list of dns plugin classes' do
result = Vmpooler::Dns.get_dns_plugin_config_classes(config)
expect(result).to eq(['mock'])
end
end
describe '#load_from_gems' do
let(:gem_name) { 'mock-dnsservice' }
let(:translated_gem_name) { 'mock/dnsservice' }
before(:each) do
allow(subject).to receive(:require).with(gem_name).and_return(true)
end
it 'loads the specified gem' do
expect(subject).to receive(:require).with("vmpooler/dns/#{translated_gem_name}")
result = subject.load_from_gems(gem_name)
expect(result).to eq("vmpooler/dns/#{translated_gem_name}")
end
end
end
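These examples describe a two-step lookup: a pool names its dns_plugin, and the matching entry under :dns_configs supplies the dns_class and domain. A sketch of the two lookups exercised above, written as free-standing helpers for illustration (the real methods live on Vmpooler::Dns; the hash access mirrors the fixture config):

# Return the 'domain' value of a named entry under :dns_configs.
def get_dns_plugin_domain_by_name(config, name)
  config[:dns_configs].dig(name.to_sym, 'domain')
end

# Return the domain configured for the dns plugin a pool points at.
def get_domain_for_pool(config, pool_name)
  pool = config[:pools].find { |p| p['name'] == pool_name }
  get_dns_plugin_domain_by_name(config, pool['dns_plugin']) if pool && pool['dns_plugin']
end

# With the fixture above: get_domain_for_pool(config, 'pool1') #=> 'example.com'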

View file

@ -22,6 +22,7 @@ describe 'Vmpooler' do
['prefix', test_string, ""],
['logfile', test_string, nil],
['site_name', test_string, nil],
['domain', test_string, nil],
['clone_target', test_string, nil],
['create_folders', test_bool, nil],
['create_template_delta_disks', test_bool, nil],

File diff suppressed because it is too large

View file

@ -1,497 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
require 'vmpooler/pool_manager'
describe 'Vmpooler::PoolManager - Queue Reliability Features' do
let(:logger) { MockLogger.new }
let(:redis_connection_pool) { ConnectionPool.new(size: 1) { redis } }
let(:metrics) { Vmpooler::Metrics::DummyStatsd.new }
let(:config) { YAML.load(<<~EOT
---
:config:
task_limit: 10
vm_checktime: 1
vm_lifetime: 12
prefix: 'pooler-'
dlq_enabled: true
dlq_ttl: 168
dlq_max_entries: 100
purge_enabled: true
purge_dry_run: false
max_pending_age: 7200
max_ready_age: 86400
max_completed_age: 3600
health_check_enabled: true
health_check_interval: 300
health_thresholds:
pending_queue_max: 100
ready_queue_max: 500
dlq_max_warning: 100
dlq_max_critical: 1000
stuck_vm_age_threshold: 7200
:providers:
:dummy: {}
:pools:
- name: 'test-pool'
size: 5
provider: 'dummy'
EOT
)
}
subject { Vmpooler::PoolManager.new(config, logger, redis_connection_pool, metrics) }
describe 'Dead-Letter Queue (DLQ)' do
let(:vm) { 'vm-abc123' }
let(:pool) { 'test-pool' }
let(:error_class) { 'StandardError' }
let(:error_message) { 'template does not exist' }
let(:request_id) { 'req-123' }
let(:pool_alias) { 'test-alias' }
before(:each) do
redis_connection_pool.with do |redis_connection|
allow(redis_connection).to receive(:zadd)
allow(redis_connection).to receive(:zcard).and_return(0)
allow(redis_connection).to receive(:expire)
end
end
describe '#dlq_enabled?' do
it 'returns true when dlq_enabled is true in config' do
expect(subject.dlq_enabled?).to be true
end
it 'returns false when dlq_enabled is false in config' do
config[:config]['dlq_enabled'] = false
expect(subject.dlq_enabled?).to be false
end
end
describe '#dlq_ttl' do
it 'returns configured TTL' do
expect(subject.dlq_ttl).to eq(168)
end
it 'returns default TTL when not configured' do
config[:config].delete('dlq_ttl')
expect(subject.dlq_ttl).to eq(168)
end
end
describe '#dlq_max_entries' do
it 'returns configured max entries' do
expect(subject.dlq_max_entries).to eq(100)
end
it 'returns default max entries when not configured' do
config[:config].delete('dlq_max_entries')
expect(subject.dlq_max_entries).to eq(10000)
end
end
describe '#move_to_dlq' do
context 'when DLQ is enabled' do
it 'adds entry to DLQ sorted set' do
redis_connection_pool.with do |redis_connection|
dlq_key = 'vmpooler__dlq__pending'
expect(redis_connection).to receive(:zadd).with(dlq_key, anything, anything)
expect(redis_connection).to receive(:expire).with(dlq_key, anything)
subject.move_to_dlq(vm, pool, 'pending', error_class, error_message,
redis_connection, request_id: request_id, pool_alias: pool_alias)
end
end
it 'includes error details in DLQ entry' do
redis_connection_pool.with do |redis_connection|
expect(redis_connection).to receive(:zadd) do |_key, _score, entry|
expect(entry).to include(vm)
expect(entry).to include(error_message)
expect(entry).to include(error_class)
end
subject.move_to_dlq(vm, pool, 'pending', error_class, error_message, redis_connection)
end
end
it 'increments DLQ metrics' do
redis_connection_pool.with do |redis_connection|
expect(metrics).to receive(:increment).with('vmpooler_dlq.pending.count')
subject.move_to_dlq(vm, pool, 'pending', error_class, error_message, redis_connection)
end
end
it 'enforces max entries limit' do
redis_connection_pool.with do |redis_connection|
allow(redis_connection).to receive(:zcard).and_return(150)
expect(redis_connection).to receive(:zremrangebyrank).with(anything, 0, 49)
subject.move_to_dlq(vm, pool, 'pending', error_class, error_message, redis_connection)
end
end
end
context 'when DLQ is disabled' do
before { config[:config]['dlq_enabled'] = false }
it 'does not add entry to DLQ' do
redis_connection_pool.with do |redis_connection|
expect(redis_connection).not_to receive(:zadd)
subject.move_to_dlq(vm, pool, 'pending', error_class, error_message, redis_connection)
end
end
end
end
end
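The stubs in this block describe the dead-letter mechanics fairly completely: a serialized entry is ZADDed to a per-queue sorted set, the key is given a TTL, a vmpooler_dlq.<queue>.count metric is incremented, and the set is trimmed from the oldest end once it exceeds the configured maximum. A sketch of that flow as implied by the expectations; the key name and entry fields follow the spec, while the JSON serialization, the explicit metrics parameter, and the ttl_hours/max_entries keywords are simplifications for a self-contained example:

require 'json'

def move_to_dlq(vm, pool, queue, error_class, error_message, redis, metrics,
                ttl_hours: 168, max_entries: 10_000, request_id: nil, pool_alias: nil)
  dlq_key = "vmpooler__dlq__#{queue}"
  entry = {
    'vm' => vm, 'pool' => pool, 'queue' => queue,
    'error_class' => error_class, 'error_message' => error_message,
    'request_id' => request_id, 'pool_alias' => pool_alias,
    'timestamp' => Time.now.to_i
  }.to_json

  redis.zadd(dlq_key, Time.now.to_i, entry)   # scored by time so the oldest entries trim first
  redis.expire(dlq_key, ttl_hours * 3600)     # dlq_ttl is configured in hours
  metrics.increment("vmpooler_dlq.#{queue}.count")

  # Enforce the entry limit by dropping the oldest surplus entries
  # (with zcard == 150 and a limit of 100 this removes ranks 0..49).
  overflow = redis.zcard(dlq_key) - max_entries
  redis.zremrangebyrank(dlq_key, 0, overflow - 1) if overflow.positive?
end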
describe 'Auto-Purge' do
describe '#purge_enabled?' do
it 'returns true when purge_enabled is true in config' do
expect(subject.purge_enabled?).to be true
end
it 'returns false when purge_enabled is false in config' do
config[:config]['purge_enabled'] = false
expect(subject.purge_enabled?).to be false
end
end
describe '#purge_dry_run?' do
it 'returns false when purge_dry_run is false in config' do
expect(subject.purge_dry_run?).to be false
end
it 'returns true when purge_dry_run is true in config' do
config[:config]['purge_dry_run'] = true
expect(subject.purge_dry_run?).to be true
end
end
describe '#max_pending_age' do
it 'returns configured max age' do
expect(subject.max_pending_age).to eq(7200)
end
it 'returns default max age when not configured' do
config[:config].delete('max_pending_age')
expect(subject.max_pending_age).to eq(7200)
end
end
describe '#purge_pending_queue' do
let(:pool) { 'test-pool' }
let(:old_vm) { 'vm-old' }
let(:new_vm) { 'vm-new' }
before(:each) do
redis_connection_pool.with do |redis_connection|
# Old VM (3 hours old, exceeds 2 hour threshold)
redis_connection.sadd("vmpooler__pending__#{pool}", old_vm)
redis_connection.hset("vmpooler__vm__#{old_vm}", 'clone', (Time.now - 10800).to_s)
# New VM (30 minutes old, within threshold)
redis_connection.sadd("vmpooler__pending__#{pool}", new_vm)
redis_connection.hset("vmpooler__vm__#{new_vm}", 'clone', (Time.now - 1800).to_s)
end
end
context 'when not in dry-run mode' do
it 'purges stale pending VMs' do
redis_connection_pool.with do |redis_connection|
purged_count = subject.purge_pending_queue(pool, redis_connection)
expect(purged_count).to eq(1)
expect(redis_connection.sismember("vmpooler__pending__#{pool}", old_vm)).to be false
expect(redis_connection.sismember("vmpooler__pending__#{pool}", new_vm)).to be true
end
end
it 'moves purged VMs to DLQ' do
redis_connection_pool.with do |redis_connection|
expect(subject).to receive(:move_to_dlq).with(
old_vm, pool, 'pending', 'Purge', anything, redis_connection, anything
)
subject.purge_pending_queue(pool, redis_connection)
end
end
it 'increments purge metrics' do
redis_connection_pool.with do |redis_connection|
expect(metrics).to receive(:increment).with("vmpooler_purge.pending.#{pool}.count")
subject.purge_pending_queue(pool, redis_connection)
end
end
end
context 'when in dry-run mode' do
before { config[:config]['purge_dry_run'] = true }
it 'detects but does not purge stale VMs' do
redis_connection_pool.with do |redis_connection|
purged_count = subject.purge_pending_queue(pool, redis_connection)
expect(purged_count).to eq(1)
expect(redis_connection.sismember("vmpooler__pending__#{pool}", old_vm)).to be true
end
end
it 'does not move to DLQ' do
redis_connection_pool.with do |redis_connection|
expect(subject).not_to receive(:move_to_dlq)
subject.purge_pending_queue(pool, redis_connection)
end
end
end
end
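The purge tests above fix the behaviour of the pending-queue sweep: every VM in the pool's pending set whose clone timestamp is older than max_pending_age counts as stale; outside dry-run mode it is removed from the set, handed to the DLQ, and a vmpooler_purge.pending.<pool>.count metric is incremented, while dry-run mode only counts it. A condensed sketch of that loop; max_age and the on_purge block are stand-ins for the pool manager's own max_pending_age, move_to_dlq and metrics calls:

require 'time'

def purge_pending_queue(pool, redis, max_age: 7200, dry_run: false, &on_purge)
  purged = 0
  redis.smembers("vmpooler__pending__#{pool}").each do |vm|
    clone_stamp = redis.hget("vmpooler__vm__#{vm}", 'clone')
    next unless clone_stamp

    age = Time.now - Time.parse(clone_stamp)
    next unless age > max_age

    purged += 1
    next if dry_run                    # dry-run: report the stale VM but leave the queue untouched

    redis.srem("vmpooler__pending__#{pool}", vm)
    on_purge&.call(vm, age)            # e.g. move_to_dlq(...) plus the purge counter metric
  end
  purged
end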
describe '#purge_ready_queue' do
let(:pool) { 'test-pool' }
let(:old_vm) { 'vm-old-ready' }
let(:new_vm) { 'vm-new-ready' }
before(:each) do
redis_connection_pool.with do |redis_connection|
# Old VM (25 hours old, exceeds 24 hour threshold)
redis_connection.sadd("vmpooler__ready__#{pool}", old_vm)
redis_connection.hset("vmpooler__vm__#{old_vm}", 'ready', (Time.now - 90000).to_s)
# New VM (2 hours old, within threshold)
redis_connection.sadd("vmpooler__ready__#{pool}", new_vm)
redis_connection.hset("vmpooler__vm__#{new_vm}", 'ready', (Time.now - 7200).to_s)
end
end
it 'moves stale ready VMs to completed queue' do
redis_connection_pool.with do |redis_connection|
purged_count = subject.purge_ready_queue(pool, redis_connection)
expect(purged_count).to eq(1)
expect(redis_connection.sismember("vmpooler__ready__#{pool}", old_vm)).to be false
expect(redis_connection.sismember("vmpooler__completed__#{pool}", old_vm)).to be true
expect(redis_connection.sismember("vmpooler__ready__#{pool}", new_vm)).to be true
end
end
end
describe '#purge_completed_queue' do
let(:pool) { 'test-pool' }
let(:old_vm) { 'vm-old-completed' }
let(:new_vm) { 'vm-new-completed' }
before(:each) do
redis_connection_pool.with do |redis_connection|
# Old VM (2 hours old, exceeds 1 hour threshold)
redis_connection.sadd("vmpooler__completed__#{pool}", old_vm)
redis_connection.hset("vmpooler__vm__#{old_vm}", 'destroy', (Time.now - 7200).to_s)
# New VM (30 minutes old, within threshold)
redis_connection.sadd("vmpooler__completed__#{pool}", new_vm)
redis_connection.hset("vmpooler__vm__#{new_vm}", 'destroy', (Time.now - 1800).to_s)
end
end
it 'removes stale completed VMs' do
redis_connection_pool.with do |redis_connection|
purged_count = subject.purge_completed_queue(pool, redis_connection)
expect(purged_count).to eq(1)
expect(redis_connection.sismember("vmpooler__completed__#{pool}", old_vm)).to be false
expect(redis_connection.sismember("vmpooler__completed__#{pool}", new_vm)).to be true
end
end
end
end
describe 'Health Checks' do
describe '#health_check_enabled?' do
it 'returns true when health_check_enabled is true in config' do
expect(subject.health_check_enabled?).to be true
end
it 'returns false when health_check_enabled is false in config' do
config[:config]['health_check_enabled'] = false
expect(subject.health_check_enabled?).to be false
end
end
describe '#health_thresholds' do
it 'returns configured thresholds' do
thresholds = subject.health_thresholds
expect(thresholds['pending_queue_max']).to eq(100)
expect(thresholds['stuck_vm_age_threshold']).to eq(7200)
end
it 'merges with defaults when partially configured' do
config[:config]['health_thresholds'] = { 'pending_queue_max' => 200 }
thresholds = subject.health_thresholds
expect(thresholds['pending_queue_max']).to eq(200)
expect(thresholds['ready_queue_max']).to eq(500) # default
end
end
describe '#calculate_queue_ages' do
let(:pool) { 'test-pool' }
let(:vm1) { 'vm-1' }
let(:vm2) { 'vm-2' }
let(:vm3) { 'vm-3' }
before(:each) do
redis_connection_pool.with do |redis_connection|
redis_connection.hset("vmpooler__vm__#{vm1}", 'clone', (Time.now - 3600).to_s)
redis_connection.hset("vmpooler__vm__#{vm2}", 'clone', (Time.now - 7200).to_s)
redis_connection.hset("vmpooler__vm__#{vm3}", 'clone', (Time.now - 1800).to_s)
end
end
it 'calculates ages for all VMs' do
redis_connection_pool.with do |redis_connection|
vms = [vm1, vm2, vm3]
ages = subject.calculate_queue_ages(vms, 'clone', redis_connection)
expect(ages.length).to eq(3)
expect(ages[0]).to be_within(5).of(3600)
expect(ages[1]).to be_within(5).of(7200)
expect(ages[2]).to be_within(5).of(1800)
end
end
it 'skips VMs with missing timestamps' do
redis_connection_pool.with do |redis_connection|
vms = [vm1, 'vm-nonexistent', vm3]
ages = subject.calculate_queue_ages(vms, 'clone', redis_connection)
expect(ages.length).to eq(2)
end
end
end
describe '#determine_health_status' do
let(:base_metrics) do
{
'queues' => {
'test-pool' => {
'pending' => { 'size' => 10, 'stuck_count' => 2 },
'ready' => { 'size' => 50 }
}
},
'errors' => {
'dlq_total_size' => 50,
'stuck_vm_count' => 2
}
}
end
it 'returns healthy when all metrics are within thresholds' do
status = subject.determine_health_status(base_metrics)
expect(status).to eq('healthy')
end
it 'returns degraded when DLQ size exceeds warning threshold' do
metrics = base_metrics.dup
metrics['errors']['dlq_total_size'] = 150
status = subject.determine_health_status(metrics)
expect(status).to eq('degraded')
end
it 'returns unhealthy when DLQ size exceeds critical threshold' do
metrics = base_metrics.dup
metrics['errors']['dlq_total_size'] = 1500
status = subject.determine_health_status(metrics)
expect(status).to eq('unhealthy')
end
it 'returns degraded when pending queue exceeds warning threshold' do
metrics = base_metrics.dup
metrics['queues']['test-pool']['pending']['size'] = 120
status = subject.determine_health_status(metrics)
expect(status).to eq('degraded')
end
it 'returns unhealthy when pending queue exceeds critical threshold' do
metrics = base_metrics.dup
metrics['queues']['test-pool']['pending']['size'] = 250
status = subject.determine_health_status(metrics)
expect(status).to eq('unhealthy')
end
it 'returns unhealthy when stuck VM count exceeds critical threshold' do
metrics = base_metrics.dup
metrics['errors']['stuck_vm_count'] = 60
status = subject.determine_health_status(metrics)
expect(status).to eq('unhealthy')
end
end
describe '#push_health_metrics' do
let(:metrics_data) do
{
'queues' => {
'test-pool' => {
'pending' => { 'size' => 10, 'oldest_age' => 3600, 'stuck_count' => 2 },
'ready' => { 'size' => 50, 'oldest_age' => 7200 },
'completed' => { 'size' => 5 }
}
},
'tasks' => {
'clone' => { 'active' => 3 },
'ondemand' => { 'active' => 2, 'pending' => 5 }
},
'errors' => {
'dlq_total_size' => 25,
'stuck_vm_count' => 2,
'orphaned_metadata_count' => 3
}
}
end
it 'pushes status metric' do
allow(metrics).to receive(:gauge)
expect(metrics).to receive(:gauge).with('vmpooler_health.status', 0)
subject.push_health_metrics(metrics_data, 'healthy')
end
it 'pushes error metrics' do
allow(metrics).to receive(:gauge)
expect(metrics).to receive(:gauge).with('vmpooler_health.dlq.total_size', 25)
expect(metrics).to receive(:gauge).with('vmpooler_health.stuck_vms.count', 2)
expect(metrics).to receive(:gauge).with('vmpooler_health.orphaned_metadata.count', 3)
subject.push_health_metrics(metrics_data, 'healthy')
end
it 'pushes per-pool queue metrics' do
allow(metrics).to receive(:gauge)
expect(metrics).to receive(:gauge).with('vmpooler_health.queue.test-pool.pending.size', 10)
expect(metrics).to receive(:gauge).with('vmpooler_health.queue.test-pool.pending.oldest_age', 3600)
expect(metrics).to receive(:gauge).with('vmpooler_health.queue.test-pool.pending.stuck_count', 2)
expect(metrics).to receive(:gauge).with('vmpooler_health.queue.test-pool.ready.size', 50)
subject.push_health_metrics(metrics_data, 'healthy')
end
it 'pushes task metrics' do
allow(metrics).to receive(:gauge)
expect(metrics).to receive(:gauge).with('vmpooler_health.tasks.clone.active', 3)
expect(metrics).to receive(:gauge).with('vmpooler_health.tasks.ondemand.active', 2)
expect(metrics).to receive(:gauge).with('vmpooler_health.tasks.ondemand.pending', 5)
subject.push_health_metrics(metrics_data, 'healthy')
end
end
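# A hedged sketch matching the gauge names asserted above. It assumes the
# metrics object responds to #gauge(name, value); the numeric mapping beyond
# 'healthy' => 0 is an assumption, as only the healthy value is pinned by a spec.
HEALTH_STATUS_VALUES = { 'healthy' => 0, 'degraded' => 1, 'unhealthy' => 2 }.freeze
def push_health_metrics(data, status)
  metrics.gauge('vmpooler_health.status', HEALTH_STATUS_VALUES.fetch(status, -1))
  errors = data['errors']
  metrics.gauge('vmpooler_health.dlq.total_size', errors['dlq_total_size'])
  metrics.gauge('vmpooler_health.stuck_vms.count', errors['stuck_vm_count'])
  metrics.gauge('vmpooler_health.orphaned_metadata.count', errors['orphaned_metadata_count'])
  data['queues'].each do |pool, queues|
    queues.each do |queue, values|
      values.each { |name, value| metrics.gauge("vmpooler_health.queue.#{pool}.#{queue}.#{name}", value) }
    end
  end
  data['tasks'].each do |task, values|
    values.each { |name, value| metrics.gauge("vmpooler_health.tasks.#{task}.#{name}", value) }
  end
end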
end
end

View file

@ -1,11 +0,0 @@
require 'spec_helper'
# The only method previously tested here was '#get_domain_for_pool'
# which was moved to Vmpooler::Dns as the more appropriate class
#
# TODO: Add tests for the last remaining method, or move it to a more appropriate class
describe 'Vmpooler::Parsing' do
let(:pool) { 'pool1' }
subject { Vmpooler::Parsing }
end

View file

@ -45,55 +45,10 @@ describe 'Vmpooler' do
end
context 'when config file is set' do
before(:each) do
ENV['VMPOOLER_CONFIG_FILE'] = config_file
end
it 'should use the file' do
ENV['VMPOOLER_CONFIG_FILE'] = config_file
expect(Vmpooler.config[:pools]).to eq(config[:pools])
end
it 'merges one extra file, results in two providers' do
ENV['EXTRA_CONFIG'] = File.join(fixtures_dir, 'extra_config1.yaml')
expect(Vmpooler.config[:providers].keys).to include(:dummy)
expect(Vmpooler.config[:providers].keys).to include(:alice)
end
it 'merges two extra files, results in three providers and an extra pool' do
extra1 = File.join(fixtures_dir, 'extra_config1.yaml')
extra2 = File.join(fixtures_dir, 'extra_config2.yaml')
ENV['EXTRA_CONFIG'] = "#{extra1},#{extra2}"
expect(Vmpooler.config[:providers].keys).to include(:dummy)
expect(Vmpooler.config[:providers].keys).to include(:alice)
expect(Vmpooler.config[:providers].keys).to include(:bob)
merged_pools = [{"name"=>"pool03", "provider"=>"dummy", "dns_plugin"=>"example", "ready_ttl"=>5, "size"=>5},
{"name"=>"pool04", "provider"=>"dummy", "dns_plugin"=>"example", "ready_ttl"=>5, "size"=>5},
{"name"=>"pool05", "provider"=>"dummy", "dns_plugin"=>"example", "ready_ttl"=>5, "size"=>5}]
expect(Vmpooler.config[:pools]).to eq(merged_pools)
expect(Vmpooler.config[:config]).not_to be_nil # merge does not delete existing keys
end
end
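# A minimal sketch (not the shipped loader) of the EXTRA_CONFIG behaviour the
# examples above rely on: each comma-separated file is loaded and deep-merged
# on top of the base configuration, so providers and pools from later files are
# layered onto earlier ones. Uses the deep_merge gem listed in the gemspec and
# assumes Psych >= 3.3 for the load_file keyword arguments.
require 'yaml'
require 'deep_merge'
def merged_config(base_file, extra = ENV['EXTRA_CONFIG'])
  load_opts = { permitted_classes: [Symbol], aliases: true } # allow the symbol keys vmpooler configs use
  config = YAML.load_file(base_file, **load_opts)
  return config if extra.nil? || extra.empty?
  extra.split(',').each do |path|
    config.deep_merge!(YAML.load_file(path, **load_opts))
  end
  config
end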
context 'when domain' do
context 'is set as a variable' do
before(:each) do
ENV['VMPOOLER_CONFIG_FILE'] = config_file
ENV['DOMAIN'] = 'example.com'
end
it 'should exit' do
expect { Vmpooler.config }.to raise_error(SystemExit)
end
end
context 'is set in a config file' do
let(:config_file) { File.join(fixtures_dir, 'vmpooler_domain.yaml') }
before(:each) do
ENV['VMPOOLER_CONFIG_FILE'] = config_file
end
it 'should exit' do
expect { Vmpooler.config }.to raise_error(SystemExit)
end
end
end
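# A hedged sketch (hypothetical helper name) of the guard exercised by the two
# contexts above: the legacy top-level 'domain' setting and the DOMAIN
# environment variable were superseded by :dns_configs:, so configuration
# loading aborts when either is still present.
def abort_if_legacy_domain!(parsed_config)
  return unless ENV['DOMAIN'] || parsed_config[:config]&.key?('domain')
  warn "The 'domain' setting has been replaced by :dns_configs:; please update your configuration."
  exit 1
end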
end
end

View file

@ -1,7 +0,0 @@
#!/usr/bin/env bash
# The container tag should closely match what is used in `docker/Dockerfile` in vmpooler-deployment
docker run -it --rm \
-v $(pwd):/app \
jruby:9.4.12.1-jdk11 \
/bin/bash -c 'apt-get update -qq && apt-get install -y --no-install-recommends git make netbase build-essential && cd /app && gem install bundler && bundle install --jobs 3 && bundle update; echo "LOCK_FILE_UPDATE_EXIT_CODE=$?"'

View file

@ -11,40 +11,39 @@ Gem::Specification.new do |s|
s.summary = 'vmpooler provides configurable pools of instantly-available (running) virtual machines'
s.homepage = 'https://github.com/puppetlabs/vmpooler'
s.license = 'Apache-2.0'
s.required_ruby_version = Gem::Requirement.new('>= 2.3.0')
s.files = Dir[ "bin/*", "lib/**/*" ]
s.bindir = 'bin'
s.executables = 'vmpooler'
s.require_paths = ["lib"]
s.add_dependency 'concurrent-ruby', '~> 1.1'
s.add_dependency 'connection_pool', '~> 2.4'
s.add_dependency 'deep_merge', '~> 1.2'
s.add_dependency 'connection_pool', '~> 2.2'
s.add_dependency 'net-ldap', '~> 0.16'
s.add_dependency 'opentelemetry-exporter-jaeger', '= 0.23.0'
s.add_dependency 'opentelemetry-instrumentation-concurrent_ruby', '= 0.21.1'
s.add_dependency 'opentelemetry-instrumentation-http_client', '= 0.22.2'
s.add_dependency 'opentelemetry-instrumentation-rack', '= 0.23.4'
s.add_dependency 'opentelemetry-instrumentation-redis', '= 0.25.3'
s.add_dependency 'opentelemetry-instrumentation-sinatra', '= 0.23.2'
s.add_dependency 'opentelemetry-resource_detectors', '= 0.24.2'
s.add_dependency 'opentelemetry-sdk', '~> 1.8'
s.add_dependency 'nokogiri', '~> 1.10'
s.add_dependency 'opentelemetry-exporter-jaeger', '= 0.17.0'
s.add_dependency 'opentelemetry-instrumentation-concurrent_ruby', '= 0.17.0'
s.add_dependency 'opentelemetry-instrumentation-redis', '= 0.17.0'
s.add_dependency 'opentelemetry-instrumentation-sinatra', '= 0.17.0'
s.add_dependency 'opentelemetry-resource_detectors', '= 0.17.0'
s.add_dependency 'opentelemetry-sdk', '= 0.17.0'
s.add_dependency 'pickup', '~> 0.0.11'
s.add_dependency 'prometheus-client', '>= 2', '< 5'
s.add_dependency 'puma', '>= 5.0.4', '< 7'
s.add_dependency 'rack', '>= 2.2', '< 4.0'
s.add_dependency 'prometheus-client', '~> 2.0'
s.add_dependency 'puma', '~> 5.0', '>= 5.0.4'
s.add_dependency 'rack', '~> 2.2'
s.add_dependency 'rake', '~> 13.0'
s.add_dependency 'redis', '~> 5.0'
s.add_dependency 'sinatra', '>= 2', '< 4'
s.add_dependency 'redis', '~> 4.1'
s.add_dependency 'sinatra', '~> 2.0'
s.add_dependency 'spicy-proton', '~> 2.1'
s.add_dependency 'statsd-ruby', '~> 1.4'
# Testing dependencies
s.add_development_dependency 'climate_control', '>= 0.2.0'
s.add_development_dependency 'mock_redis', '= 0.37.0'
s.add_development_dependency 'mock_redis', '>= 0.17.0'
s.add_development_dependency 'pry'
s.add_development_dependency 'rack-test', '>= 0.6'
s.add_development_dependency 'rspec', '>= 3.2'
s.add_development_dependency 'rubocop', '~> 1.56.0'
s.add_development_dependency 'rubocop', '~> 1.1.0'
s.add_development_dependency 'simplecov', '>= 0.11.2'
s.add_development_dependency 'thor', '~> 1.0', '>= 1.0.1'
s.add_development_dependency 'yarjuf', '>= 2.0'

View file

@ -3,12 +3,6 @@
:dummy:
filename: '/tmp/dummy-backing.yaml'
:dns_configs:
:example:
dns_class: dynamic-dns
#domain: 'localhost' # Flip these out for local requests
domain: 'example.com'
:redis:
server: 'localhost'
@ -23,13 +17,14 @@
logfile: '/var/log/vmpooler.log'
task_limit: 10
timeout: 15
timeout_notification: 5
vm_checktime: 1
vm_lifetime: 12
vm_lifetime_auth: 24
allowed_tags:
- 'created_by'
- 'project'
domain: 'example.com'
#domain: 'localhost' # Flip these out for local requests
prefix: 'poolvm-'
:pools:
@ -40,10 +35,8 @@
datastore: 'vmstorage'
size: 5
timeout: 15
timeout_notification: 5
ready_ttl: 1440
provider: dummy
dns_plugin: 'example'
- name: 'debian-7-x86_64'
alias: [ 'debian-7-64', 'debian-7-amd64' ]
template: 'Templates/debian-7-x86_64'
@ -51,7 +44,6 @@
datastore: 'vmstorage'
size: 5
timeout: 15
timeout_notification: 5
ready_ttl: 1440
provider: dummy
dns_plugin: 'example'

View file

@ -28,12 +28,6 @@
# Expects an array of strings specifying the allowed items by name
# (optional; default: nil)
#
# - skip_dns_check_before_creating_vm
# Setting this parameter on a provider makes vmpooler skip the DNS record check it normally performs before creating
# a VM. By default, vmpooler checks DNS and, if a record already exists (a conflict), generates a new hostname.
# With this parameter set, vmpooler skips the check and keeps the conflicting name. This is useful for providers that
# can handle that case, for instance by replacing the existing DNS record with a new one.
#
# If you want to support more than one provider with different parameters (server, username, or password), you have to
# specify the backing service in the provider_class configuration parameter, for example 'vsphere' or 'dummy'. Each
# pool can specify the provider to use.
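A hedged sketch in Ruby of how a provider option like skip_dns_check_before_creating_vm (described above) changes hostname selection; the helper names are illustrative, not vmpooler's actual methods:

def choose_hostname(provider_config, candidate)
  # With the flag set, keep the candidate name even if a DNS record already exists.
  return candidate if provider_config['skip_dns_check_before_creating_vm']
  # Default behaviour: a conflicting record forces a freshly generated hostname.
  dns_record_exists?(candidate) ? generate_new_hostname : candidate
end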
@ -367,15 +361,6 @@
# - user_object
# The LDAP object-type used to designate a user object.
#
# - service_account_hash
# A hash containing the following parameters for a service account used to perform the
# initial bind. After the initial bind, a search is performed using 'base' and
# 'user_object', and vmpooler then re-binds as the returned user.
# - :user_dn
# The full distinguished name (DN) of the service account used to bind.
# - :password
# The password for the service account used to bind.
#
# Example:
# :auth:
# provider: 'ldap'
@ -384,23 +369,6 @@
# port: 389
# base: 'ou=users,dc=company,dc=com'
# user_object: 'uid'
#
# :auth:
# provider: 'ldap'
# :ldap:
# host: 'ldap.example.com'
# port: 636
# service_account_hash:
# :user_dn: 'cn=Service Account,ou=Accounts,dc=ldap,dc=example,dc=com'
# :password: 'service-account-password'
# encryption:
# :method: :simple_tls
# :tls_options:
# :ssl_version: 'TLSv1_2'
# base:
# - 'ou=Accounts,dc=company,dc=com'
# user_object:
# - 'samAccountName'
:auth:
provider: 'ldap'
@ -456,12 +424,6 @@
# How long (in minutes) before marking a clone in 'pending' queues as 'failed' and retrying.
# (default: 15)
#
# - max_vm_retries
# Maximum number of times to retry VM creation for a failed request before marking it as permanently failed.
# This prevents infinite retry loops when there are configuration issues such as invalid template paths.
# Permanent errors of that kind are detected and are not retried at all.
# (default: 3)
#
# - vm_checktime
# How often (in minutes) to check the sanity of VMs in 'ready' queues.
# (default: 1)
@ -477,6 +439,9 @@
# - allowed_tags
# If set, restricts tags to those specified in this array.
#
# - domain
# If set, returns a top-level 'domain' JSON key in POST requests
#
# - prefix
# If set, prefixes all created VMs with this string. This should include
# a separator.
@ -621,43 +586,19 @@
logfile: '/var/log/vmpooler.log'
task_limit: 10
timeout: 15
timeout_notification: 5
vm_checktime: 1
vm_lifetime: 12
vm_lifetime_auth: 24
max_vm_retries: 3
allowed_tags:
- 'created_by'
- 'project'
domain: 'example.com'
prefix: 'poolvm-'
experimental_features: true
backend_weight:
'backend1': 60
'backend2': 40
# :dns_configs:
#
# This section contains a list of dns configurations to be referenced by one or more pools.
#
# The currently supported backing services are:
# - dynamic-dns (assumes dynamic DNS handles record management, so vmpooler does not need to interact with a DNS service)
# - gcp (Google Cloud DNS https://github.com/puppetlabs/vmpooler-dns-gcp)
#
# - dns_class
# Specify one of the supported backing services.
#
# - domain
# The domain expected to make up the FQDN when attempting to resolve VM instances.
#
# See DNS plugin docs for additional options specific to that class.
#
# Example
:dns_configs:
:example:
dns_class: dynamic-dns
domain: 'example.com'
# :pools:
#
# This section contains a list of virtual machine 'pools' for vmpooler to
@ -688,12 +629,6 @@
# If you have more than one provider, this is where you would choose which
# one to use for this pool
#
# - dns_plugin
# The name of the DNS plugin to use with this pool in order to determine the
# domain and settings specific to a DNS service. This should match
# a name in the :dns_configs: section above. e.g. example
# (required)
#
# - clone_target
# Per-pool option to override the global 'clone_target' cluster.
# (optional)
@ -745,7 +680,6 @@
datastore: 'vmstorage'
size: 5
timeout: 15
timeout_notification: 5
ready_ttl: 1440
provider: vsphere
create_linked_clone: true
@ -756,7 +690,6 @@
datastore: 'vmstorage'
size: 5
timeout: 15
timeout_notification: 5
ready_ttl: 1440
provider: vsphere
create_linked_clone: false

View file

@ -1,92 +0,0 @@
---
# VMPooler Configuration Example with Dead-Letter Queue, Auto-Purge, and Health Checks
# Redis Configuration
:redis:
server: 'localhost'
port: 6379
data_ttl: 168 # hours - how long to keep VM metadata in Redis
# Dead-Letter Queue (DLQ) Configuration
dlq_enabled: true
dlq_ttl: 168 # hours (7 days) - how long to keep DLQ entries
dlq_max_entries: 10000 # maximum entries per DLQ queue before trimming
# Application Configuration
:config:
# ... other existing config ...
# Dead-Letter Queue (DLQ) - Optional, defaults shown
dlq_enabled: false # Set to true to enable DLQ
dlq_ttl: 168 # hours (7 days)
dlq_max_entries: 10000 # per DLQ queue
# Auto-Purge Stale Queue Entries
purge_enabled: false # Set to true to enable auto-purge
purge_interval: 3600 # seconds (1 hour) - how often to run purge cycle
purge_dry_run: false # Set to true to log what would be purged without actually purging
# Auto-Purge Age Thresholds (in seconds)
max_pending_age: 7200 # 2 hours - VMs stuck in pending
max_ready_age: 86400 # 24 hours - VMs idle in ready queue
max_completed_age: 3600 # 1 hour - VMs in completed queue
max_orphaned_age: 86400 # 24 hours - orphaned VM metadata
max_request_age: 86400 # 24 hours - stale on-demand requests
# Health Checks
health_check_enabled: false # Set to true to enable health checks
health_check_interval: 300 # seconds (5 minutes) - how often to run health checks
# Health Check Thresholds
health_thresholds:
pending_queue_max: 100 # Warning threshold for pending queue size
ready_queue_max: 500 # Warning threshold for ready queue size
dlq_max_warning: 100 # Warning threshold for DLQ size
dlq_max_critical: 1000 # Critical threshold for DLQ size
stuck_vm_age_threshold: 7200 # 2 hours - age at which VM is considered "stuck"
stuck_vm_max_warning: 10 # Warning threshold for stuck VM count
stuck_vm_max_critical: 50 # Critical threshold for stuck VM count
# Pool Configuration
:pools:
- name: 'centos-7-x86_64'
size: 5
provider: 'vsphere'
# ... other pool settings ...
# Provider Configuration
:providers:
:vsphere:
server: 'vcenter.example.com'
username: 'vmpooler'
password: 'secret'
# ... other provider settings ...
# Example: Production Configuration
# For production use, you might want:
# :config:
# dlq_enabled: true
# dlq_ttl: 168 # Keep failed VMs for a week
#
# purge_enabled: true
# purge_interval: 1800 # Run every 30 minutes
# purge_dry_run: false
# max_pending_age: 3600 # Purge pending VMs after 1 hour
# max_ready_age: 172800 # Purge ready VMs after 2 days
#
# health_check_enabled: true
# health_check_interval: 300 # Check every 5 minutes
# Example: Development Configuration
# For development/testing, you might want:
# :config:
# dlq_enabled: true
# dlq_ttl: 24 # Keep failed VMs for a day
#
# purge_enabled: true
# purge_interval: 600 # Run every 10 minutes
# purge_dry_run: true # Test mode - log but don't actually purge
# max_pending_age: 1800 # More aggressive - 30 minutes
#
# health_check_enabled: true
# health_check_interval: 60 # Check every minute
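A hedged sketch in Ruby of the auto-purge cycle this example configures, including the dry-run mode; the Redis key names follow vmpooler's vmpooler__* conventions, but the method and logger calls are illustrative rather than the shipped implementation:

require 'time'

def purge_stale_pending(pool, redis, config, logger)
  max_age = config['max_pending_age'] || 7200
  dry_run = config['purge_dry_run']
  redis.smembers("vmpooler__pending__#{pool}").each do |vm|
    clone_time = redis.hget("vmpooler__vm__#{vm}", 'clone')
    next unless clone_time
    age = Time.now - Time.parse(clone_time)
    next unless age > max_age
    if dry_run
      logger.log('s', "[dry-run] would purge #{vm} from pending (stuck for #{age.to_i}s)")
    else
      redis.smove("vmpooler__pending__#{pool}", "vmpooler__completed__#{pool}", vm)
      logger.log('s', "purged #{vm} from pending after #{age.to_i}s")
    end
  end
end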