add collections

Gary Kwok
2024-02-01 11:40:42 +08:00
parent 0715d7c475
commit 7af4c56c96
3198 changed files with 455118 additions and 0 deletions


@@ -0,0 +1,287 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2020 Orion Poplawski <orion@nwra.com>
# Copyright (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Orion Poplawski (@opoplawski)
name: cobbler
short_description: Cobbler inventory source
version_added: 1.0.0
description:
- Get inventory hosts from the cobbler service.
- "Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and has a C(plugin: cobbler) entry."
extends_documentation_fragment:
- inventory_cache
options:
plugin:
description: The name of this plugin, it should always be set to C(community.general.cobbler) for this plugin to recognize it as its own.
required: true
choices: [ 'cobbler', 'community.general.cobbler' ]
url:
description: URL to cobbler.
default: 'http://cobbler/cobbler_api'
env:
- name: COBBLER_SERVER
user:
description: Cobbler authentication user.
required: false
env:
- name: COBBLER_USER
password:
description: Cobbler authentication password
required: false
env:
- name: COBBLER_PASSWORD
cache_fallback:
description: Fallback to cached results if connection to cobbler fails
type: boolean
default: false
exclude_profiles:
description:
- Profiles to exclude from inventory.
- Ignored if I(include_profiles) is specified.
type: list
default: []
elements: str
include_profiles:
description:
- Profiles to include from inventory.
- If specified, all other profiles will be excluded.
- I(exclude_profiles) is ignored if I(include_profiles) is specified.
type: list
default: []
elements: str
version_added: 4.4.0
group_by:
description: Keys to group hosts by
type: list
elements: string
default: [ 'mgmt_classes', 'owners', 'status' ]
group:
description: Group to place all hosts into
default: cobbler
group_prefix:
description: Prefix to apply to cobbler groups
default: cobbler_
want_facts:
description: Toggle, if C(true) the plugin will retrieve host facts from the server
type: boolean
default: true
'''
EXAMPLES = '''
# my.cobbler.yml
plugin: community.general.cobbler
url: http://cobbler/cobbler_api
user: ansible-tester
password: secure
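# A second sketch exercising the filtering and grouping options documented above;
# the excluded profile name and the group_by keys are illustrative assumptions, not defaults.
plugin: community.general.cobbler
url: http://cobbler/cobbler_api
user: ansible-tester
password: secure
exclude_profiles:
- testing
group_by:
- mgmt_classes
- status
want_facts: false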
'''
import socket
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.six import iteritems
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name
# xmlrpc
try:
import xmlrpclib as xmlrpc_client
HAS_XMLRPC_CLIENT = True
except ImportError:
try:
import xmlrpc.client as xmlrpc_client
HAS_XMLRPC_CLIENT = True
except ImportError:
HAS_XMLRPC_CLIENT = False
class InventoryModule(BaseInventoryPlugin, Cacheable):
''' Host inventory parser for ansible using cobbler as source. '''
NAME = 'community.general.cobbler'
def __init__(self):
super(InventoryModule, self).__init__()
self.cache_key = None
self.connection = None
def verify_file(self, path):
valid = False
if super(InventoryModule, self).verify_file(path):
if path.endswith(('cobbler.yaml', 'cobbler.yml')):
valid = True
else:
self.display.vvv('Skipping due to inventory source not ending in "cobbler.yaml" or "cobbler.yml"')
return valid
def _get_connection(self):
if not HAS_XMLRPC_CLIENT:
raise AnsibleError('Could not import xmlrpc client library')
if self.connection is None:
self.display.vvvv('Connecting to %s\n' % self.cobbler_url)
self.connection = xmlrpc_client.Server(self.cobbler_url, allow_none=True)
self.token = None
if self.get_option('user') is not None:
self.token = self.connection.login(self.get_option('user'), self.get_option('password'))
return self.connection
def _init_cache(self):
if self.cache_key not in self._cache:
self._cache[self.cache_key] = {}
def _reload_cache(self):
if self.get_option('cache_fallback'):
self.display.vvv('Cannot connect to server, loading cache\n')
self._options['cache_timeout'] = 0
self.load_cache_plugin()
self._cache.get(self.cache_key, {})
def _get_profiles(self):
if not self.use_cache or 'profiles' not in self._cache.get(self.cache_key, {}):
c = self._get_connection()
try:
if self.token is not None:
data = c.get_profiles(self.token)
else:
data = c.get_profiles()
except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError):
self._reload_cache()
else:
self._init_cache()
self._cache[self.cache_key]['profiles'] = data
return self._cache[self.cache_key]['profiles']
def _get_systems(self):
if not self.use_cache or 'systems' not in self._cache.get(self.cache_key, {}):
c = self._get_connection()
try:
if self.token is not None:
data = c.get_systems(self.token)
else:
data = c.get_systems()
except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError):
self._reload_cache()
else:
self._init_cache()
self._cache[self.cache_key]['systems'] = data
return self._cache[self.cache_key]['systems']
def _add_safe_group_name(self, group, child=None):
group_name = self.inventory.add_group(to_safe_group_name('%s%s' % (self.get_option('group_prefix'), group.lower().replace(" ", ""))))
if child is not None:
self.inventory.add_child(group_name, child)
return group_name
def _exclude_profile(self, profile):
if self.include_profiles:
return profile not in self.include_profiles
else:
return profile in self.exclude_profiles
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
# read config from file, this sets 'options'
self._read_config_data(path)
# get connection host
self.cobbler_url = self.get_option('url')
self.cache_key = self.get_cache_key(path)
self.use_cache = cache and self.get_option('cache')
self.exclude_profiles = self.get_option('exclude_profiles')
self.include_profiles = self.get_option('include_profiles')
self.group_by = self.get_option('group_by')
for profile in self._get_profiles():
if profile['parent']:
self.display.vvvv('Processing profile %s with parent %s\n' % (profile['name'], profile['parent']))
if not self._exclude_profile(profile['parent']):
parent_group_name = self._add_safe_group_name(profile['parent'])
self.display.vvvv('Added profile parent group %s\n' % parent_group_name)
if not self._exclude_profile(profile['name']):
group_name = self._add_safe_group_name(profile['name'])
self.display.vvvv('Added profile group %s\n' % group_name)
self.inventory.add_child(parent_group_name, group_name)
else:
self.display.vvvv('Processing profile %s without parent\n' % profile['name'])
# Create a hierarchy of profile names
profile_elements = profile['name'].split('-')
i = 0
while i < len(profile_elements) - 1:
profile_group = '-'.join(profile_elements[0:i + 1])
profile_group_child = '-'.join(profile_elements[0:i + 2])
if self._exclude_profile(profile_group):
self.display.vvvv('Excluding profile %s\n' % profile_group)
break
group_name = self._add_safe_group_name(profile_group)
self.display.vvvv('Added profile group %s\n' % group_name)
child_group_name = self._add_safe_group_name(profile_group_child)
self.display.vvvv('Added profile child group %s to %s\n' % (child_group_name, group_name))
self.inventory.add_child(group_name, child_group_name)
i = i + 1
# Add default group for this inventory if specified
self.group = to_safe_group_name(self.get_option('group'))
if self.group is not None and self.group != '':
self.inventory.add_group(self.group)
self.display.vvvv('Added site group %s\n' % self.group)
for host in self._get_systems():
# Get the FQDN for the host and add it to the right groups
hostname = host['hostname'] # None
interfaces = host['interfaces']
if self._exclude_profile(host['profile']):
self.display.vvvv('Excluding host %s in profile %s\n' % (host['name'], host['profile']))
continue
# hostname is often empty for non-static IP hosts
if hostname == '':
for (iname, ivalue) in iteritems(interfaces):
if ivalue['management'] or not ivalue['static']:
this_dns_name = ivalue.get('dns_name', None)
if this_dns_name is not None and this_dns_name != "":
hostname = this_dns_name
self.display.vvvv('Set hostname to %s from %s\n' % (hostname, iname))
if hostname == '':
self.display.vvvv('Cannot determine hostname for host %s, skipping\n' % host['name'])
continue
self.inventory.add_host(hostname)
self.display.vvvv('Added host %s hostname %s\n' % (host['name'], hostname))
# Add host to profile group
group_name = self._add_safe_group_name(host['profile'], child=hostname)
self.display.vvvv('Added host %s to profile group %s\n' % (hostname, group_name))
# Add host to groups specified by group_by fields
for group_by in self.group_by:
if host[group_by] == '<<inherit>>':
groups = []
else:
groups = [host[group_by]] if isinstance(host[group_by], str) else host[group_by]
for group in groups:
group_name = self._add_safe_group_name(group, child=hostname)
self.display.vvvv('Added host %s to group_by %s group %s\n' % (hostname, group_by, group_name))
# Add to group for this inventory
if self.group is not None:
self.inventory.add_child(self.group, hostname)
# Add host variables
if self.get_option('want_facts'):
try:
self.inventory.set_variable(hostname, 'cobbler', host)
except ValueError as e:
self.display.warning("Could not set host info for %s: %s" % (hostname, to_text(e)))


@@ -0,0 +1,139 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: gitlab_runners
author:
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
short_description: Ansible dynamic inventory plugin for GitLab runners.
requirements:
- python >= 2.7
- python-gitlab > 1.8.0
extends_documentation_fragment:
- constructed
description:
- Reads inventories from the GitLab API.
- Uses a YAML configuration file gitlab_runners.[yml|yaml].
options:
plugin:
description: The name of this plugin, it should always be set to 'gitlab_runners' for this plugin to recognize it as its own.
type: str
required: true
choices:
- gitlab_runners
- community.general.gitlab_runners
server_url:
description: The URL of the GitLab server, with protocol (i.e. http or https).
env:
- name: GITLAB_SERVER_URL
version_added: 1.0.0
type: str
required: true
api_token:
description: GitLab token for logging in.
env:
- name: GITLAB_API_TOKEN
version_added: 1.0.0
type: str
aliases:
- private_token
- access_token
filter:
description: Filter runners from the GitLab API.
env:
- name: GITLAB_FILTER
version_added: 1.0.0
type: str
choices: ['active', 'paused', 'online', 'specific', 'shared']
verbose_output:
description: Toggle to (not) include all available nodes metadata
type: bool
default: true
'''
EXAMPLES = '''
# gitlab_runners.yml
plugin: community.general.gitlab_runners
server_url: https://gitlab.com
# Example using constructed features to create groups and set ansible_host
plugin: community.general.gitlab_runners
server_url: https://gitlab.com
strict: false
keyed_groups:
# add e.g. amd64 hosts to an arch_amd64 group
- prefix: arch
key: 'architecture'
# add e.g. linux hosts to an os_linux group
- prefix: os
key: 'platform'
# create a group per runner tag
# e.g. a runner tagged w/ "production" ends up in group "label_production"
# hint: labels containing special characters will be converted to safe names
- key: 'tag_list'
prefix: tag
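# A sketch restricted to online runners with minimal output, using the filter and
# verbose_output options documented above (the server URL is an illustrative assumption)
plugin: community.general.gitlab_runners
server_url: https://gitlab.example.com
filter: online
verbose_output: false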
'''
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.common.text.converters import to_native
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
try:
import gitlab
HAS_GITLAB = True
except ImportError:
HAS_GITLAB = False
class InventoryModule(BaseInventoryPlugin, Constructable):
''' Host inventory parser for ansible using GitLab API as source. '''
NAME = 'community.general.gitlab_runners'
def _populate(self):
gl = gitlab.Gitlab(self.get_option('server_url'), private_token=self.get_option('api_token'))
self.inventory.add_group('gitlab_runners')
try:
if self.get_option('filter'):
runners = gl.runners.all(scope=self.get_option('filter'))
else:
runners = gl.runners.all()
for runner in runners:
host = str(runner['id'])
ip_address = runner['ip_address']
host_attrs = vars(gl.runners.get(runner['id']))['_attrs']
self.inventory.add_host(host, group='gitlab_runners')
self.inventory.set_variable(host, 'ansible_host', ip_address)
if self.get_option('verbose_output', True):
self.inventory.set_variable(host, 'gitlab_runner_attributes', host_attrs)
# Use constructed if applicable
strict = self.get_option('strict')
# Composed variables
self._set_composite_vars(self.get_option('compose'), host_attrs, host, strict=strict)
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
self._add_host_to_composed_groups(self.get_option('groups'), host_attrs, host, strict=strict)
# Create groups based on variable values and add the corresponding hosts to it
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_attrs, host, strict=strict)
except Exception as e:
raise AnsibleParserError('Unable to fetch hosts from GitLab API, this was the original exception: %s' % to_native(e))
def verify_file(self, path):
"""Return the possibly of a file being consumable by this plugin."""
return (
super(InventoryModule, self).verify_file(path) and
path.endswith(("gitlab_runners.yaml", "gitlab_runners.yml")))
def parse(self, inventory, loader, path, cache=True):
if not HAS_GITLAB:
raise AnsibleError('The GitLab runners dynamic inventory plugin requires python-gitlab: https://python-gitlab.readthedocs.io/en/stable/')
super(InventoryModule, self).parse(inventory, loader, path, cache)
self._read_config_data(path)
self._populate()


@@ -0,0 +1,294 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Cliff Hults <cliff.hlts@gmail.com>
# Copyright (c) 2021 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: icinga2
short_description: Icinga2 inventory source
version_added: 3.7.0
author:
- Cliff Hults (@BongoEADGC6) <cliff.hults@gmail.com>
description:
- Get inventory hosts from the Icinga2 API.
- "Uses a configuration file as an inventory source, it must end in
C(.icinga2.yml) or C(.icinga2.yaml)."
extends_documentation_fragment:
- constructed
options:
strict:
version_added: 4.4.0
compose:
version_added: 4.4.0
groups:
version_added: 4.4.0
keyed_groups:
version_added: 4.4.0
plugin:
description: Name of the plugin.
required: true
type: string
choices: ['community.general.icinga2']
url:
description: Root URL of Icinga2 API.
type: string
required: true
user:
description: Username to query the API.
type: string
required: true
password:
description: Password to query the API.
type: string
required: true
host_filter:
description:
- A valid Icinga2 API host filter. Leave blank for no filtering.
type: string
required: false
validate_certs:
description: Enables or disables SSL certificate verification.
type: boolean
default: true
inventory_attr:
description:
- Allows the override of the inventory name based on different attributes.
- This allows for changing the way limits are used.
- The current default, C(address), is sometimes not unique or present. We recommend using C(name) instead.
type: string
default: address
choices: ['name', 'display_name', 'address']
version_added: 4.2.0
'''
EXAMPLES = r'''
# my.icinga2.yml
plugin: community.general.icinga2
url: http://localhost:5665
user: ansible
password: secure
host_filter: \"linux-servers\" in host.groups
validate_certs: false
inventory_attr: name
groups:
# simple name matching
webservers: inventory_hostname.startswith('web')
# using icinga2 template
databaseservers: "'db-template' in (icinga2_attributes.templates|list)"
compose:
# set all icinga2 attributes to a host variable 'icinga2_attrs'
icinga2_attrs: icinga2_attributes
# set 'ansible_user' and 'ansible_port' from icinga2 host vars
ansible_user: icinga2_attributes.vars.ansible_user
ansible_port: icinga2_attributes.vars.ansible_port | default(22)
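# A second sketch layering keyed_groups on top of the constructed options shown above;
# zone is one of the attributes queried by this plugin, while the URL and credentials
# are illustrative assumptions.
plugin: community.general.icinga2
url: http://localhost:5665
user: ansible
password: secure
inventory_attr: name
keyed_groups:
  # create a group per Icinga2 zone, e.g. zone_master
  - prefix: zone
    key: icinga2_attributes.zone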
'''
import json
from ansible.errors import AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
class InventoryModule(BaseInventoryPlugin, Constructable):
''' Host inventory parser for ansible using Icinga2 as source. '''
NAME = 'community.general.icinga2'
def __init__(self):
super(InventoryModule, self).__init__()
# from config
self.icinga2_url = None
self.icinga2_user = None
self.icinga2_password = None
self.ssl_verify = None
self.host_filter = None
self.inventory_attr = None
self.cache_key = None
self.use_cache = None
def verify_file(self, path):
valid = False
if super(InventoryModule, self).verify_file(path):
if path.endswith(('icinga2.yaml', 'icinga2.yml')):
valid = True
else:
self.display.vvv('Skipping due to inventory source not ending in "icinga2.yaml" or "icinga2.yml"')
return valid
def _api_connect(self):
self.headers = {
'User-Agent': "ansible-icinga2-inv",
'Accept': "application/json",
}
api_status_url = self.icinga2_url + "/status"
request_args = {
'headers': self.headers,
'url_username': self.icinga2_user,
'url_password': self.icinga2_password,
'validate_certs': self.ssl_verify
}
open_url(api_status_url, **request_args)
def _post_request(self, request_url, data=None):
self.display.vvv("Requested URL: %s" % request_url)
request_args = {
'headers': self.headers,
'url_username': self.icinga2_user,
'url_password': self.icinga2_password,
'validate_certs': self.ssl_verify
}
if data is not None:
request_args['data'] = json.dumps(data)
self.display.vvv("Request Args: %s" % request_args)
try:
response = open_url(request_url, **request_args)
except HTTPError as e:
try:
error_body = json.loads(e.read().decode())
self.display.vvv("Error returned: {0}".format(error_body))
except Exception:
error_body = {"status": None}
if e.code == 404 and error_body.get('status') == "No objects found.":
raise AnsibleParserError("Host filter returned no data. Please confirm your host_filter value is valid")
raise AnsibleParserError("Unexpected data returned: {0} -- {1}".format(e, error_body))
response_body = response.read()
json_data = json.loads(response_body.decode('utf-8'))
self.display.vvv("Returned Data: %s" % json.dumps(json_data, indent=4, sort_keys=True))
if 200 <= response.status <= 299:
return json_data
if response.status == 404 and json_data['status'] == "No objects found.":
raise AnsibleParserError(
"API returned no data -- Response: %s - %s"
% (response.status, json_data['status']))
if response.status == 401:
raise AnsibleParserError(
"API was unable to complete query -- Response: %s - %s"
% (response.status, json_data['status']))
if response.status == 500:
raise AnsibleParserError(
"API Response - %s - %s"
% (json_data['status'], json_data['errors']))
raise AnsibleParserError(
"Unexpected data returned - %s - %s"
% (json_data['status'], json_data['errors']))
def _query_hosts(self, hosts=None, attrs=None, joins=None, host_filter=None):
query_hosts_url = "{0}/objects/hosts".format(self.icinga2_url)
self.headers['X-HTTP-Method-Override'] = 'GET'
data_dict = dict()
if hosts:
data_dict['hosts'] = hosts
if attrs is not None:
data_dict['attrs'] = attrs
if joins is not None:
data_dict['joins'] = joins
if host_filter is not None:
data_dict['filter'] = host_filter.replace("\\\"", "\"")
self.display.vvv(host_filter)
host_dict = self._post_request(query_hosts_url, data_dict)
return host_dict['results']
def get_inventory_from_icinga(self):
"""Query for all hosts """
self.display.vvv("Querying Icinga2 for inventory")
query_args = {
"attrs": ["address", "address6", "name", "display_name", "state_type", "state", "templates", "groups", "vars", "zone"],
}
if self.host_filter is not None:
query_args['host_filter'] = self.host_filter
# Icinga2 API Call
results_json = self._query_hosts(**query_args)
# Manipulate returned API data to Ansible inventory spec
ansible_inv = self._convert_inv(results_json)
return ansible_inv
def _apply_constructable(self, name, variables):
strict = self.get_option('strict')
self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict)
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict)
self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict)
def _populate(self):
groups = self._to_json(self.get_inventory_from_icinga())
return groups
def _to_json(self, in_dict):
"""Convert dictionary to JSON"""
return json.dumps(in_dict, sort_keys=True, indent=2)
def _convert_inv(self, json_data):
"""Convert Icinga2 API data to JSON format for Ansible"""
groups_dict = {"_meta": {"hostvars": {}}}
for entry in json_data:
host_attrs = entry['attrs']
if self.inventory_attr == "name":
host_name = entry.get('name')
if self.inventory_attr == "address":
# When looking for address for inventory, if missing fallback to object name
if host_attrs.get('address', '') != '':
host_name = host_attrs.get('address')
else:
host_name = entry.get('name')
if self.inventory_attr == "display_name":
host_name = host_attrs.get('display_name')
if host_attrs['state'] == 0:
host_attrs['state'] = 'on'
else:
host_attrs['state'] = 'off'
host_groups = host_attrs.get('groups')
self.inventory.add_host(host_name)
for group in host_groups:
if group not in self.inventory.groups.keys():
self.inventory.add_group(group)
self.inventory.add_child(group, host_name)
# If the address attribute is populated, override ansible_host with the value
if host_attrs.get('address') != '':
self.inventory.set_variable(host_name, 'ansible_host', host_attrs.get('address'))
self.inventory.set_variable(host_name, 'hostname', entry.get('name'))
self.inventory.set_variable(host_name, 'display_name', host_attrs.get('display_name'))
self.inventory.set_variable(host_name, 'state',
host_attrs['state'])
self.inventory.set_variable(host_name, 'state_type',
host_attrs['state_type'])
# Adds all attributes to a variable 'icinga2_attributes'
construct_vars = dict(self.inventory.get_host(host_name).get_vars())
construct_vars['icinga2_attributes'] = host_attrs
self._apply_constructable(host_name, construct_vars)
return groups_dict
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
# read config from file, this sets 'options'
self._read_config_data(path)
# Store the options from the YAML file
self.icinga2_url = self.get_option('url').rstrip('/') + '/v1'
self.icinga2_user = self.get_option('user')
self.icinga2_password = self.get_option('password')
self.ssl_verify = self.get_option('validate_certs')
self.host_filter = self.get_option('host_filter')
self.inventory_attr = self.get_option('inventory_attr')
# Not currently enabled
# self.cache_key = self.get_cache_key(path)
# self.use_cache = cache and self.get_option('cache')
# Test connection to API
self._api_connect()
# Call our internal helper to populate the dynamic inventory
self._populate()


@@ -0,0 +1,316 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
name: linode
author:
- Luke Murphy (@decentral1se)
short_description: Ansible dynamic inventory plugin for Linode.
requirements:
- python >= 2.7
- linode_api4 >= 2.0.0
description:
- Reads inventories from the Linode API v4.
- Uses a YAML configuration file that ends with linode.(yml|yaml).
- Linode labels are used by default as the hostnames.
- The default inventory groups are built from groups (deprecated by
Linode) and not tags.
extends_documentation_fragment:
- constructed
- inventory_cache
options:
cache:
version_added: 4.5.0
cache_plugin:
version_added: 4.5.0
cache_timeout:
version_added: 4.5.0
cache_connection:
version_added: 4.5.0
cache_prefix:
version_added: 4.5.0
plugin:
description: Marks this as an instance of the 'linode' plugin.
required: true
choices: ['linode', 'community.general.linode']
ip_style:
description: Populate hostvars with all information available from the Linode APIv4.
type: string
default: plain
choices:
- plain
- api
version_added: 3.6.0
access_token:
description: The Linode account personal access token.
required: true
env:
- name: LINODE_ACCESS_TOKEN
regions:
description: Populate inventory with instances in this region.
default: []
type: list
elements: string
tags:
description: Populate inventory only with instances which have at least one of the tags listed here.
default: []
type: list
elements: string
version_added: 2.0.0
types:
description: Populate inventory with instances with this type.
default: []
type: list
elements: string
strict:
version_added: 2.0.0
compose:
version_added: 2.0.0
groups:
version_added: 2.0.0
keyed_groups:
version_added: 2.0.0
'''
EXAMPLES = r'''
# Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment.
plugin: community.general.linode
# You can use Jinja to template the access token.
plugin: community.general.linode
access_token: "{{ lookup('ini', 'token', section='your_username', file='~/.config/linode-cli') }}"
# For older Ansible versions, you need to write this as:
# access_token: "{{ lookup('ini', 'token section=your_username file=~/.config/linode-cli') }}"
# Example with regions, types, groups and access token
plugin: community.general.linode
access_token: foobar
regions:
- eu-west
types:
- g5-standard-2
# Example with keyed_groups, groups, and compose
plugin: community.general.linode
access_token: foobar
keyed_groups:
- key: tags
separator: ''
- key: region
prefix: region
groups:
webservers: "'web' in (tags|list)"
mailservers: "'mail' in (tags|list)"
compose:
# By default, Ansible tries to connect to the label of the instance.
# Since that might not be a valid name to connect to, you can
# replace it with the first IPv4 address of the linode as follows:
ansible_ssh_host: ipv4[0]
ansible_port: 2222
# Example where control traffic limited to internal network
plugin: community.general.linode
access_token: foobar
ip_style: api
compose:
ansible_host: "ipv4 | community.general.json_query('[?public==`false`].address') | first"
'''
import os
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.six import string_types
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
try:
from linode_api4 import LinodeClient
from linode_api4.objects.linode import Instance
from linode_api4.errors import ApiError as LinodeApiError
HAS_LINODE = True
except ImportError:
HAS_LINODE = False
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
NAME = 'community.general.linode'
def _build_client(self, loader):
"""Build the Linode client."""
access_token = self.get_option('access_token')
if self.templar.is_template(access_token):
access_token = self.templar.template(variable=access_token, disable_lookups=False)
if access_token is None:
raise AnsibleError((
'Could not retrieve Linode access token '
'from plugin configuration sources'
))
self.client = LinodeClient(access_token)
def _get_instances_inventory(self):
"""Retrieve Linode instance information from cloud inventory."""
try:
self.instances = self.client.linode.instances()
except LinodeApiError as exception:
raise AnsibleError('Linode client raised: %s' % exception)
def _add_groups(self):
"""Add Linode instance groups to the dynamic inventory."""
self.linode_groups = set(
filter(None, [
instance.group
for instance
in self.instances
])
)
for linode_group in self.linode_groups:
self.inventory.add_group(linode_group)
def _filter_by_config(self):
"""Filter instances by user specified configuration."""
regions = self.get_option('regions')
if regions:
self.instances = [
instance for instance in self.instances
if instance.region.id in regions
]
types = self.get_option('types')
if types:
self.instances = [
instance for instance in self.instances
if instance.type.id in types
]
tags = self.get_option('tags')
if tags:
self.instances = [
instance for instance in self.instances
if any(tag in instance.tags for tag in tags)
]
def _add_instances_to_groups(self):
"""Add instance names to their dynamic inventory groups."""
for instance in self.instances:
self.inventory.add_host(instance.label, group=instance.group)
def _add_hostvars_for_instances(self):
"""Add hostvars for instances in the dynamic inventory."""
ip_style = self.get_option('ip_style')
for instance in self.instances:
hostvars = instance._raw_json
for hostvar_key in hostvars:
if ip_style == 'api' and hostvar_key in ['ipv4', 'ipv6']:
continue
self.inventory.set_variable(
instance.label,
hostvar_key,
hostvars[hostvar_key]
)
if ip_style == 'api':
ips = instance.ips.ipv4.public + instance.ips.ipv4.private
ips += [instance.ips.ipv6.slaac, instance.ips.ipv6.link_local]
ips += instance.ips.ipv6.pools
for ip_type in set(ip.type for ip in ips):
self.inventory.set_variable(
instance.label,
ip_type,
self._ip_data([ip for ip in ips if ip.type == ip_type])
)
def _ip_data(self, ip_list):
data = []
for ip in list(ip_list):
data.append(
{
'address': ip.address,
'subnet_mask': ip.subnet_mask,
'gateway': ip.gateway,
'public': ip.public,
'prefix': ip.prefix,
'rdns': ip.rdns,
'type': ip.type
}
)
return data
def _cacheable_inventory(self):
return [i._raw_json for i in self.instances]
def populate(self):
strict = self.get_option('strict')
self._filter_by_config()
self._add_groups()
self._add_instances_to_groups()
self._add_hostvars_for_instances()
for instance in self.instances:
variables = self.inventory.get_host(instance.label).get_vars()
self._add_host_to_composed_groups(
self.get_option('groups'),
variables,
instance.label,
strict=strict)
self._add_host_to_keyed_groups(
self.get_option('keyed_groups'),
variables,
instance.label,
strict=strict)
self._set_composite_vars(
self.get_option('compose'),
variables,
instance.label,
strict=strict)
def verify_file(self, path):
"""Verify the Linode configuration file."""
if super(InventoryModule, self).verify_file(path):
endings = ('linode.yaml', 'linode.yml')
if any((path.endswith(ending) for ending in endings)):
return True
return False
def parse(self, inventory, loader, path, cache=True):
"""Dynamically parse Linode the cloud inventory."""
super(InventoryModule, self).parse(inventory, loader, path)
self.instances = None
if not HAS_LINODE:
raise AnsibleError('the Linode dynamic inventory plugin requires linode_api4.')
self._read_config_data(path)
cache_key = self.get_cache_key(path)
if cache:
cache = self.get_option('cache')
update_cache = False
if cache:
try:
self.instances = [Instance(None, i["id"], i) for i in self._cache[cache_key]]
except KeyError:
update_cache = True
# Check for None rather than False in order to allow
# for empty sets of cached instances
if self.instances is None:
self._build_client(loader)
self._get_instances_inventory()
if update_cache:
self._cache[cache_key] = self._cacheable_inventory()
self.populate()

File diff suppressed because it is too large


@@ -0,0 +1,232 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: nmap
short_description: Uses nmap to find hosts to target
description:
- Uses a YAML configuration file with a valid YAML extension.
extends_documentation_fragment:
- constructed
- inventory_cache
requirements:
- nmap CLI installed
options:
plugin:
description: token that ensures this is a source file for the 'nmap' plugin.
required: true
choices: ['nmap', 'community.general.nmap']
sudo:
description: Set to C(true) to execute a C(sudo nmap) plugin scan.
version_added: 4.8.0
default: false
type: boolean
address:
description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation.
required: true
exclude:
description: List of addresses to exclude.
type: list
elements: string
ports:
description: Enable/disable scanning for open ports
type: boolean
default: true
ipv4:
description: use IPv4 type addresses
type: boolean
default: true
ipv6:
description: use IPv6 type addresses
type: boolean
default: true
notes:
- At least one of ipv4 or ipv6 is required to be True, both can be True, but they cannot both be False.
- 'TODO: add OS fingerprinting'
'''
EXAMPLES = '''
# inventory.config file in YAML format
plugin: community.general.nmap
strict: false
address: 192.168.0.0/24
# a sudo nmap scan to fully use nmap scan power.
plugin: community.general.nmap
sudo: true
strict: false
address: 192.168.0.0/24
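# A ping-only scan (no port scan) that skips selected addresses, using the ports and
# exclude options documented above; the addresses are illustrative
plugin: community.general.nmap
strict: false
address: 192.168.0.0/24
ports: false
exclude:
- 192.168.0.1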
'''
import os
import re
from subprocess import Popen, PIPE
from ansible import constants as C
from ansible.errors import AnsibleParserError
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.module_utils.common.process import get_bin_path
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
NAME = 'community.general.nmap'
find_host = re.compile(r'^Nmap scan report for ([\w,.,-]+)(?: \(([\w,.,:,\[,\]]+)\))?')
find_port = re.compile(r'^(\d+)/(\w+)\s+(\w+)\s+(\w+)')
def __init__(self):
self._nmap = None
super(InventoryModule, self).__init__()
def _populate(self, hosts):
# Use constructed if applicable
strict = self.get_option('strict')
for host in hosts:
hostname = host['name']
self.inventory.add_host(hostname)
for var, value in host.items():
self.inventory.set_variable(hostname, var, value)
# Composed variables
self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
# Create groups based on variable values and add the corresponding hosts to it
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
def verify_file(self, path):
valid = False
if super(InventoryModule, self).verify_file(path):
file_name, ext = os.path.splitext(path)
if not ext or ext in C.YAML_FILENAME_EXTENSIONS:
valid = True
return valid
def parse(self, inventory, loader, path, cache=True):
try:
self._nmap = get_bin_path('nmap')
except ValueError as e:
raise AnsibleParserError('nmap inventory plugin requires the nmap cli tool to work: {0}'.format(to_native(e)))
super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
self._read_config_data(path)
cache_key = self.get_cache_key(path)
# cache may be True or False at this point to indicate if the inventory is being refreshed
# get the user's cache option too to see if we should save the cache if it is changing
user_cache_setting = self.get_option('cache')
# read if the user has caching enabled and the cache isn't being refreshed
attempt_to_read_cache = user_cache_setting and cache
# update if the user has caching enabled and the cache is being refreshed; update this value to True if the cache has expired below
cache_needs_update = user_cache_setting and not cache
if attempt_to_read_cache:
try:
results = self._cache[cache_key]
except KeyError:
# This occurs if the cache_key is not in the cache or if the cache_key expired, so the cache needs to be updated
cache_needs_update = True
if not user_cache_setting or cache_needs_update:
# setup command
cmd = [self._nmap]
if self._options['sudo']:
cmd.insert(0, 'sudo')
if not self._options['ports']:
cmd.append('-sP')
if self._options['ipv4'] and not self._options['ipv6']:
cmd.append('-4')
elif self._options['ipv6'] and not self._options['ipv4']:
cmd.append('-6')
elif not self._options['ipv6'] and not self._options['ipv4']:
raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin')
if self._options['exclude']:
cmd.append('--exclude')
cmd.append(','.join(self._options['exclude']))
cmd.append(self._options['address'])
try:
# execute
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr)))
# parse results
host = None
ip = None
ports = []
results = []
try:
t_stdout = to_text(stdout, errors='surrogate_or_strict')
except UnicodeError as e:
raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e))
for line in t_stdout.splitlines():
hits = self.find_host.match(line)
if hits:
if host is not None and ports:
results[-1]['ports'] = ports
# if dns only shows arpa, just use ip instead as hostname
if hits.group(1).endswith('.in-addr.arpa'):
host = hits.group(2)
else:
host = hits.group(1)
# if no reverse dns exists, just use ip instead as hostname
if hits.group(2) is not None:
ip = hits.group(2)
else:
ip = hits.group(1)
if host is not None:
# update inventory
results.append(dict())
results[-1]['name'] = host
results[-1]['ip'] = ip
ports = []
continue
host_ports = self.find_port.match(line)
if host is not None and host_ports:
ports.append({'port': host_ports.group(1),
'protocol': host_ports.group(2),
'state': host_ports.group(3),
'service': host_ports.group(4)})
continue
# if any leftovers
if host and ports:
results[-1]['ports'] = ports
except Exception as e:
raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))
if cache_needs_update:
self._cache[cache_key] = results
self._populate(results)


@@ -0,0 +1,263 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
name: online
author:
- Remy Leone (@remyleone)
short_description: Scaleway (previously Online SAS or Online.net) inventory source
description:
- Get inventory hosts from Scaleway (previously Online SAS or Online.net).
options:
plugin:
description: token that ensures this is a source file for the 'online' plugin.
required: true
choices: ['online', 'community.general.online']
oauth_token:
required: true
description: Online OAuth token.
env:
# in order of precedence
- name: ONLINE_TOKEN
- name: ONLINE_API_KEY
- name: ONLINE_OAUTH_TOKEN
hostnames:
description: List of preferences for what to use as a hostname.
type: list
elements: string
default:
- public_ipv4
choices:
- public_ipv4
- private_ipv4
- hostname
groups:
description: List of groups.
type: list
elements: string
choices:
- location
- offer
- rpn
'''
EXAMPLES = r'''
# online_inventory.yml file in YAML format
# Example command line: ansible-inventory --list -i online_inventory.yml
plugin: community.general.online
hostnames:
- public_ipv4
groups:
- location
- offer
- rpn
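# A sketch relying on the ONLINE_TOKEN environment variable for oauth_token and
# falling back to the hostname attribute; the group list is illustrative
plugin: community.general.online
hostnames:
- public_ipv4
- hostname
groups:
- location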
'''
import json
from sys import version as python_version
from ansible.errors import AnsibleError
from ansible.module_utils.urls import open_url
from ansible.plugins.inventory import BaseInventoryPlugin
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.six.moves.urllib.parse import urljoin
class InventoryModule(BaseInventoryPlugin):
NAME = 'community.general.online'
API_ENDPOINT = "https://api.online.net"
def extract_public_ipv4(self, host_infos):
try:
return host_infos["network"]["ip"][0]
except (KeyError, TypeError, IndexError):
self.display.warning("An error happened while extracting public IPv4 address. Information skipped.")
return None
def extract_private_ipv4(self, host_infos):
try:
return host_infos["network"]["private"][0]
except (KeyError, TypeError, IndexError):
self.display.warning("An error happened while extracting private IPv4 address. Information skipped.")
return None
def extract_os_name(self, host_infos):
try:
return host_infos["os"]["name"]
except (KeyError, TypeError):
self.display.warning("An error happened while extracting OS name. Information skipped.")
return None
def extract_os_version(self, host_infos):
try:
return host_infos["os"]["version"]
except (KeyError, TypeError):
self.display.warning("An error happened while extracting OS version. Information skipped.")
return None
def extract_hostname(self, host_infos):
try:
return host_infos["hostname"]
except (KeyError, TypeError):
self.display.warning("An error happened while extracting hostname. Information skipped.")
return None
def extract_location(self, host_infos):
try:
return host_infos["location"]["datacenter"]
except (KeyError, TypeError):
self.display.warning("An error happened while extracting datacenter location. Information skipped.")
return None
def extract_offer(self, host_infos):
try:
return host_infos["offer"]
except (KeyError, TypeError):
self.display.warning("An error happened while extracting commercial offer. Information skipped.")
return None
def extract_rpn(self, host_infos):
try:
return self.rpn_lookup_cache[host_infos["id"]]
except (KeyError, TypeError):
self.display.warning("An error happened while extracting RPN information. Information skipped.")
return None
def _fetch_information(self, url):
try:
response = open_url(url, headers=self.headers)
except Exception as e:
self.display.warning("An error happened while fetching: %s" % url)
return None
try:
raw_data = to_text(response.read(), errors='surrogate_or_strict')
except UnicodeError:
raise AnsibleError("Incorrect encoding of fetched payload from Online servers")
try:
return json.loads(raw_data)
except ValueError:
raise AnsibleError("Incorrect JSON payload")
@staticmethod
def extract_rpn_lookup_cache(rpn_list):
lookup = {}
for rpn in rpn_list:
for member in rpn["members"]:
lookup[member["id"]] = rpn["name"]
return lookup
def _fill_host_variables(self, hostname, host_infos):
targeted_attributes = (
"offer",
"id",
"hostname",
"location",
"boot_mode",
"power",
"last_reboot",
"anti_ddos",
"hardware_watch",
"support"
)
for attribute in targeted_attributes:
self.inventory.set_variable(hostname, attribute, host_infos[attribute])
if self.extract_public_ipv4(host_infos=host_infos):
self.inventory.set_variable(hostname, "public_ipv4", self.extract_public_ipv4(host_infos=host_infos))
self.inventory.set_variable(hostname, "ansible_host", self.extract_public_ipv4(host_infos=host_infos))
if self.extract_private_ipv4(host_infos=host_infos):
self.inventory.set_variable(hostname, "public_ipv4", self.extract_private_ipv4(host_infos=host_infos))
if self.extract_os_name(host_infos=host_infos):
self.inventory.set_variable(hostname, "os_name", self.extract_os_name(host_infos=host_infos))
if self.extract_os_version(host_infos=host_infos):
self.inventory.set_variable(hostname, "os_version", self.extract_os_name(host_infos=host_infos))
def _filter_host(self, host_infos, hostname_preferences):
for pref in hostname_preferences:
if self.extractors[pref](host_infos):
return self.extractors[pref](host_infos)
return None
def do_server_inventory(self, host_infos, hostname_preferences, group_preferences):
hostname = self._filter_host(host_infos=host_infos,
hostname_preferences=hostname_preferences)
# No suitable hostname was found in the attributes, so the host won't be in the inventory
if not hostname:
return
self.inventory.add_host(host=hostname)
self._fill_host_variables(hostname=hostname, host_infos=host_infos)
for g in group_preferences:
group = self.group_extractors[g](host_infos)
if not group:
return
self.inventory.add_group(group=group)
self.inventory.add_host(group=group, host=hostname)
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
self._read_config_data(path=path)
token = self.get_option("oauth_token")
hostname_preferences = self.get_option("hostnames")
group_preferences = self.get_option("groups")
if group_preferences is None:
group_preferences = []
self.extractors = {
"public_ipv4": self.extract_public_ipv4,
"private_ipv4": self.extract_private_ipv4,
"hostname": self.extract_hostname,
}
self.group_extractors = {
"location": self.extract_location,
"offer": self.extract_offer,
"rpn": self.extract_rpn
}
self.headers = {
'Authorization': "Bearer %s" % token,
'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ', 1)[0]),
'Content-type': 'application/json'
}
servers_url = urljoin(InventoryModule.API_ENDPOINT, "api/v1/server")
servers_api_path = self._fetch_information(url=servers_url)
if "rpn" in group_preferences:
rpn_groups_url = urljoin(InventoryModule.API_ENDPOINT, "api/v1/rpn/group")
rpn_list = self._fetch_information(url=rpn_groups_url)
self.rpn_lookup_cache = self.extract_rpn_lookup_cache(rpn_list)
for server_api_path in servers_api_path:
server_url = urljoin(InventoryModule.API_ENDPOINT, server_api_path)
raw_server_info = self._fetch_information(url=server_url)
if raw_server_info is None:
continue
self.do_server_inventory(host_infos=raw_server_info,
hostname_preferences=hostname_preferences,
group_preferences=group_preferences)


@@ -0,0 +1,252 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2020, FELDSAM s.r.o. - FeldHost™ <support@feldhost.cz>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
name: opennebula
author:
- Kristian Feldsam (@feldsam)
short_description: OpenNebula inventory source
version_added: "3.8.0"
extends_documentation_fragment:
- constructed
description:
- Get inventory hosts from OpenNebula cloud.
- Uses a YAML configuration file ending with either I(opennebula.yml) or I(opennebula.yaml)
to set parameter values.
- Uses I(api_authfile), C(~/.one/one_auth), or C(ONE_AUTH) pointing to an OpenNebula credentials file.
options:
plugin:
description: Token that ensures this is a source file for the 'opennebula' plugin.
type: string
required: true
choices: [ community.general.opennebula ]
api_url:
description:
- URL of the OpenNebula RPC server.
- It is recommended to use HTTPS so that the username/password are not
transferred over the network unencrypted.
- If not set then the value of the C(ONE_URL) environment variable is used.
env:
- name: ONE_URL
required: true
type: string
api_username:
description:
- Name of the user to login into the OpenNebula RPC server. If not set
then the value of the C(ONE_USERNAME) environment variable is used.
env:
- name: ONE_USERNAME
type: string
api_password:
description:
- Password or a token of the user to login into OpenNebula RPC server.
- If not set, the value of the C(ONE_PASSWORD) environment variable is used.
env:
- name: ONE_PASSWORD
required: false
type: string
api_authfile:
description:
- If both I(api_username) and I(api_password) are not set, then it will try to
authenticate with the ONE auth file. Default path is C(~/.one/one_auth).
- Set environment variable C(ONE_AUTH) to override this path.
env:
- name: ONE_AUTH
required: false
type: string
hostname:
description: Field to match the hostname. Note that C(v4_first_ip) corresponds to the first IPv4 address found on the VM.
type: string
default: v4_first_ip
choices:
- v4_first_ip
- v6_first_ip
- name
filter_by_label:
description: Only return servers filtered by this label.
type: string
group_by_labels:
description: Create host groups by vm labels
type: bool
default: true
'''
EXAMPLES = r'''
# inventory_opennebula.yml file in YAML format
# Example command line: ansible-inventory --list -i inventory_opennebula.yml
# Pass a label filter to the API
plugin: community.general.opennebula
api_url: https://opennebula:2633/RPC2
filter_by_label: Cache
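# A sketch using the VM name as the inventory hostname and disabling label groups;
# all option names come from the documentation above, values are illustrative
plugin: community.general.opennebula
api_url: https://opennebula:2633/RPC2
api_username: ansible
api_password: secure
hostname: name
group_by_labels: false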
'''
try:
import pyone
HAS_PYONE = True
except ImportError:
HAS_PYONE = False
from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.module_utils.common.text.converters import to_native
from collections import namedtuple
import os
class InventoryModule(BaseInventoryPlugin, Constructable):
NAME = 'community.general.opennebula'
def verify_file(self, path):
valid = False
if super(InventoryModule, self).verify_file(path):
if path.endswith(('opennebula.yaml', 'opennebula.yml')):
valid = True
return valid
def _get_connection_info(self):
url = self.get_option('api_url')
username = self.get_option('api_username')
password = self.get_option('api_password')
authfile = self.get_option('api_authfile')
if not username and not password:
if authfile is None:
authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth")
try:
with open(authfile, "r") as fp:
authstring = fp.read().rstrip()
username, password = authstring.split(":")
except (OSError, IOError):
raise AnsibleError("Could not find or read ONE_AUTH file at '{e}'".format(e=authfile))
except Exception:
raise AnsibleError("Error occurs when reading ONE_AUTH file at '{e}'".format(e=authfile))
auth_params = namedtuple('auth', ('url', 'username', 'password'))
return auth_params(url=url, username=username, password=password)
def _get_vm_ipv4(self, vm):
nic = vm.TEMPLATE.get('NIC')
if isinstance(nic, dict):
nic = [nic]
for net in nic:
return net['IP']
return False
def _get_vm_ipv6(self, vm):
nic = vm.TEMPLATE.get('NIC')
if isinstance(nic, dict):
nic = [nic]
for net in nic:
if net.get('IP6_GLOBAL'):
return net['IP6_GLOBAL']
return False
def _get_vm_pool(self):
auth = self._get_connection_info()
if not (auth.username and auth.password):
raise AnsibleError('API Credentials missing. Check OpenNebula inventory file.')
else:
one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
# get hosts (VMs)
try:
vm_pool = one_client.vmpool.infoextended(-2, -1, -1, 3)
except Exception as e:
raise AnsibleError("Something happened during XML-RPC call: {e}".format(e=to_native(e)))
return vm_pool
def _retrieve_servers(self, label_filter=None):
vm_pool = self._get_vm_pool()
result = []
# iterate over hosts
for vm in vm_pool.VM:
server = vm.USER_TEMPLATE
labels = []
if vm.USER_TEMPLATE.get('LABELS'):
labels = [s for s in vm.USER_TEMPLATE.get('LABELS') if s == ',' or s == '-' or s.isalnum() or s.isspace()]
labels = ''.join(labels)
labels = labels.replace(' ', '_')
labels = labels.replace('-', '_')
labels = labels.split(',')
# filter by label
if label_filter is not None:
if label_filter not in labels:
continue
server['name'] = vm.NAME
server['LABELS'] = labels
server['v4_first_ip'] = self._get_vm_ipv4(vm)
server['v6_first_ip'] = self._get_vm_ipv6(vm)
result.append(server)
return result
def _populate(self):
hostname_preference = self.get_option('hostname')
group_by_labels = self.get_option('group_by_labels')
strict = self.get_option('strict')
# Ensure the top-level 'all' group exists
self.inventory.add_group(group='all')
filter_by_label = self.get_option('filter_by_label')
servers = self._retrieve_servers(filter_by_label)
for server in servers:
hostname = server['name']
# check for labels
if group_by_labels and server['LABELS']:
for label in server['LABELS']:
self.inventory.add_group(group=label)
self.inventory.add_host(host=hostname, group=label)
self.inventory.add_host(host=hostname, group='all')
for attribute, value in server.items():
self.inventory.set_variable(hostname, attribute, value)
if hostname_preference != 'name':
self.inventory.set_variable(hostname, 'ansible_host', server[hostname_preference])
if server.get('SSH_PORT'):
self.inventory.set_variable(hostname, 'ansible_port', server['SSH_PORT'])
# handle constructable implementation: get composed variables if any
self._set_composite_vars(self.get_option('compose'), server, hostname, strict=strict)
# groups based on jinja conditionals get added to specific groups
self._add_host_to_composed_groups(self.get_option('groups'), server, hostname, strict=strict)
# groups based on variables associated with them in the inventory
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), server, hostname, strict=strict)
def parse(self, inventory, loader, path, cache=True):
if not HAS_PYONE:
raise AnsibleError('OpenNebula Inventory plugin requires pyone to work!')
super(InventoryModule, self).parse(inventory, loader, path)
self._read_config_data(path=path)
self._populate()


@@ -0,0 +1,639 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>, Daniel Lobato Garcia <dlobatog@redhat.com>
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: proxmox
short_description: Proxmox inventory source
version_added: "1.2.0"
author:
- Jeffrey van Pelt (@Thulium-Drake) <jeff@vanpelt.one>
requirements:
- requests >= 1.1
description:
- Get inventory hosts from a Proxmox PVE cluster.
- "Uses a configuration file as an inventory source, it must end in C(.proxmox.yml) or C(.proxmox.yaml)"
- Will retrieve the first network interface with an IP for Proxmox nodes.
- Can retrieve LXC/QEMU configuration as facts.
extends_documentation_fragment:
- constructed
- inventory_cache
options:
plugin:
description: The name of this plugin, it should always be set to C(community.general.proxmox) for this plugin to recognize it as its own.
required: true
choices: ['community.general.proxmox']
type: str
url:
description:
- URL to Proxmox cluster.
- If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_URL) will be used instead.
- Since community.general 4.7.0 you can also use templating to specify the value of the I(url).
default: 'http://localhost:8006'
type: str
env:
- name: PROXMOX_URL
version_added: 2.0.0
user:
description:
- Proxmox authentication user.
- If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_USER) will be used instead.
- Since community.general 4.7.0 you can also use templating to specify the value of the I(user).
required: true
type: str
env:
- name: PROXMOX_USER
version_added: 2.0.0
password:
description:
- Proxmox authentication password.
- If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_PASSWORD) will be used instead.
- Since community.general 4.7.0 you can also use templating to specify the value of the I(password).
- If you do not specify a password, you must set I(token_id) and I(token_secret) instead.
type: str
env:
- name: PROXMOX_PASSWORD
version_added: 2.0.0
token_id:
description:
- Proxmox authentication token ID.
- If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_TOKEN_ID) will be used instead.
- To use token authentication, you must also specify I(token_secret). If you do not specify I(token_id) and I(token_secret),
you must set a password instead.
- Make sure to grant explicit pve permissions to the token or disable 'privilege separation' to use the user's privileges instead.
version_added: 4.8.0
type: str
env:
- name: PROXMOX_TOKEN_ID
token_secret:
description:
- Proxmox authentication token secret.
- If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_TOKEN_SECRET) will be used instead.
- To use token authentication, you must also specify I(token_id). If you do not specify I(token_id) and I(token_secret),
you must set a password instead.
version_added: 4.8.0
type: str
env:
- name: PROXMOX_TOKEN_SECRET
validate_certs:
description: Verify SSL certificate if using HTTPS.
type: boolean
default: true
group_prefix:
description: Prefix to apply to Proxmox groups.
default: proxmox_
type: str
facts_prefix:
description: Prefix to apply to LXC/QEMU config facts.
default: proxmox_
type: str
want_facts:
description:
- Gather LXC/QEMU configuration facts.
- When I(want_facts) is set to C(true), more details about the QEMU VM status become available, beyond the running and stopped states.
Currently, if the VM is running but suspended, the status will be reported as running and the machine will be in the C(running) group,
while its actual state is paused. See I(qemu_extended_statuses) for how to retrieve the real status.
default: false
type: bool
qemu_extended_statuses:
description:
- Requires I(want_facts) to be set to C(true) to function. This will allow you to differentiate between the C(paused) and C(prelaunch)
statuses of the QEMU VMs.
- This introduces the additional groups C(prelaunch) and C(paused), both prefixed with I(group_prefix).
default: false
type: bool
version_added: 5.1.0
want_proxmox_nodes_ansible_host:
version_added: 3.0.0
description:
- Whether to set C(ansible_host) for Proxmox nodes.
- When set to C(true), the first available interface will be used. This can be different from what you expect.
- The default of this option changed from C(true) to C(false) in community.general 6.0.0.
type: bool
default: false
filters:
version_added: 4.6.0
description: A list of Jinja templates that allow filtering hosts.
type: list
elements: str
default: []
strict:
version_added: 2.5.0
compose:
version_added: 2.5.0
groups:
version_added: 2.5.0
keyed_groups:
version_added: 2.5.0
'''
EXAMPLES = '''
# Minimal example which will not gather additional facts for QEMU/LXC guests
# By not specifying a URL the plugin will attempt to connect to the controller host on port 8006
# my.proxmox.yml
plugin: community.general.proxmox
user: ansible@pve
password: secure
# Note that this can easily give you wrong values as ansible_host. See further below for
# an example where this is set to `false` and where ansible_host is set with `compose`.
want_proxmox_nodes_ansible_host: true
# Instead of logging in with a password, Proxmox supports API token authentication since release 6.2.
plugin: community.general.proxmox
user: ci@pve
token_id: gitlab-1
token_secret: fa256e9c-26ab-41ec-82da-707a2c079829
# The secret can also be a vault string or passed via the environment variable PROXMOX_TOKEN_SECRET.
token_secret: !vault |
$ANSIBLE_VAULT;1.1;AES256
62353634333163633336343265623632626339313032653563653165313262343931643431656138
6134333736323265656466646539663134306166666237630a653363623262636663333762316136
34616361326263383766366663393837626437316462313332663736623066656237386531663731
3037646432383064630a663165303564623338666131353366373630656661333437393937343331
32643131386134396336623736393634373936356332623632306561356361323737313663633633
6231313333666361656537343562333337323030623732323833
# More complete example demonstrating the use of 'want_facts' and the constructed options
# Note that using facts returned by 'want_facts' in constructed options requires 'want_facts=true'
# my.proxmox.yml
plugin: community.general.proxmox
url: http://pve.domain.com:8006
user: ansible@pve
password: secure
validate_certs: false
want_facts: true
keyed_groups:
# proxmox_tags_parsed is an example of a fact only returned when 'want_facts=true'
- key: proxmox_tags_parsed
separator: ""
prefix: group
groups:
webservers: "'web' in (proxmox_tags_parsed|list)"
mailservers: "'mail' in (proxmox_tags_parsed|list)"
compose:
ansible_port: 2222
# Note that this can easily give you wrong values as ansible_host. See further below for
# an example where this is set to `false` and where ansible_host is set with `compose`.
want_proxmox_nodes_ansible_host: true
# Using the inventory to allow ansible to connect via the first IP address of the VM / Container
# (Default is connection by name of QEMU/LXC guests)
# Note: my_inv_var demonstrates how to add a string variable to every host used by the inventory.
# my.proxmox.yml
plugin: community.general.proxmox
url: http://pve.domain.com:8006
user: ansible@pve
password: secure
validate_certs: false
want_facts: true
want_proxmox_nodes_ansible_host: false
compose:
ansible_host: proxmox_ipconfig0.ip | default(proxmox_net0.ip) | ipaddr('address')
my_inv_var_1: "'my_var1_value'"
my_inv_var_2: >
"my_var_2_value"
# Specify the url, user and password using templating
# my.proxmox.yml
plugin: community.general.proxmox
url: "{{ lookup('ansible.builtin.ini', 'url', section='proxmox', file='file.ini') }}"
user: "{{ lookup('ansible.builtin.env','PM_USER') | default('ansible@pve') }}"
password: "{{ lookup('community.general.random_string', base64=True) }}"
# Note that this can easily give you wrong values as ansible_host. See further up for
# an example where this is set to `false` and where ansible_host is set with `compose`.
want_proxmox_nodes_ansible_host: true
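# Limit the inventory to running guests via the 'filters' option
# (a sketch: the proxmox_status fact assumes want_facts=true and the default facts_prefix)
# my.proxmox.yml
plugin: community.general.proxmox
url: http://pve.domain.com:8006
user: ansible@pve
password: secure
want_facts: true
filters:
- proxmox_status == "running"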
'''
import itertools
import re
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.utils.display import Display
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
# 3rd party imports
try:
import requests
if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
raise ImportError
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
display = Display()
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
''' Host inventory parser for ansible using Proxmox as source. '''
NAME = 'community.general.proxmox'
def __init__(self):
super(InventoryModule, self).__init__()
# from config
self.proxmox_url = None
self.session = None
self.cache_key = None
self.use_cache = None
def verify_file(self, path):
valid = False
if super(InventoryModule, self).verify_file(path):
if path.endswith(('proxmox.yaml', 'proxmox.yml')):
valid = True
else:
self.display.vvv('Skipping due to inventory source not ending in "proxmox.yaml" nor "proxmox.yml"')
return valid
def _get_session(self):
if not self.session:
self.session = requests.session()
self.session.verify = self.get_option('validate_certs')
return self.session
def _get_auth(self):
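# Two authentication modes are supported: username/password (requests a ticket and sends it
# back as a PVEAuthCookie) and API tokens (sent directly in the Authorization header).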
if self.proxmox_password:
credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, })
a = self._get_session()
ret = a.post('%s/api2/json/access/ticket' % self.proxmox_url, data=credentials)
json = ret.json()
self.headers = {
# only required for POST/PUT/DELETE methods, which we are not using currently
# 'CSRFPreventionToken': json['data']['CSRFPreventionToken'],
'Cookie': 'PVEAuthCookie={0}'.format(json['data']['ticket'])
}
else:
self.headers = {'Authorization': 'PVEAPIToken={0}!{1}={2}'.format(self.proxmox_user, self.proxmox_token_id, self.proxmox_token_secret)}
def _get_json(self, url, ignore_errors=None):
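# Fetch the URL once and store the result per URL under this inventory's cache key;
# when caching is enabled, subsequent calls for the same URL are served from the cache.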
if not self.use_cache or url not in self._cache.get(self.cache_key, {}):
if self.cache_key not in self._cache:
self._cache[self.cache_key] = {'url': ''}
data = []
s = self._get_session()
while True:
ret = s.get(url, headers=self.headers)
if ignore_errors and ret.status_code in ignore_errors:
break
ret.raise_for_status()
json = ret.json()
# process results
# FIXME: This assumes 'return type' matches a specific query,
# it will break if we expand the queries and they don't have different types
if 'data' not in json:
# /hosts/:id does not have a 'data' key
data = json
break
elif isinstance(json['data'], MutableMapping):
# /facts are returned as dict in 'data'
data = json['data']
break
else:
# /hosts' 'results' is a list of all hosts; the returned data is paginated
data = data + json['data']
break
self._cache[self.cache_key][url] = data
return self._cache[self.cache_key][url]
def _get_nodes(self):
return self._get_json("%s/api2/json/nodes" % self.proxmox_url)
def _get_pools(self):
return self._get_json("%s/api2/json/pools" % self.proxmox_url)
def _get_lxc_per_node(self, node):
return self._get_json("%s/api2/json/nodes/%s/lxc" % (self.proxmox_url, node))
def _get_qemu_per_node(self, node):
return self._get_json("%s/api2/json/nodes/%s/qemu" % (self.proxmox_url, node))
def _get_members_per_pool(self, pool):
ret = self._get_json("%s/api2/json/pools/%s" % (self.proxmox_url, pool))
return ret['members']
def _get_node_ip(self, node):
ret = self._get_json("%s/api2/json/nodes/%s/network" % (self.proxmox_url, node))
for iface in ret:
try:
return iface['address']
except Exception:
return None
def _get_agent_network_interfaces(self, node, vmid, vmtype):
result = []
try:
ifaces = self._get_json(
"%s/api2/json/nodes/%s/%s/%s/agent/network-get-interfaces" % (
self.proxmox_url, node, vmtype, vmid
)
)['result']
if "error" in ifaces:
if "class" in ifaces["error"]:
# This happens on Windows: even though the qemu agent is running, the IP address
# cannot be fetched because it is unsupported; the command may also be disabled.
errorClass = ifaces["error"]["class"]
if errorClass in ["Unsupported"]:
self.display.v("Retrieving network interfaces from guest agents on windows with older qemu-guest-agents is not supported")
elif errorClass in ["CommandDisabled"]:
self.display.v("Retrieving network interfaces from guest agents has been disabled")
return result
for iface in ifaces:
result.append({
'name': iface['name'],
'mac-address': iface['hardware-address'] if 'hardware-address' in iface else '',
'ip-addresses': ["%s/%s" % (ip['ip-address'], ip['prefix']) for ip in iface['ip-addresses']] if 'ip-addresses' in iface else []
})
except requests.HTTPError:
pass
return result
def _get_vm_config(self, properties, node, vmid, vmtype, name):
ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid))
properties[self._fact('node')] = node
properties[self._fact('vmid')] = vmid
properties[self._fact('vmtype')] = vmtype
plaintext_configs = [
'description',
]
for config in ret:
key = self._fact(config)
value = ret[config]
try:
# fixup disk images as they have no key
if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')):
value = ('disk_image=' + value)
# Additional field containing parsed tags as list
if config == 'tags':
stripped_value = value.strip()
if stripped_value:
parsed_key = key + "_parsed"
properties[parsed_key] = [tag.strip() for tag in stripped_value.split(",")]
# The first field in the agent string tells you whether the agent is enabled;
# the rest of the comma-separated string is extra config for the agent.
# In newer versions of Proxmox the first field can also be 'enabled=1'.
if config == 'agent':
agent_enabled = 0
try:
agent_enabled = int(value.split(',')[0])
except ValueError:
if value.split(',')[0] == "enabled=1":
agent_enabled = 1
if agent_enabled:
agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype)
if agent_iface_value:
agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces"))
properties[agent_iface_key] = agent_iface_value
if config == 'lxc':
out_val = {}
for k, v in value:
if k.startswith('lxc.'):
k = k[len('lxc.'):]
out_val[k] = v
value = out_val
if config not in plaintext_configs and isinstance(value, string_types) \
and all("=" in v for v in value.split(",")):
# split off strings with commas to a dict
# skip over any keys that cannot be processed
try:
value = dict(key.split("=", 1) for key in value.split(","))
except Exception:
continue
properties[key] = value
except NameError:
return None
def _get_vm_status(self, properties, node, vmid, vmtype, name):
ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/status/current" % (self.proxmox_url, node, vmtype, vmid))
properties[self._fact('status')] = ret['status']
if vmtype == 'qemu':
properties[self._fact('qmpstatus')] = ret['qmpstatus']
def _get_vm_snapshots(self, properties, node, vmid, vmtype, name):
ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/snapshot" % (self.proxmox_url, node, vmtype, vmid))
snapshots = [snapshot['name'] for snapshot in ret if snapshot['name'] != 'current']
properties[self._fact('snapshots')] = snapshots
def to_safe(self, word):
'''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
#> ProxmoxInventory.to_safe("foo-bar baz")
'foo_barbaz'
'''
regex = r"[^A-Za-z0-9\_]"
return re.sub(regex, "_", word.replace(" ", ""))
def _fact(self, name):
'''Generate a fact's full name from the common prefix and a name.'''
return self.to_safe('%s%s' % (self.facts_prefix, name.lower()))
def _group(self, name):
'''Generate a group's full name from the common prefix and a name.'''
return self.to_safe('%s%s' % (self.group_prefix, name.lower()))
def _can_add_host(self, name, properties):
'''Ensure that a host satisfies all defined hosts filters. If strict mode is
enabled, any error during host filter compositing will lead to an AnsibleError
being raised, otherwise the filter will be ignored.
'''
for host_filter in self.host_filters:
try:
if not self._compose(host_filter, properties):
return False
except Exception as e: # pylint: disable=broad-except
message = "Could not evaluate host filter %s for host %s - %s" % (host_filter, name, to_native(e))
if self.strict:
raise AnsibleError(message)
display.warning(message)
return True
def _add_host(self, name, variables):
self.inventory.add_host(name)
for k, v in variables.items():
self.inventory.set_variable(name, k, v)
variables = self.inventory.get_host(name).get_vars()
self._set_composite_vars(self.get_option('compose'), variables, name, strict=self.strict)
self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=self.strict)
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=self.strict)
def _handle_item(self, node, ittype, item):
'''Handle an item from the list of LXC containers and Qemu VM. The
return value will be either None if the item was skipped or the name of
the item if it was added to the inventory.'''
if item.get('template'):
return None
properties = dict()
name, vmid = item['name'], item['vmid']
# get status, config and snapshots if want_facts == True
want_facts = self.get_option('want_facts')
if want_facts:
self._get_vm_status(properties, node, vmid, ittype, name)
self._get_vm_config(properties, node, vmid, ittype, name)
self._get_vm_snapshots(properties, node, vmid, ittype, name)
# ensure the host satisfies filters
if not self._can_add_host(name, properties):
return None
# add the host to the inventory
self._add_host(name, properties)
node_type_group = self._group('%s_%s' % (node, ittype))
self.inventory.add_child(self._group('all_' + ittype), name)
self.inventory.add_child(node_type_group, name)
item_status = item['status']
if item_status == 'running':
if want_facts and ittype == 'qemu' and self.get_option('qemu_extended_statuses'):
# get more details about the status of the qemu VM
item_status = properties.get(self._fact('qmpstatus'), item_status)
self.inventory.add_child(self._group('all_%s' % (item_status, )), name)
return name
def _populate_pool_groups(self, added_hosts):
'''Generate groups from Proxmox resource pools, ignoring VMs and
containers that were skipped.'''
for pool in self._get_pools():
poolid = pool.get('poolid')
if not poolid:
continue
pool_group = self._group('pool_' + poolid)
self.inventory.add_group(pool_group)
for member in self._get_members_per_pool(poolid):
name = member.get('name')
if name and name in added_hosts:
self.inventory.add_child(pool_group, name)
def _populate(self):
# create common groups
default_groups = ['lxc', 'qemu', 'running', 'stopped']
if self.get_option('qemu_extended_statuses'):
default_groups.extend(['prelaunch', 'paused'])
for group in default_groups:
self.inventory.add_group(self._group('all_%s' % (group)))
nodes_group = self._group('nodes')
self.inventory.add_group(nodes_group)
want_proxmox_nodes_ansible_host = self.get_option("want_proxmox_nodes_ansible_host")
# gather vm's on nodes
self._get_auth()
hosts = []
for node in self._get_nodes():
if not node.get('node'):
continue
self.inventory.add_host(node['node'])
if node['type'] == 'node':
self.inventory.add_child(nodes_group, node['node'])
if node['status'] == 'offline':
continue
# get node IP address
if want_proxmox_nodes_ansible_host:
ip = self._get_node_ip(node['node'])
self.inventory.set_variable(node['node'], 'ansible_host', ip)
# add LXC/Qemu groups for the node
for ittype in ('lxc', 'qemu'):
node_type_group = self._group('%s_%s' % (node['node'], ittype))
self.inventory.add_group(node_type_group)
# get LXC containers and Qemu VMs for this node
lxc_objects = zip(itertools.repeat('lxc'), self._get_lxc_per_node(node['node']))
qemu_objects = zip(itertools.repeat('qemu'), self._get_qemu_per_node(node['node']))
for ittype, item in itertools.chain(lxc_objects, qemu_objects):
name = self._handle_item(node['node'], ittype, item)
if name is not None:
hosts.append(name)
# gather vm's in pools
self._populate_pool_groups(hosts)
def parse(self, inventory, loader, path, cache=True):
if not HAS_REQUESTS:
raise AnsibleError('This module requires Python Requests 1.1.0 or higher: '
'https://github.com/psf/requests.')
super(InventoryModule, self).parse(inventory, loader, path)
# read config from file, this sets 'options'
self._read_config_data(path)
# read and template auth options
for o in ('url', 'user', 'password', 'token_id', 'token_secret'):
v = self.get_option(o)
if self.templar.is_template(v):
v = self.templar.template(v, disable_lookups=False)
setattr(self, 'proxmox_%s' % o, v)
# some more cleanup and validation
self.proxmox_url = self.proxmox_url.rstrip('/')
if self.proxmox_password is None and (self.proxmox_token_id is None or self.proxmox_token_secret is None):
raise AnsibleError('You must specify either a password or both token_id and token_secret.')
if self.get_option('qemu_extended_statuses') and not self.get_option('want_facts'):
raise AnsibleError('You must set want_facts to True if you want to use qemu_extended_statuses.')
# read rest of options
self.cache_key = self.get_cache_key(path)
self.use_cache = cache and self.get_option('cache')
self.host_filters = self.get_option('filters')
self.group_prefix = self.get_option('group_prefix')
self.facts_prefix = self.get_option('facts_prefix')
self.strict = self.get_option('strict')
# actually populate inventory
self._populate()

View File

@@ -0,0 +1,344 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
name: scaleway
author:
- Remy Leone (@remyleone)
short_description: Scaleway inventory source
description:
- Get inventory hosts from Scaleway.
requirements:
- PyYAML
options:
plugin:
description: Token that ensures this is a source file for the 'scaleway' plugin.
required: true
choices: ['scaleway', 'community.general.scaleway']
regions:
description: Filter results on a specific Scaleway region.
type: list
elements: string
default:
- ams1
- par1
- par2
- waw1
tags:
description: Filter results on a specific tag.
type: list
elements: string
scw_profile:
description:
- The config profile to use in config file.
- By default uses the one specified as C(active_profile) in the config file, or falls back to C(default) if that is not defined.
type: string
version_added: 4.4.0
oauth_token:
description:
- Scaleway OAuth token.
- If not explicitly defined or in environment variables, it will try to look it up in the scaleway-cli configuration file
(C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)).
- More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/).
env:
# in order of precedence
- name: SCW_TOKEN
- name: SCW_API_KEY
- name: SCW_OAUTH_TOKEN
hostnames:
description: List of preferences about what to use as a hostname.
type: list
elements: string
default:
- public_ipv4
choices:
- public_ipv4
- private_ipv4
- public_ipv6
- hostname
- id
variables:
description: 'Set individual variables: keys are variable names and
values are templates. Any value returned by the
L(Scaleway API, https://developer.scaleway.com/#servers-server-get)
can be used.'
type: dict
'''
EXAMPLES = r'''
# scaleway_inventory.yml file in YAML format
# Example command line: ansible-inventory --list -i scaleway_inventory.yml
# use hostname as inventory_hostname
# use the private IP address to connect to the host
plugin: community.general.scaleway
regions:
- ams1
- par1
tags:
- foobar
hostnames:
- hostname
variables:
ansible_host: private_ip
state: state
# use hostname as inventory_hostname and public IP address to connect to the host
plugin: community.general.scaleway
hostnames:
- hostname
regions:
- par1
variables:
ansible_host: public_ip.address
# Using static strings as variables
plugin: community.general.scaleway
hostnames:
- hostname
variables:
ansible_host: public_ip.address
ansible_connection: "'ssh'"
ansible_user: "'admin'"
'''
import os
import json
try:
import yaml
except ImportError as exc:
YAML_IMPORT_ERROR = exc
else:
YAML_IMPORT_ERROR = None
from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link
from ansible.module_utils.urls import open_url
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.six import raise_from
import ansible.module_utils.six.moves.urllib.parse as urllib_parse
def _fetch_information(token, url):
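# Collect all servers, following pagination: the Scaleway API exposes the next page
# through the 'Link' response header.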
results = []
paginated_url = url
while True:
try:
response = open_url(paginated_url,
headers={'X-Auth-Token': token,
'Content-type': 'application/json'})
except Exception as e:
raise AnsibleError("Error while fetching %s: %s" % (url, to_native(e)))
try:
raw_json = json.loads(to_text(response.read()))
except ValueError:
raise AnsibleError("Incorrect JSON payload")
try:
results.extend(raw_json["servers"])
except KeyError:
raise AnsibleError("Incorrect format from the Scaleway API response")
link = response.headers['Link']
if not link:
return results
relations = parse_pagination_link(link)
if 'next' not in relations:
return results
paginated_url = urllib_parse.urljoin(paginated_url, relations['next'])
def _build_server_url(api_endpoint):
return "/".join([api_endpoint, "servers"])
def extract_public_ipv4(server_info):
try:
return server_info["public_ip"]["address"]
except (KeyError, TypeError):
return None
def extract_private_ipv4(server_info):
try:
return server_info["private_ip"]
except (KeyError, TypeError):
return None
def extract_hostname(server_info):
try:
return server_info["hostname"]
except (KeyError, TypeError):
return None
def extract_server_id(server_info):
try:
return server_info["id"]
except (KeyError, TypeError):
return None
def extract_public_ipv6(server_info):
try:
return server_info["ipv6"]["address"]
except (KeyError, TypeError):
return None
def extract_tags(server_info):
try:
return server_info["tags"]
except (KeyError, TypeError):
return None
def extract_zone(server_info):
try:
return server_info["location"]["zone_id"]
except (KeyError, TypeError):
return None
extractors = {
"public_ipv4": extract_public_ipv4,
"private_ipv4": extract_private_ipv4,
"public_ipv6": extract_public_ipv6,
"hostname": extract_hostname,
"id": extract_server_id
}
class InventoryModule(BaseInventoryPlugin, Constructable):
NAME = 'community.general.scaleway'
def _fill_host_variables(self, host, server_info):
targeted_attributes = (
"arch",
"commercial_type",
"id",
"organization",
"state",
"hostname",
)
for attribute in targeted_attributes:
self.inventory.set_variable(host, attribute, server_info[attribute])
self.inventory.set_variable(host, "tags", server_info["tags"])
if extract_public_ipv6(server_info=server_info):
self.inventory.set_variable(host, "public_ipv6", extract_public_ipv6(server_info=server_info))
if extract_public_ipv4(server_info=server_info):
self.inventory.set_variable(host, "public_ipv4", extract_public_ipv4(server_info=server_info))
if extract_private_ipv4(server_info=server_info):
self.inventory.set_variable(host, "private_ipv4", extract_private_ipv4(server_info=server_info))
def _get_zones(self, config_zones):
return set(SCALEWAY_LOCATION.keys()).intersection(config_zones)
def match_groups(self, server_info, tags):
server_zone = extract_zone(server_info=server_info)
server_tags = extract_tags(server_info=server_info)
# If a server does not have a zone, it means it is archived
if server_zone is None:
return set()
# If no filtering is defined, all tags are valid groups
if tags is None:
return set(server_tags).union((server_zone,))
matching_tags = set(server_tags).intersection(tags)
if not matching_tags:
return set()
return matching_tags.union((server_zone,))
def _filter_host(self, host_infos, hostname_preferences):
for pref in hostname_preferences:
if extractors[pref](host_infos):
return extractors[pref](host_infos)
return None
def do_zone_inventory(self, zone, token, tags, hostname_preferences):
self.inventory.add_group(zone)
zone_info = SCALEWAY_LOCATION[zone]
url = _build_server_url(zone_info["api_endpoint"])
raw_zone_hosts_infos = _fetch_information(url=url, token=token)
for host_infos in raw_zone_hosts_infos:
hostname = self._filter_host(host_infos=host_infos,
hostname_preferences=hostname_preferences)
# No suitable hostname was found in the attributes, so the host won't be in the inventory
if not hostname:
continue
groups = self.match_groups(host_infos, tags)
for group in groups:
self.inventory.add_group(group=group)
self.inventory.add_host(group=group, host=hostname)
self._fill_host_variables(host=hostname, server_info=host_infos)
# Composed variables
self._set_composite_vars(self.get_option('variables'), host_infos, hostname, strict=False)
def get_oauth_token(self):
oauth_token = self.get_option('oauth_token')
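# Resolve the scaleway-cli config location: $SCW_CONFIG_PATH first, then
# $XDG_CONFIG_HOME/scw/config.yaml, falling back to ~/.config/scw/config.yaml.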
if 'SCW_CONFIG_PATH' in os.environ:
scw_config_path = os.getenv('SCW_CONFIG_PATH')
elif 'XDG_CONFIG_HOME' in os.environ:
scw_config_path = os.path.join(os.getenv('XDG_CONFIG_HOME'), 'scw', 'config.yaml')
else:
scw_config_path = os.path.join(os.path.expanduser('~'), '.config', 'scw', 'config.yaml')
if not oauth_token and os.path.exists(scw_config_path):
with open(scw_config_path) as fh:
scw_config = yaml.safe_load(fh)
ansible_profile = self.get_option('scw_profile')
if ansible_profile:
active_profile = ansible_profile
else:
active_profile = scw_config.get('active_profile', 'default')
if active_profile == 'default':
oauth_token = scw_config.get('secret_key')
else:
oauth_token = scw_config['profiles'][active_profile].get('secret_key')
return oauth_token
def parse(self, inventory, loader, path, cache=True):
if YAML_IMPORT_ERROR:
raise_from(AnsibleError('PyYAML is probably missing'), YAML_IMPORT_ERROR)
super(InventoryModule, self).parse(inventory, loader, path)
self._read_config_data(path=path)
config_zones = self.get_option("regions")
tags = self.get_option("tags")
token = self.get_oauth_token()
if not token:
raise AnsibleError("'oauth_token' value is null, you must configure it either in inventory, envvars or scaleway-cli config.")
hostname_preference = self.get_option("hostnames")
for zone in self._get_zones(config_zones):
self.do_zone_inventory(zone=zone, token=token, tags=tags, hostname_preferences=hostname_preference)

View File

@@ -0,0 +1,283 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Shay Rybak <shay.rybak@stackpath.com>
# Copyright (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: stackpath_compute
short_description: StackPath Edge Computing inventory source
version_added: 1.2.0
author:
- UNKNOWN (@shayrybak)
extends_documentation_fragment:
- inventory_cache
- constructed
description:
- Get inventory hosts from StackPath Edge Computing.
- Uses a YAML configuration file that ends with stackpath_compute.(yml|yaml).
options:
plugin:
description:
- A token that ensures this is a source file for the plugin.
required: true
choices: ['community.general.stackpath_compute']
client_id:
description:
- An OAuth client ID generated from the API Management section of the StackPath customer portal
U(https://control.stackpath.net/api-management).
required: true
type: str
client_secret:
description:
- An OAuth client secret generated from the API Management section of the StackPath customer portal
U(https://control.stackpath.net/api-management).
required: true
type: str
stack_slugs:
description:
- A list of Stack slugs to query instances in. If not specified, instances in all stacks on the account are returned.
type: list
elements: str
use_internal_ip:
description:
- Whether or not to use internal IP addresses. If C(false), external IP addresses are used; otherwise internal ones.
- If an instance doesn't have an external IP, it will not be returned when this option is set to C(false).
type: bool
'''
EXAMPLES = '''
# Example using credentials to fetch all workload instances in a stack.
---
plugin: community.general.stackpath_compute
client_id: my_client_id
client_secret: my_client_secret
stack_slugs:
- my_first_stack_slug
- my_other_stack_slug
use_internal_ip: false
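# Example querying all stacks on the account (no stack_slugs given) and using internal IP addresses.
plugin: community.general.stackpath_compute
client_id: my_client_id
client_secret: my_client_secret
use_internal_ip: true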
'''
import traceback
import json
from ansible.errors import AnsibleError
from ansible.module_utils.urls import open_url
from ansible.plugins.inventory import (
BaseInventoryPlugin,
Constructable,
Cacheable
)
from ansible.utils.display import Display
display = Display()
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
NAME = 'community.general.stackpath_compute'
def __init__(self):
super(InventoryModule, self).__init__()
# credentials
self.client_id = None
self.client_secret = None
self.stack_slug = None
self.api_host = "https://gateway.stackpath.com"
self.group_keys = [
"stackSlug",
"workloadId",
"cityCode",
"countryCode",
"continent",
"target",
"name",
"workloadSlug"
]
def _validate_config(self, config):
if config['plugin'] != 'community.general.stackpath_compute':
raise AnsibleError("plugin doesn't match this plugin")
try:
client_id = config['client_id']
if len(client_id) != 32:
raise AnsibleError("client_id must be 32 characters long")
except KeyError:
raise AnsibleError("config missing client_id, a required option")
try:
client_secret = config['client_secret']
if len(client_secret) != 64:
raise AnsibleError("client_secret must be 64 characters long")
except KeyError:
raise AnsibleError("config missing client_id, a required option")
return True
def _set_credentials(self):
'''
Set the OAuth client credentials from the plugin options.
'''
self.client_id = self.get_option('client_id')
self.client_secret = self.get_option('client_secret')
def _authenticate(self):
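# Exchange the OAuth client ID and secret for a bearer token using the client_credentials grant.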
payload = json.dumps(
{
"client_id": self.client_id,
"client_secret": self.client_secret,
"grant_type": "client_credentials",
}
)
headers = {
"Content-Type": "application/json",
}
resp = open_url(
self.api_host + '/identity/v1/oauth2/token',
headers=headers,
data=payload,
method="POST"
)
status_code = resp.code
if status_code == 200:
body = resp.read()
self.auth_token = json.loads(body)["access_token"]
def _query(self):
results = []
workloads = []
self._authenticate()
for stack_slug in self.stack_slugs:
try:
workloads = self._stackpath_query_get_list(self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads')
except Exception:
raise AnsibleError("Failed to get workloads from the StackPath API: %s" % traceback.format_exc())
for workload in workloads:
try:
workload_instances = self._stackpath_query_get_list(
self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads/' + workload["id"] + '/instances'
)
except Exception:
raise AnsibleError("Failed to get workload instances from the StackPath API: %s" % traceback.format_exc())
for instance in workload_instances:
if instance["phase"] == "RUNNING":
instance["stackSlug"] = stack_slug
instance["workloadId"] = workload["id"]
instance["workloadSlug"] = workload["slug"]
instance["cityCode"] = instance["location"]["cityCode"]
instance["countryCode"] = instance["location"]["countryCode"]
instance["continent"] = instance["location"]["continent"]
instance["target"] = instance["metadata"]["labels"]["workload.platform.stackpath.net/target-name"]
try:
if instance[self.hostname_key]:
results.append(instance)
except KeyError:
pass
return results
def _populate(self, instances):
for instance in instances:
for group_key in self.group_keys:
group = group_key + "_" + instance[group_key]
group = group.lower().replace(" ", "_").replace("-", "_")
self.inventory.add_group(group)
self.inventory.add_host(instance[self.hostname_key],
group=group)
def _stackpath_query_get_list(self, url):
self._authenticate()
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + self.auth_token,
}
next_page = True
result = []
cursor = '-1'
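# Walk the paginated results 10 at a time using cursor-based pagination (page_request.after).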
while next_page:
resp = open_url(
url + '?page_request.first=10&page_request.after=%s' % cursor,
headers=headers,
method="GET"
)
status_code = resp.code
if status_code == 200:
body = resp.read()
body_json = json.loads(body)
result.extend(body_json["results"])
next_page = body_json["pageInfo"]["hasNextPage"]
if next_page:
cursor = body_json["pageInfo"]["endCursor"]
return result
def _get_stack_slugs(self, stacks):
self.stack_slugs = [stack["slug"] for stack in stacks]
def verify_file(self, path):
'''
:param path: the path to the inventory config file
:return: True if the file name matches the expected pattern for this plugin, False otherwise
'''
if super(InventoryModule, self).verify_file(path):
if path.endswith(('stackpath_compute.yml', 'stackpath_compute.yaml')):
return True
display.debug(
"stackpath_compute inventory filename must end with \
'stackpath_compute.yml' or 'stackpath_compute.yaml'"
)
return False
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
config = self._read_config_data(path)
self._validate_config(config)
self._set_credentials()
# get user specifications
self.use_internal_ip = self.get_option('use_internal_ip')
if self.use_internal_ip:
self.hostname_key = "ipAddress"
else:
self.hostname_key = "externalIpAddress"
self.stack_slugs = self.get_option('stack_slugs')
if not self.stack_slugs:
try:
stacks = self._stackpath_query_get_list(self.api_host + '/stack/v1/stacks')
self._get_stack_slugs(stacks)
except Exception:
raise AnsibleError("Failed to get stack IDs from the Stackpath API: %s" % traceback.format_exc())
cache_key = self.get_cache_key(path)
# false when refresh_cache or --flush-cache is used
if cache:
# get the user-specified directive
cache = self.get_option('cache')
# Generate inventory
cache_needs_update = False
if cache:
try:
results = self._cache[cache_key]
except KeyError:
# if cache expires or cache file doesn't exist
cache_needs_update = True
if not cache or cache_needs_update:
results = self._query()
self._populate(results)
# If the cache has expired/doesn't exist or
# if refresh_inventory/flush cache is used
# when the user is using caching, update the cached inventory
try:
if cache_needs_update or (not cache and self.get_option('cache')):
self._cache[cache_key] = results
except Exception:
raise AnsibleError("Failed to populate data: %s" % traceback.format_exc())

View File

@@ -0,0 +1,287 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Unknown (!UNKNOWN)
name: virtualbox
short_description: virtualbox inventory source
description:
- Get inventory hosts from the local virtualbox installation.
- Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml).
- The inventory_hostname is always the 'Name' of the virtualbox instance.
extends_documentation_fragment:
- constructed
- inventory_cache
options:
plugin:
description: token that ensures this is a source file for the 'virtualbox' plugin
required: true
choices: ['virtualbox', 'community.general.virtualbox']
running_only:
description: toggles showing all vms vs only those currently running
type: boolean
default: false
settings_password_file:
description: provide a file containing the settings password (equivalent to --settingspwfile)
network_info_path:
description: property path to query for network information (ansible_host)
default: "/VirtualBox/GuestInfo/Net/0/V4/IP"
query:
description: create vars from virtualbox properties
type: dictionary
default: {}
'''
EXAMPLES = '''
# file must be named vbox.yaml or vbox.yml
simple_config_file:
plugin: community.general.virtualbox
settings_password_file: /etc/virtualbox/secrets
query:
logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList
compose:
ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh')
# add hosts (all matching the minishift vm) to the group container if any of the vms are in ansible_inventory
plugin: community.general.virtualbox
groups:
container: "'minis' in (inventory_hostname)"
'''
import os
from subprocess import Popen, PIPE
from ansible.errors import AnsibleParserError
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.module_utils.common.process import get_bin_path
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
''' Host inventory parser for ansible using local virtualbox. '''
NAME = 'community.general.virtualbox'
VBOX = "VBoxManage"
def __init__(self):
self._vbox_path = None
super(InventoryModule, self).__init__()
def _query_vbox_data(self, host, property_path):
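# Ask VBoxManage for a single guest property; returns the value part of a 'Value: <data>' line,
# or None if the property is missing or the command fails.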
ret = None
try:
cmd = [self._vbox_path, b'guestproperty', b'get',
to_bytes(host, errors='surrogate_or_strict'),
to_bytes(property_path, errors='surrogate_or_strict')]
x = Popen(cmd, stdout=PIPE)
ipinfo = to_text(x.stdout.read(), errors='surrogate_or_strict')
if 'Value' in ipinfo:
a, ip = ipinfo.split(':', 1)
ret = ip.strip()
except Exception:
pass
return ret
def _set_variables(self, hostvars):
# set vars in inventory from hostvars
for host in hostvars:
query = self.get_option('query')
# create vars from vbox properties
if query and isinstance(query, MutableMapping):
for varname in query:
hostvars[host][varname] = self._query_vbox_data(host, query[varname])
strict = self.get_option('strict')
# create composite vars
self._set_composite_vars(self.get_option('compose'), hostvars[host], host, strict=strict)
# actually update inventory
for key in hostvars[host]:
self.inventory.set_variable(host, key, hostvars[host][key])
# constructed groups based on conditionals
self._add_host_to_composed_groups(self.get_option('groups'), hostvars[host], host, strict=strict)
# constructed keyed_groups
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars[host], host, strict=strict)
def _populate_from_cache(self, source_data):
hostvars = source_data.pop('_meta', {}).get('hostvars', {})
for group in source_data:
if group == 'all':
continue
else:
group = self.inventory.add_group(group)
hosts = source_data[group].get('hosts', [])
for host in hosts:
self._populate_host_vars([host], hostvars.get(host, {}), group)
self.inventory.add_child('all', group)
if not source_data:
for host in hostvars:
self.inventory.add_host(host)
self._populate_host_vars([host], hostvars.get(host, {}))
def _populate_from_source(self, source_data, using_current_cache=False):
if using_current_cache:
self._populate_from_cache(source_data)
return source_data
cacheable_results = {'_meta': {'hostvars': {}}}
hostvars = {}
prevkey = pref_k = ''
current_host = None
# needed to possibly set ansible_host
netinfo = self.get_option('network_info_path')
for line in source_data:
line = to_text(line)
if ':' not in line:
continue
try:
k, v = line.split(':', 1)
except Exception:
# skip lines that cannot be split
continue
if k.strip() == '':
# skip empty
continue
v = v.strip()
# found host
if k.startswith('Name') and ',' not in v: # some setting strings appear in Name
current_host = v
if current_host not in hostvars:
hostvars[current_host] = {}
self.inventory.add_host(current_host)
# try to get network info
netdata = self._query_vbox_data(current_host, netinfo)
if netdata:
self.inventory.set_variable(current_host, 'ansible_host', netdata)
# found groups
elif k == 'Groups':
for group in v.split('/'):
if group:
group = self.inventory.add_group(group)
self.inventory.add_child(group, current_host)
if group not in cacheable_results:
cacheable_results[group] = {'hosts': []}
cacheable_results[group]['hosts'].append(current_host)
continue
else:
# found vars, accumulate in hostvars for clean inventory set
pref_k = 'vbox_' + k.strip().replace(' ', '_')
leading_spaces = len(k) - len(k.lstrip(' '))
if 0 < leading_spaces <= 2:
if prevkey not in hostvars[current_host] or not isinstance(hostvars[current_host][prevkey], dict):
hostvars[current_host][prevkey] = {}
hostvars[current_host][prevkey][pref_k] = v
elif leading_spaces > 2:
continue
else:
if v != '':
hostvars[current_host][pref_k] = v
if self._ungrouped_host(current_host, cacheable_results):
if 'ungrouped' not in cacheable_results:
cacheable_results['ungrouped'] = {'hosts': []}
cacheable_results['ungrouped']['hosts'].append(current_host)
prevkey = pref_k
self._set_variables(hostvars)
for host in hostvars:
h = self.inventory.get_host(host)
cacheable_results['_meta']['hostvars'][h.name] = h.vars
return cacheable_results
def _ungrouped_host(self, host, inventory):
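# A host counts as ungrouped when it does not appear in any group's host list of the
# (possibly nested) cacheable results.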
def find_host(host, inventory):
for k, v in inventory.items():
if k == '_meta':
continue
if isinstance(v, dict):
yield self._ungrouped_host(host, v)
elif isinstance(v, list):
yield host not in v
yield True
return all(find_host(host, inventory))
def verify_file(self, path):
valid = False
if super(InventoryModule, self).verify_file(path):
if path.endswith(('virtualbox.yaml', 'virtualbox.yml', 'vbox.yaml', 'vbox.yml')):
valid = True
return valid
def parse(self, inventory, loader, path, cache=True):
try:
self._vbox_path = get_bin_path(self.VBOX)
except ValueError as e:
raise AnsibleParserError(e)
super(InventoryModule, self).parse(inventory, loader, path)
cache_key = self.get_cache_key(path)
config_data = self._read_config_data(path)
# set _options from config data
self._consume_options(config_data)
source_data = None
if cache:
cache = self.get_option('cache')
update_cache = False
if cache:
try:
source_data = self._cache[cache_key]
except KeyError:
update_cache = True
if not source_data:
b_pwfile = to_bytes(self.get_option('settings_password_file'), errors='surrogate_or_strict', nonstring='passthru')
running = self.get_option('running_only')
# start getting data
cmd = [self._vbox_path, b'list', b'-l']
if running:
cmd.append(b'runningvms')
else:
cmd.append(b'vms')
if b_pwfile and os.path.exists(b_pwfile):
cmd.append(b'--settingspwfile')
cmd.append(b_pwfile)
try:
p = Popen(cmd, stdout=PIPE)
except Exception as e:
raise AnsibleParserError(to_native(e))
source_data = p.stdout.read().splitlines()
using_current_cache = cache and not update_cache
cacheable_results = self._populate_from_source(source_data, using_current_cache)
if update_cache:
self._cache[cache_key] = cacheable_results

View File

@@ -0,0 +1,328 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: xen_orchestra
short_description: Xen Orchestra inventory source
version_added: 4.1.0
author:
- Dom Del Nano (@ddelnano) <ddelnano@gmail.com>
- Samori Gorse (@shinuza) <samorigorse@gmail.com>
requirements:
- websocket-client >= 1.0.0
description:
- Get inventory hosts from a Xen Orchestra deployment.
- 'Uses a configuration file as an inventory source, it must end in C(.xen_orchestra.yml) or C(.xen_orchestra.yaml).'
extends_documentation_fragment:
- constructed
- inventory_cache
options:
plugin:
description: The name of this plugin, it should always be set to C(community.general.xen_orchestra) for this plugin to recognize it as its own.
required: true
choices: ['community.general.xen_orchestra']
type: str
api_host:
description:
- API host to XOA API.
- If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_HOST) will be used instead.
type: str
env:
- name: ANSIBLE_XO_HOST
user:
description:
- Xen Orchestra user.
- If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_USER) will be used instead.
required: true
type: str
env:
- name: ANSIBLE_XO_USER
password:
description:
- Xen Orchestra password.
- If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_PASSWORD) will be used instead.
required: true
type: str
env:
- name: ANSIBLE_XO_PASSWORD
validate_certs:
description: Verify TLS certificate if using HTTPS.
type: boolean
default: true
use_ssl:
description: Use wss when connecting to the Xen Orchestra API
type: boolean
default: true
'''
EXAMPLES = '''
# file must be named xen_orchestra.yaml or xen_orchestra.yml
plugin: community.general.xen_orchestra
api_host: 192.168.1.255
user: xo
password: xo_pwd
validate_certs: true
use_ssl: true
groups:
kube_nodes: "'kube_node' in tags"
compose:
ansible_port: 2222
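# Group guests by their Xen Orchestra tags using the constructed 'keyed_groups' option
# (a sketch: the 'tags' host variable is set by this plugin for every VM)
plugin: community.general.xen_orchestra
api_host: 192.168.1.255
user: xo
password: xo_pwd
keyed_groups:
- key: tags
prefix: tag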
'''
import json
import ssl
from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
# 3rd party imports
try:
HAS_WEBSOCKET = True
import websocket
from websocket import create_connection
if LooseVersion(websocket.__version__) < LooseVersion('1.0.0'):
raise ImportError
except ImportError as e:
HAS_WEBSOCKET = False
HALTED = 'Halted'
PAUSED = 'Paused'
RUNNING = 'Running'
SUSPENDED = 'Suspended'
POWER_STATES = [RUNNING, HALTED, SUSPENDED, PAUSED]
HOST_GROUP = 'xo_hosts'
POOL_GROUP = 'xo_pools'
def clean_group_name(label):
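# Normalize a label into a safe group name: lower-case it and turn spaces and dashes into underscores.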
return label.lower().replace(' ', '-').replace('-', '_')
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
''' Host inventory parser for ansible using XenOrchestra as source. '''
NAME = 'community.general.xen_orchestra'
def __init__(self):
super(InventoryModule, self).__init__()
# from config
self.counter = -1
self.session = None
self.cache_key = None
self.use_cache = None
@property
def pointer(self):
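# Monotonically increasing counter used as the JSON-RPC request id.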
self.counter += 1
return self.counter
def create_connection(self, xoa_api_host):
validate_certs = self.get_option('validate_certs')
use_ssl = self.get_option('use_ssl')
proto = 'wss' if use_ssl else 'ws'
sslopt = None if validate_certs else {'cert_reqs': ssl.CERT_NONE}
self.conn = create_connection(
'{0}://{1}/api/'.format(proto, xoa_api_host), sslopt=sslopt)
def login(self, user, password):
payload = {'id': self.pointer, 'jsonrpc': '2.0', 'method': 'session.signIn', 'params': {
'username': user, 'password': password}}
self.conn.send(json.dumps(payload))
result = json.loads(self.conn.recv())
if 'error' in result:
raise AnsibleError(
'Could not connect: {0}'.format(result['error']))
def get_object(self, name):
payload = {'id': self.pointer, 'jsonrpc': '2.0',
'method': 'xo.getAllObjects', 'params': {'filter': {'type': name}}}
self.conn.send(json.dumps(payload))
answer = json.loads(self.conn.recv())
if 'error' in answer:
raise AnsibleError(
'Could not request: {0}'.format(answer['error']))
return answer['result']
def _get_objects(self):
self.create_connection(self.xoa_api_host)
self.login(self.xoa_user, self.xoa_password)
return {
'vms': self.get_object('VM'),
'pools': self.get_object('pool'),
'hosts': self.get_object('host'),
}
def _apply_constructable(self, name, variables):
strict = self.get_option('strict')
self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict)
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict)
self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict)
def _add_vms(self, vms, hosts, pools):
for uuid, vm in vms.items():
group = 'with_ip'
ip = vm.get('mainIpAddress')
entry_name = uuid
power_state = vm['power_state'].lower()
pool_name = self._pool_group_name_for_uuid(pools, vm['$poolId'])
host_name = self._host_group_name_for_uuid(hosts, vm['$container'])
self.inventory.add_host(entry_name)
# Grouping by power state
self.inventory.add_child(power_state, entry_name)
# Grouping by host
if host_name:
self.inventory.add_child(host_name, entry_name)
# Grouping by pool
if pool_name:
self.inventory.add_child(pool_name, entry_name)
# Grouping VMs with an IP together
if ip is None:
group = 'without_ip'
self.inventory.add_group(group)
self.inventory.add_child(group, entry_name)
# Adding meta
self.inventory.set_variable(entry_name, 'uuid', uuid)
self.inventory.set_variable(entry_name, 'ip', ip)
self.inventory.set_variable(entry_name, 'ansible_host', ip)
self.inventory.set_variable(entry_name, 'power_state', power_state)
self.inventory.set_variable(
entry_name, 'name_label', vm['name_label'])
self.inventory.set_variable(entry_name, 'type', vm['type'])
self.inventory.set_variable(
entry_name, 'cpus', vm['CPUs']['number'])
self.inventory.set_variable(entry_name, 'tags', vm['tags'])
self.inventory.set_variable(
entry_name, 'memory', vm['memory']['size'])
self.inventory.set_variable(
entry_name, 'has_ip', group == 'with_ip')
self.inventory.set_variable(
entry_name, 'is_managed', vm.get('managementAgentDetected', False))
self.inventory.set_variable(
entry_name, 'os_version', vm['os_version'])
self._apply_constructable(entry_name, self.inventory.get_host(entry_name).get_vars())
def _add_hosts(self, hosts, pools):
for host in hosts.values():
entry_name = host['uuid']
group_name = 'xo_host_{0}'.format(
clean_group_name(host['name_label']))
pool_name = self._pool_group_name_for_uuid(pools, host['$poolId'])
self.inventory.add_group(group_name)
self.inventory.add_host(entry_name)
self.inventory.add_child(HOST_GROUP, entry_name)
self.inventory.add_child(pool_name, entry_name)
self.inventory.set_variable(entry_name, 'enabled', host['enabled'])
self.inventory.set_variable(
entry_name, 'hostname', host['hostname'])
self.inventory.set_variable(entry_name, 'memory', host['memory'])
self.inventory.set_variable(entry_name, 'address', host['address'])
self.inventory.set_variable(entry_name, 'cpus', host['cpus'])
self.inventory.set_variable(entry_name, 'type', 'host')
self.inventory.set_variable(entry_name, 'tags', host['tags'])
self.inventory.set_variable(entry_name, 'version', host['version'])
self.inventory.set_variable(
entry_name, 'power_state', host['power_state'].lower())
self.inventory.set_variable(
entry_name, 'product_brand', host['productBrand'])
for pool in pools.values():
group_name = 'xo_pool_{0}'.format(
clean_group_name(pool['name_label']))
self.inventory.add_group(group_name)
def _add_pools(self, pools):
for pool in pools.values():
group_name = 'xo_pool_{0}'.format(
clean_group_name(pool['name_label']))
self.inventory.add_group(group_name)
# TODO: Refactor
def _pool_group_name_for_uuid(self, pools, pool_uuid):
for pool in pools:
if pool == pool_uuid:
return 'xo_pool_{0}'.format(
clean_group_name(pools[pool_uuid]['name_label']))
# TODO: Refactor
def _host_group_name_for_uuid(self, hosts, host_uuid):
for host in hosts:
if host == host_uuid:
return 'xo_host_{0}'.format(
clean_group_name(hosts[host_uuid]['name_label']
))
def _populate(self, objects):
# Prepare general groups
self.inventory.add_group(HOST_GROUP)
self.inventory.add_group(POOL_GROUP)
for group in POWER_STATES:
self.inventory.add_group(group.lower())
self._add_pools(objects['pools'])
self._add_hosts(objects['hosts'], objects['pools'])
self._add_vms(objects['vms'], objects['hosts'], objects['pools'])
def verify_file(self, path):
valid = False
if super(InventoryModule, self).verify_file(path):
if path.endswith(('xen_orchestra.yaml', 'xen_orchestra.yml')):
valid = True
else:
self.display.vvv(
'Skipping due to inventory source not ending in "xen_orchestra.yaml" nor "xen_orchestra.yml"')
return valid
def parse(self, inventory, loader, path, cache=True):
if not HAS_WEBSOCKET:
raise AnsibleError('This plugin requires websocket-client 1.0.0 or higher: '
'https://github.com/websocket-client/websocket-client.')
super(InventoryModule, self).parse(inventory, loader, path)
# read config from file, this sets 'options'
self._read_config_data(path)
self.inventory = inventory
self.protocol = 'wss'
self.xoa_api_host = self.get_option('api_host')
self.xoa_user = self.get_option('user')
self.xoa_password = self.get_option('password')
self.cache_key = self.get_cache_key(path)
self.use_cache = cache and self.get_option('cache')
self.validate_certs = self.get_option('validate_certs')
if not self.get_option('use_ssl'):
self.protocol = 'ws'
objects = self._get_objects()
self._populate(objects)