@@ -0,0 +1,522 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""short_description: Check or wait for migrations between nodes"""

# Copyright (c) 2018, Albert Autin
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type


DOCUMENTATION = '''
---
module: aerospike_migrations
short_description: Check or wait for migrations between nodes
description:
    - This can be used to check for migrations in a cluster.
      This makes it easy to do a rolling upgrade/update on Aerospike nodes.
    - If waiting for migrations is not desired, simply poll until
      port 3000 is available or C(asinfo -v status) returns ok.
author: "Albert Autin (@Alb0t)"
options:
    host:
        description:
            - Which host do we use as seed for info connection
        required: false
        type: str
        default: localhost
    port:
        description:
            - Which port to connect to Aerospike on (service port)
        required: false
        type: int
        default: 3000
    connect_timeout:
        description:
            - How long to try to connect before giving up (milliseconds)
        required: false
        type: int
        default: 1000
    consecutive_good_checks:
        description:
            - How many times should the cluster report "no migrations"
              consecutively before returning OK back to ansible?
        required: false
        type: int
        default: 3
    sleep_between_checks:
        description:
            - How long to sleep between each check (seconds).
        required: false
        type: int
        default: 60
    tries_limit:
        description:
            - How many times do we poll before giving up and failing?
        default: 300
        required: false
        type: int
    local_only:
        description:
            - Do you wish to only check for migrations on the local node
              before returning, or do you want all nodes in the cluster
              to finish before returning?
        required: true
        type: bool
    min_cluster_size:
        description:
            - Check will return bad until cluster size is met
              or until tries are exhausted.
        required: false
        type: int
        default: 1
    fail_on_cluster_change:
        description:
            - Fail if the cluster key changes.
              If something else is changing the cluster, we may want to fail.
        required: false
        type: bool
        default: true
    migrate_tx_key:
        description:
            - The metric key used to determine if we have tx migrations
              remaining. Changeable due to backwards compatibility.
        required: false
        type: str
        default: migrate_tx_partitions_remaining
    migrate_rx_key:
        description:
            - The metric key used to determine if we have rx migrations
              remaining. Changeable due to backwards compatibility.
        required: false
        type: str
        default: migrate_rx_partitions_remaining
    target_cluster_size:
        description:
            - When all aerospike builds in the cluster are greater than
              version 4.3, then the C(cluster-stable) info command will be used.
              Inside this command, you can optionally specify what the target
              cluster size is - but it is not necessary. You can still rely on
              min_cluster_size if you don't want to use this option.
            - If this option is specified on a cluster that has at least 1
              host <4.3 then it will be ignored until the min version reaches
              4.3.
        required: false
        type: int
'''

EXAMPLES = '''
# check for migrations on local node
- name: Wait for migrations on local node before proceeding
  community.general.aerospike_migrations:
    host: "localhost"
    connect_timeout: 2000
    consecutive_good_checks: 5
    sleep_between_checks: 15
    tries_limit: 600
    local_only: false

# example playbook:
- name: Upgrade aerospike
  hosts: all
  become: true
  serial: 1
  tasks:
    - name: Install dependencies
      ansible.builtin.apt:
        name:
          - python
          - python-pip
          - python-setuptools
        state: latest
    - name: Setup aerospike
      ansible.builtin.pip:
        name: aerospike
    # Check for migrations every (sleep_between_checks) seconds.
    # If at least (consecutive_good_checks) checks come back OK in a row, then return OK.
    # Will exit if any exception occurs, which can be caused by bad nodes,
    # nodes not returning data, or other reasons.
    # Maximum runtime before giving up in this case will be:
    # Tries Limit * Sleep Between Checks * delay * retries
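    # A rough worked example with the values below: each task attempt polls
    # for up to tries_limit * sleep_between_checks = 5 * 1 = ~5 seconds, and
    # Ansible retries the task up to 120 times with a 60-second delay, so the
    # play gives up after roughly 120 * (60 + 5) seconds (about two hours).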
    - name: Wait for aerospike migrations
      community.general.aerospike_migrations:
        local_only: true
        sleep_between_checks: 1
        tries_limit: 5
        consecutive_good_checks: 3
        fail_on_cluster_change: true
        min_cluster_size: 3
        target_cluster_size: 4
      register: migrations_check
      until: migrations_check is succeeded
      changed_when: false
      delay: 60
      retries: 120
    - name: Another thing
      ansible.builtin.shell: |
        echo foo
    - name: Reboot
      ansible.builtin.reboot:
'''

RETURN = '''
# Returns only a success/failure result. Changed is always false.
'''

import traceback

from ansible.module_utils.basic import AnsibleModule, missing_required_lib

LIB_FOUND_ERR = None
try:
    import aerospike
    from time import sleep
    import re
except ImportError as ie:
    LIB_FOUND = False
    LIB_FOUND_ERR = traceback.format_exc()
else:
    LIB_FOUND = True


def run_module():
    """run ansible module"""
    module_args = dict(
        host=dict(type='str', required=False, default='localhost'),
        port=dict(type='int', required=False, default=3000),
        connect_timeout=dict(type='int', required=False, default=1000),
        consecutive_good_checks=dict(type='int', required=False, default=3),
        sleep_between_checks=dict(type='int', required=False, default=60),
        tries_limit=dict(type='int', required=False, default=300),
        local_only=dict(type='bool', required=True),
        min_cluster_size=dict(type='int', required=False, default=1),
        target_cluster_size=dict(type='int', required=False, default=None),
        fail_on_cluster_change=dict(type='bool', required=False, default=True),
        migrate_tx_key=dict(type='str', required=False, no_log=False,
                            default="migrate_tx_partitions_remaining"),
        migrate_rx_key=dict(type='str', required=False, no_log=False,
                            default="migrate_rx_partitions_remaining")
    )

    result = dict(
        changed=False,
    )

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )
    if not LIB_FOUND:
        module.fail_json(msg=missing_required_lib('aerospike'),
                         exception=LIB_FOUND_ERR)

    try:
        if module.check_mode:
            has_migrations, skip_reason = False, None
        else:
            migrations = Migrations(module)
            has_migrations, skip_reason = migrations.has_migs(
                module.params['local_only']
            )

        if has_migrations:
            module.fail_json(msg="Failed.", skip_reason=skip_reason)
    except Exception as e:
        module.fail_json(msg="Error: {0}".format(e))

    module.exit_json(**result)


class Migrations:
    """ Check or wait for migrations between nodes """

    def __init__(self, module):
        self.module = module
        self._client = self._create_client().connect()
        self._nodes = {}
        self._update_nodes_list()
        self._cluster_statistics = {}
        self._update_cluster_statistics()
        self._namespaces = set()
        self._update_cluster_namespace_list()
        self._build_list = set()
        self._update_build_list()
        self._start_cluster_key = \
            self._cluster_statistics[self._nodes[0]]['cluster_key']

    def _create_client(self):
        """ TODO: add support for auth, tls, and other special features
        I won't use those features, so I'll wait until somebody complains
        or does it for me (cross fingers)
        create the client object"""
        config = {
            'hosts': [
                (self.module.params['host'], self.module.params['port'])
            ],
            'policies': {
                'timeout': self.module.params['connect_timeout']
            }
        }
        return aerospike.client(config)

    def _info_cmd_helper(self, cmd, node=None, delimiter=';'):
        """delimiter is for separating stats that come back, NOT for the
        kv separation, which is ="""
        if node is None:  # If no node passed, use the first one (local)
            node = self._nodes[0]
        data = self._client.info_node(cmd, node)
        data = data.split("\t")
        if len(data) != 1 and len(data) != 2:
            self.module.fail_json(
                msg="Unexpected number of values returned in info command: " +
                str(len(data))
            )
        # data will be in format 'command\toutput'
        data = data[-1]
        data = data.rstrip("\n\r")
        data_arr = data.split(delimiter)

        # some commands don't return in kv format,
        # so we don't want a dict from those.
        if '=' in data:
            retval = dict(
                metric.split("=", 1) for metric in data_arr
            )
        else:
            # if only 1 element found, and not kv, return just the value.
            if len(data_arr) == 1:
                retval = data_arr[0]
            else:
                retval = data_arr
        return retval
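
    # Illustrative sketch (not executed by the module) of how the helper
    # above parses an info response; the sample payload is hypothetical:
    #
    #   data = 'statistics\tcluster_key=ABC123;migrate_allowed=true\n'
    #   data.split('\t')[-1].rstrip('\n\r').split(';')
    #       -> ['cluster_key=ABC123', 'migrate_allowed=true']
    #   dict(m.split('=', 1) for m in ...)
    #       -> {'cluster_key': 'ABC123', 'migrate_allowed': 'true'}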

    def _update_build_list(self):
        """creates self._build_list which is a unique list
        of build versions."""
        self._build_list = set()
        for node in self._nodes:
            build = self._info_cmd_helper('build', node)
            self._build_list.add(build)

    # just checks to see if the version is 4.3 or greater
    def _can_use_cluster_stable(self):
        # if version <4.3 we can't use the cluster-stable info cmd
        # regex hack to check for versions beginning with 0-3 or
        # beginning with 4.0,4.1,4.2
        if re.search(r'^([0-3]\.|4\.[0-2])', min(self._build_list)):
            return False
        return True
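
    # A quick sanity check of the regex above (hypothetical build strings):
    #   min(self._build_list) == '3.15.1' -> matches '^[0-3]\.'  -> False
    #   min(self._build_list) == '4.2.0'  -> matches '4\.[0-2]'  -> False
    #   min(self._build_list) == '4.3.1'  -> no match            -> True
    # Note min() compares strings lexicographically, which is good enough
    # to find a pre-4.3 build among single-digit major versions.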

    def _update_cluster_namespace_list(self):
        """ make a unique list of namespaces
        TODO: does this work on a rolling namespace add/deletion?
        thankfully if it doesn't, we don't need this on builds >=4.3"""
        self._namespaces = set()
        for node in self._nodes:
            namespaces = self._info_cmd_helper('namespaces', node)
            for namespace in namespaces:
                self._namespaces.add(namespace)

    def _update_cluster_statistics(self):
        """create a dict of nodes with their related stats """
        self._cluster_statistics = {}
        for node in self._nodes:
            self._cluster_statistics[node] = \
                self._info_cmd_helper('statistics', node)

    def _update_nodes_list(self):
        """get a fresh list of all the nodes"""
        self._nodes = self._client.get_nodes()
        if not self._nodes:
            self.module.fail_json(msg="Failed to retrieve at least 1 node.")

    def _namespace_has_migs(self, namespace, node=None):
        """returns True or False.
        Does the namespace have migrations for the node passed?
        If no node passed, uses the local node or the first one in the list"""
        namespace_stats = self._info_cmd_helper("namespace/" + namespace, node)
        try:
            namespace_tx = \
                int(namespace_stats[self.module.params['migrate_tx_key']])
            namespace_rx = \
                int(namespace_stats[self.module.params['migrate_rx_key']])
        except KeyError:
            self.module.fail_json(
                msg="Did not find partition remaining key:" +
                self.module.params['migrate_tx_key'] +
                " or key:" +
                self.module.params['migrate_rx_key'] +
                " in 'namespace/" +
                namespace +
                "' output."
            )
        except TypeError:
            self.module.fail_json(
                msg="namespace stat returned was not numerical"
            )
        return namespace_tx != 0 or namespace_rx != 0

    def _node_has_migs(self, node=None):
        """just calls namespace_has_migs and
        if any namespace has migs returns true"""
        migs = 0
        self._update_cluster_namespace_list()
        for namespace in self._namespaces:
            if self._namespace_has_migs(namespace, node):
                migs += 1
        return migs != 0

    def _cluster_key_consistent(self):
        """create a dictionary to store what each node
        returns the cluster key as. we should end up with only 1 dict key,
        with the key being the cluster key."""
        cluster_keys = {}
        for node in self._nodes:
            cluster_key = self._cluster_statistics[node][
                'cluster_key']
            if cluster_key not in cluster_keys:
                cluster_keys[cluster_key] = 1
            else:
                cluster_keys[cluster_key] += 1
        if len(cluster_keys.keys()) == 1 and \
                self._start_cluster_key in cluster_keys:
            return True
        return False

    def _cluster_migrates_allowed(self):
        """ensure all nodes have 'migrate_allowed' in their stats output"""
        for node in self._nodes:
            node_stats = self._info_cmd_helper('statistics', node)
            allowed = node_stats['migrate_allowed']
            if allowed == "false":
                return False
        return True

    def _cluster_has_migs(self):
        """calls node_has_migs for each node"""
        migs = 0
        for node in self._nodes:
            if self._node_has_migs(node):
                migs += 1
        if migs == 0:
            return False
        return True

    def _has_migs(self, local):
        if local:
            return self._local_node_has_migs()
        return self._cluster_has_migs()

    def _local_node_has_migs(self):
        return self._node_has_migs(None)

    def _is_min_cluster_size(self):
        """checks that all nodes in the cluster are returning the
        minimum cluster size specified in their statistics output"""
        sizes = set()
        for node in self._cluster_statistics:
            sizes.add(int(self._cluster_statistics[node]['cluster_size']))

        if (len(sizes)) > 1:  # if we are getting more than 1 size, lets say no
            return False
        if (min(sizes)) >= self.module.params['min_cluster_size']:
            return True
        return False

    def _cluster_stable(self):
        """Added in Aerospike 4.3:
        cluster-stable:size=<target-cluster-size>;ignore-migrations=<yes/no>;namespace=<namespace-name>
        Returns the current 'cluster_key' when the following are satisfied:

        If 'size' is specified then the target node's 'cluster-size'
        must match size.
        If 'ignore-migrations' is either unspecified or 'false' then
        the target node's migrations counts must be zero for the provided
        'namespace' or all namespaces if 'namespace' is not provided."""
        cluster_key = set()
        cluster_key.add(self._info_cmd_helper('statistics')['cluster_key'])
        cmd = "cluster-stable:"
        target_cluster_size = self.module.params['target_cluster_size']
        if target_cluster_size is not None:
            cmd = cmd + "size=" + str(target_cluster_size) + ";"
        for node in self._nodes:
            try:
                cluster_key.add(self._info_cmd_helper(cmd, node))
            except aerospike.exception.ServerError as e:  # unstable-cluster is returned in the form of an exception
                if 'unstable-cluster' in e.msg:
                    return False
                raise e
        if len(cluster_key) == 1:
            return True
        return False
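
    # With target_cluster_size=4, the info command built above is the string
    # 'cluster-stable:size=4;'. Every node (plus the local 'statistics' call)
    # must answer with the same cluster key, hence the single-element set
    # check, for the cluster to be considered stable.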

    def _cluster_good_state(self):
        """checks a few things to make sure we're OK to say the cluster
        has no migs. It could be in an unhealthy condition that does not
        allow migs, or a split brain"""
        if self._cluster_key_consistent() is not True:
            return False, "Cluster key inconsistent."
        if self._is_min_cluster_size() is not True:
            return False, "Cluster min size not reached."
        if self._cluster_migrates_allowed() is not True:
            return False, "migrate_allowed is false somewhere."
        return True, "OK."

    def has_migs(self, local=True):
        """returns a boolean, False if no migrations otherwise True"""
        consecutive_good = 0
        try_num = 0
        skip_reason = list()
        while \
            try_num < int(self.module.params['tries_limit']) and \
            consecutive_good < \
                int(self.module.params['consecutive_good_checks']):

            self._update_nodes_list()
            self._update_cluster_statistics()

            # These checks are inside the while loop because
            # we probably want to skip & sleep instead of failing entirely
            stable, reason = self._cluster_good_state()
            if stable is not True:
                skip_reason.append(
                    "Skipping on try#" + str(try_num) +
                    " for reason:" + reason
                )
            else:
                if self._can_use_cluster_stable():
                    if self._cluster_stable():
                        consecutive_good += 1
                    else:
                        consecutive_good = 0
                        skip_reason.append(
                            "Skipping on try#" + str(try_num) +
                            " for reason:" + " cluster_stable"
                        )
                elif self._has_migs(local):
                    skip_reason.append(
                        "Skipping on try#" + str(try_num) +
                        " for reason:" + " migrations"
                    )
                    consecutive_good = 0
                else:
                    consecutive_good += 1
                    if consecutive_good == self.module.params[
                            'consecutive_good_checks']:
                        break
            try_num += 1
            sleep(self.module.params['sleep_between_checks'])
        if consecutive_good == self.module.params['consecutive_good_checks']:
            return False, None
        return True, skip_reason


def main():
    """main method for ansible module"""
    run_module()


if __name__ == '__main__':
    main()
@@ -0,0 +1,162 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright 2013 Bruce Pennypacker <bruce@pennypacker.org>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: airbrake_deployment
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Patrick Humpal (@phumpal)"
short_description: Notify Airbrake about app deployments
description:
  - Notify Airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)).
options:
  project_id:
    description:
      - Airbrake PROJECT_ID.
    required: true
    type: str
    version_added: '0.2.0'
  project_key:
    description:
      - Airbrake PROJECT_KEY.
    required: true
    type: str
    version_added: '0.2.0'
  environment:
    description:
      - The Airbrake environment name, typically 'production', 'staging', etc.
    required: true
    type: str
  user:
    description:
      - The username of the person doing the deployment.
    required: false
    type: str
  repo:
    description:
      - URL of the project repository.
    required: false
    type: str
  revision:
    description:
      - A hash, number, tag, or other identifier showing what revision from version control was deployed.
    required: false
    type: str
  version:
    description:
      - A string identifying what version was deployed.
    required: false
    type: str
    version_added: '1.0.0'
  url:
    description:
      - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
    required: false
    default: "https://api.airbrake.io/api/v4/projects/"
    type: str
  validate_certs:
    description:
      - If C(false), SSL certificates for the target URL will not be validated. This should only be used
        on personally controlled sites using self-signed certificates.
    required: false
    default: true
    type: bool

requirements: []
'''

EXAMPLES = '''
- name: Notify Airbrake about an app deployment
  community.general.airbrake_deployment:
    project_id: '12345'
    project_key: 'AAAAAA'
    environment: staging
    user: ansible
    revision: '4.2'

- name: Notify Airbrake about an app deployment, using git hash as revision
  community.general.airbrake_deployment:
    project_id: '12345'
    project_key: 'AAAAAA'
    environment: staging
    user: ansible
    revision: 'e54dd3a01f2c421b558ef33b5f79db936e2dcf15'
    version: '0.2.0'
'''

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url


# ===========================================
# Module execution.
#

def main():

    module = AnsibleModule(
        argument_spec=dict(
            project_id=dict(required=True, no_log=True, type='str'),
            project_key=dict(required=True, no_log=True, type='str'),
            environment=dict(required=True, type='str'),
            user=dict(required=False, type='str'),
            repo=dict(required=False, type='str'),
            revision=dict(required=False, type='str'),
            version=dict(required=False, type='str'),
            url=dict(required=False, default='https://api.airbrake.io/api/v4/projects/', type='str'),
            validate_certs=dict(default=True, type='bool'),
        ),
        supports_check_mode=True,
    )

    # Build dict of params for the request body
    params = {}

    # If we're in check mode, just exit pretending like we succeeded
    if module.check_mode:
        module.exit_json(changed=True)

    # v4 API documented at https://airbrake.io/docs/api/#create-deploy-v4
    if module.params["environment"]:
        params["environment"] = module.params["environment"]

    if module.params["user"]:
        params["username"] = module.params["user"]

    if module.params["repo"]:
        params["repository"] = module.params["repo"]

    if module.params["revision"]:
        params["revision"] = module.params["revision"]

    if module.params["version"]:
        params["version"] = module.params["version"]

    # Build deploy url
    url = module.params.get('url') + module.params["project_id"] + '/deploys?key=' + module.params["project_key"]
    json_body = module.jsonify(params)

    # Build header
    headers = {'Content-Type': 'application/json'}
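
    # For the first EXAMPLES entry above, the request built here is roughly
    # (illustrative; the values are from the example, not real credentials):
    #   POST https://api.airbrake.io/api/v4/projects/12345/deploys?key=AAAAAA
    #   {"environment": "staging", "username": "ansible", "revision": "4.2"}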

    # Notify Airbrake of deploy
    response, info = fetch_url(module, url, data=json_body,
                               headers=headers, method='POST')

    if info['status'] == 200 or info['status'] == 201:
        module.exit_json(changed=True)
    else:
        module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))


if __name__ == '__main__':
    main()
@@ -0,0 +1,370 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2017, 2018 Kairo Araujo <kairo@kairo.eti.br>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = r'''
---
author:
- Kairo Araujo (@kairoaraujo)
module: aix_devices
short_description: Manages AIX devices
description:
- This module discovers, defines, removes and modifies attributes of AIX devices.
options:
  attributes:
    description:
    - A list of device attributes.
    type: dict
  device:
    description:
    - The name of the device.
    - C(all) is valid to rescan C(available) all devices (AIX cfgmgr command).
    type: str
  force:
    description:
    - Forces action.
    type: bool
    default: false
  recursive:
    description:
    - Removes or defines a device and children devices.
    type: bool
    default: false
  state:
    description:
    - Controls the device state.
    - C(available) (alias C(present)) rescans a specific device or all devices (when C(device) is not specified).
    - C(removed) (alias C(absent)) removes a device.
    - C(defined) changes device to Defined state.
    type: str
    choices: [ available, defined, removed ]
    default: available
'''

EXAMPLES = r'''
- name: Scan new devices
  community.general.aix_devices:
    device: all
    state: available

- name: Scan new virtual devices (vio0)
  community.general.aix_devices:
    device: vio0
    state: available

- name: Removing IP alias from en0
  community.general.aix_devices:
    device: en0
    attributes:
      delalias4: 10.0.0.100,255.255.255.0

- name: Removes ent2
  community.general.aix_devices:
    device: ent2
    state: removed

- name: Put device en2 in Defined
  community.general.aix_devices:
    device: en2
    state: defined

- name: Removes ent4 (nonexistent).
  community.general.aix_devices:
    device: ent4
    state: removed

- name: Put device en4 in Defined (nonexistent)
  community.general.aix_devices:
    device: en4
    state: defined

- name: Put vscsi1 and children devices in Defined state.
  community.general.aix_devices:
    device: vscsi1
    recursive: true
    state: defined

- name: Removes vscsi1 and children devices.
  community.general.aix_devices:
    device: vscsi1
    recursive: true
    state: removed

- name: Changes en1 mtu to 9000 and disables arp.
  community.general.aix_devices:
    device: en1
    attributes:
      mtu: 9000
      arp: 'off'
    state: available

- name: Configure IP, netmask and set en1 up.
  community.general.aix_devices:
    device: en1
    attributes:
      netaddr: 192.168.0.100
      netmask: 255.255.255.0
      state: up
    state: available

- name: Adding IP alias to en0
  community.general.aix_devices:
    device: en0
    attributes:
      alias4: 10.0.0.100,255.255.255.0
    state: available
'''

RETURN = r''' # '''

from ansible.module_utils.basic import AnsibleModule


def _check_device(module, device):
    """
    Check if device already exists and its state.

    Args:
        module: Ansible module.
        device: device to be checked.

    Returns: bool, device state
    """
    lsdev_cmd = module.get_bin_path('lsdev', True)
    rc, lsdev_out, err = module.run_command([lsdev_cmd, '-C', '-l', device])

    if rc != 0:
        module.fail_json(msg="Failed to run lsdev", rc=rc, err=err)

    if lsdev_out:
        device_state = lsdev_out.split()[1]
        return True, device_state

    device_state = None
    return False, device_state
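

# Illustrative parse (hypothetical lsdev output): for a line such as
#   'ent2 Available 0A-08 2-Port Gigabit Ethernet PCI-X Adapter'
# lsdev_out.split()[1] yields 'Available', so _check_device returns
# (True, 'Available'); empty output yields (False, None).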


def _check_device_attr(module, device, attr):
    """
    Check the current value of a device attribute.

    Args:
        module: Ansible module.
        device: device to check attributes.
        attr: attribute to be checked.

    Returns: the current attribute value, '' for hidden attributes, or None.
    """
    lsattr_cmd = module.get_bin_path('lsattr', True)
    rc, lsattr_out, err = module.run_command([lsattr_cmd, '-El', device, '-a', attr])

    hidden_attrs = ['delalias4', 'delalias6']

    if rc == 255:

        if attr in hidden_attrs:
            current_param = ''
        else:
            current_param = None

        return current_param

    elif rc != 0:
        module.fail_json(msg="Failed to run lsattr: %s" % err, rc=rc, err=err)

    current_param = lsattr_out.split()[1]
    return current_param


def discover_device(module, device):
    """ Discover AIX devices."""
    cfgmgr_cmd = module.get_bin_path('cfgmgr', True)

    # Build argv as a list; '-l' and the device name must be separate
    # elements for run_command().
    cmd = [cfgmgr_cmd]
    if device is not None:
        cmd += ['-l', device]

    changed = True
    msg = ''
    if not module.check_mode:
        rc, cfgmgr_out, err = module.run_command(cmd)
        changed = True
        msg = cfgmgr_out

    return changed, msg


def change_device_attr(module, attributes, device, force):
    """ Change AIX device attribute. """

    attr_changed = []
    attr_not_changed = []
    attr_invalid = []
    chdev_cmd = module.get_bin_path('chdev', True)

    for attr in list(attributes.keys()):
        new_param = attributes[attr]
        current_param = _check_device_attr(module, device, attr)

        if current_param is None:
            attr_invalid.append(attr)

        elif current_param != new_param:
            cmd = [chdev_cmd, '-l', device, '-a', "%s=%s" % (attr, attributes[attr])]
            if force:
                cmd.append(force)

            if not module.check_mode:
                rc, chdev_out, err = module.run_command(cmd)
                if rc != 0:
                    module.fail_json(msg="Failed to run chdev.", rc=rc, err=err)

            attr_changed.append(attributes[attr])
        else:
            attr_not_changed.append(attributes[attr])

    if len(attr_changed) > 0:
        changed = True
        attr_changed_msg = "Attributes changed: %s. " % ','.join(attr_changed)
    else:
        changed = False
        attr_changed_msg = ''

    if len(attr_not_changed) > 0:
        attr_not_changed_msg = "Attributes already set: %s. " % ','.join(attr_not_changed)
    else:
        attr_not_changed_msg = ''

    if len(attr_invalid) > 0:
        attr_invalid_msg = "Invalid attributes: %s " % ', '.join(attr_invalid)
    else:
        attr_invalid_msg = ''

    msg = "%s%s%s" % (attr_changed_msg, attr_not_changed_msg, attr_invalid_msg)

    return changed, msg
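

# For the mtu/arp example in EXAMPLES, the commands built above are roughly
# (illustrative):
#   chdev -l en1 -a mtu=9000
#   chdev -l en1 -a arp=off
# with the force flag ('-f') appended to each when force=true.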


def remove_device(module, device, force, recursive, state):
    """ Puts device in defined state or removes device. """

    state_opt = {
        'removed': '-d',
        'absent': '-d',
        'defined': ''
    }

    recursive_opt = {
        True: '-R',
        False: ''
    }

    recursive = recursive_opt[recursive]
    state = state_opt[state]

    changed = True
    msg = ''
    rmdev_cmd = module.get_bin_path('rmdev', True)

    if not module.check_mode:
        # Build argv, skipping empty option fragments; '-d' is what actually
        # deletes the device rather than just putting it in Defined state.
        cmd = [rmdev_cmd, '-l', device]
        if recursive:
            cmd.append(recursive)
        if state:
            cmd.append(state)
            if force:
                cmd.append(force)
        rc, rmdev_out, err = module.run_command(cmd)

        if rc != 0:
            module.fail_json(msg="Failed to run rmdev", rc=rc, err=err)

        msg = rmdev_out

    return changed, msg


def main():

    module = AnsibleModule(
        argument_spec=dict(
            attributes=dict(type='dict'),
            device=dict(type='str'),
            force=dict(type='bool', default=False),
            recursive=dict(type='bool', default=False),
            state=dict(type='str', default='available', choices=['available', 'defined', 'removed']),
        ),
        supports_check_mode=True,
    )

    force_opt = {
        True: '-f',
        False: '',
    }

    attributes = module.params['attributes']
    device = module.params['device']
    force = force_opt[module.params['force']]
    recursive = module.params['recursive']
    state = module.params['state']

    result = dict(
        changed=False,
        msg='',
    )

    if state == 'available' or state == 'present':
        if attributes:
            # change attributes on device
            device_status, device_state = _check_device(module, device)
            if device_status:
                result['changed'], result['msg'] = change_device_attr(module, attributes, device, force)
            else:
                result['msg'] = "Device %s does not exist." % device

        else:
            # discover devices (cfgmgr)
            if device and device != 'all':
                device_status, device_state = _check_device(module, device)
                if device_status:
                    # run cfgmgr on specific device
                    result['changed'], result['msg'] = discover_device(module, device)

                else:
                    result['msg'] = "Device %s does not exist." % device

            else:
                result['changed'], result['msg'] = discover_device(module, device)

    elif state == 'removed' or state == 'absent' or state == 'defined':
        if not device:
            result['msg'] = "device is required for removed or defined state."

        else:
            # Remove device
            check_device, device_state = _check_device(module, device)
            if check_device:
                if state == 'defined' and device_state == 'Defined':
                    result['changed'] = False
                    result['msg'] = 'Device %s already in Defined' % device

                else:
                    result['changed'], result['msg'] = remove_device(module, device, force, recursive, state)

            else:
                result['msg'] = "Device %s does not exist." % device

    else:
        result['msg'] = "Unexpected state %s." % state
        module.fail_json(**result)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
@@ -0,0 +1,566 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function

__metaclass__ = type

DOCUMENTATION = r'''
---
author:
- Kairo Araujo (@kairoaraujo)
module: aix_filesystem
short_description: Configure LVM and NFS file systems for AIX
description:
- This module creates, removes, mounts and unmounts LVM and NFS file systems for
  AIX using C(/etc/filesystems).
- For LVM file systems it is possible to resize a file system.
options:
  account_subsystem:
    description:
    - Specifies whether the file system is to be processed by the accounting subsystem.
    type: bool
    default: false
  attributes:
    description:
    - Specifies attributes for the file system, separated by comma.
    type: list
    elements: str
    default:
    - agblksize='4096'
    - isnapshot='no'
  auto_mount:
    description:
    - File system is automatically mounted at system restart.
    type: bool
    default: true
  device:
    description:
    - Logical volume (LV) device name or remote export device to create a NFS file system.
    - It is used to create a file system on an already existing logical volume or the exported NFS file system.
    - If not mentioned, a new logical volume name will be created following AIX standards (LVM).
    type: str
  fs_type:
    description:
    - Specifies the virtual file system type.
    type: str
    default: jfs2
  permissions:
    description:
    - Set file system permissions. C(rw) (read-write) or C(ro) (read-only).
    type: str
    choices: [ ro, rw ]
    default: rw
  mount_group:
    description:
    - Specifies the mount group.
    type: str
  filesystem:
    description:
    - Specifies the mount point, which is the directory where the file system will be mounted.
    type: str
    required: true
  nfs_server:
    description:
    - Specifies a Network File System (NFS) server.
    type: str
  rm_mount_point:
    description:
    - Removes the mount point directory when used with state C(absent).
    type: bool
    default: false
  size:
    description:
    - Specifies the file system size.
    - If the file system is already C(present), it will be resized.
    - 512-byte blocks, Megabytes or Gigabytes. If the value has M specified
      it will be in Megabytes. If the value has G specified it will be in
      Gigabytes.
    - If no M or G is specified, the value will be in 512-byte blocks.
    - If "+" is specified at the beginning of the value, the size will be added.
    - If "-" is specified at the beginning of the value, the size will be removed.
    - If "+" or "-" is not specified, the specified value becomes the total size.
    - Size respects AIX LVM standards.
    type: str
  state:
    description:
    - Controls the file system state.
    - C(present) checks if the file system exists, and creates or resizes it.
    - C(absent) removes an existing file system if already C(unmounted).
    - C(mounted) checks if the file system is mounted, or mounts the file system.
    - C(unmounted) checks if the file system is unmounted, or unmounts the file system.
    type: str
    choices: [ absent, mounted, present, unmounted ]
    default: present
  vg:
    description:
    - Specifies an existing volume group (VG).
    type: str
notes:
  - For more C(attributes), see the AIX "crfs" manual.
'''

EXAMPLES = r'''
- name: Create filesystem in a previously defined logical volume.
  community.general.aix_filesystem:
    device: testlv
    filesystem: /testfs
    state: present

- name: Creating NFS filesystem from nfshost.
  community.general.aix_filesystem:
    device: /home/ftp
    nfs_server: nfshost
    filesystem: /home/ftp
    state: present

- name: Creating a new file system without a previously defined logical volume.
  community.general.aix_filesystem:
    filesystem: /newfs
    size: 1G
    state: present
    vg: datavg

- name: Unmounting /testfs.
  community.general.aix_filesystem:
    filesystem: /testfs
    state: unmounted

- name: Resizing /mksysb to +512M.
  community.general.aix_filesystem:
    filesystem: /mksysb
    size: +512M
    state: present

- name: Resizing /mksysb to 11G.
  community.general.aix_filesystem:
    filesystem: /mksysb
    size: 11G
    state: present

- name: Resizing /mksysb to -2G.
  community.general.aix_filesystem:
    filesystem: /mksysb
    size: -2G
    state: present

- name: Remove NFS filesystem /home/ftp.
  community.general.aix_filesystem:
    filesystem: /home/ftp
    rm_mount_point: true
    state: absent

- name: Remove /newfs.
  community.general.aix_filesystem:
    filesystem: /newfs
    rm_mount_point: true
    state: absent
'''

RETURN = r'''
changed:
  description: Return changed for aix_filesystems actions as true or false.
  returned: always
  type: bool
msg:
  description: Return message regarding the action.
  returned: always
  type: str
'''

import re

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils._mount import ismount


def _fs_exists(module, filesystem):
    """
    Check if file system already exists on /etc/filesystems.

    :param module: Ansible module.
    :param filesystem: filesystem name.
    :return: True or False.
    """
    lsfs_cmd = module.get_bin_path('lsfs', True)
    rc, lsfs_out, err = module.run_command([lsfs_cmd, "-l", filesystem])
    if rc == 1:
        if re.findall("No record matching", err):
            return False

        else:
            module.fail_json(msg="Failed to run lsfs. Error message: %s" % err)

    else:

        return True


def _check_nfs_device(module, nfs_host, device):
    """
    Validate if the NFS server is exporting the device (remote export).

    :param module: Ansible module.
    :param nfs_host: nfs_host parameter, NFS server.
    :param device: device parameter, remote export.
    :return: True or False.
    """
    showmount_cmd = module.get_bin_path('showmount', True)
    rc, showmount_out, err = module.run_command([showmount_cmd, "-a", nfs_host])
    if rc != 0:
        module.fail_json(msg="Failed to run showmount. Error message: %s" % err)
    else:
        showmount_data = showmount_out.splitlines()
        for line in showmount_data:
            if line.split(':')[1] == device:
                return True

        return False


def _validate_vg(module, vg):
    """
    Check the current state of a volume group.

    :param module: Ansible module argument spec.
    :param vg: Volume Group name.
    :return: True (VG in varyon state) or False (VG in varyoff state) or
             None (VG does not exist), message.
    """
    lsvg_cmd = module.get_bin_path('lsvg', True)
    rc, current_active_vgs, err = module.run_command([lsvg_cmd, "-o"])
    if rc != 0:
        module.fail_json(msg="Failed executing %s command." % lsvg_cmd)

    # List all volume groups; the bare command takes no further arguments.
    rc, current_all_vgs, err = module.run_command([lsvg_cmd])
    if rc != 0:
        module.fail_json(msg="Failed executing %s command." % lsvg_cmd)

    if vg in current_all_vgs and vg not in current_active_vgs:
        msg = "Volume group %s is in varyoff state." % vg
        return False, msg
    elif vg in current_active_vgs:
        msg = "Volume group %s is in varyon state." % vg
        return True, msg
    else:
        msg = "Volume group %s does not exist." % vg
        return None, msg
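

# Illustrative flow (hypothetical outputs): if 'lsvg' lists 'rootvg datavg'
# and 'lsvg -o' lists only 'rootvg', then _validate_vg(module, 'datavg')
# returns (False, "Volume group datavg is in varyoff state."). Note the
# membership tests above are substring checks against the raw command output.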


def resize_fs(module, filesystem, size):
    """ Resize LVM file system. """

    chfs_cmd = module.get_bin_path('chfs', True)
    if not module.check_mode:
        rc, chfs_out, err = module.run_command([chfs_cmd, "-a", "size=%s" % size, filesystem])

        if rc == 28:
            changed = False
            return changed, chfs_out
        elif rc != 0:
            if re.findall('Maximum allocation for logical', err):
                changed = False
                return changed, err
            else:
                module.fail_json(msg="Failed to run chfs. Error message: %s" % err)

        else:
            if re.findall('The filesystem size is already', chfs_out):
                changed = False
            else:
                changed = True

            return changed, chfs_out
    else:
        changed = True
        msg = ''

        return changed, msg


def create_fs(
        module, fs_type, filesystem, vg, device, size, mount_group, auto_mount,
        account_subsystem, permissions, nfs_server, attributes):
    """ Create LVM file system or NFS remote mount point. """

    attributes = ' -a '.join(attributes)

    # Parameters definition.
    account_subsys_opt = {
        True: '-t yes',
        False: '-t no'
    }

    if nfs_server is not None:
        auto_mount_opt = {
            True: '-A',
            False: '-a'
        }

    else:
        auto_mount_opt = {
            True: '-A yes',
            False: '-A no'
        }

    if size is None:
        size = ''
    else:
        size = "-a size=%s" % size

    if device is None:
        device = ''
    else:
        device = "-d %s" % device

    if vg is None:
        vg = ''
    else:
        vg_state, msg = _validate_vg(module, vg)
        if vg_state:
            vg = "-g %s" % vg
        else:
            changed = False

            return changed, msg

    if mount_group is None:
        mount_group = ''

    else:
        mount_group = "-u %s" % mount_group

    auto_mount = auto_mount_opt[auto_mount]
    account_subsystem = account_subsys_opt[account_subsystem]

    if nfs_server is not None:
        # Creates a NFS file system.
        mknfsmnt_cmd = module.get_bin_path('mknfsmnt', True)
        if not module.check_mode:
            # The option fragments above contain embedded spaces, so the
            # command is passed as a single string: run_command() splits a
            # plain string with shlex, and empty fragments simply vanish.
            cmd = "%s -f %s %s -h %s -t %s %s -w bg" % (
                mknfsmnt_cmd, filesystem, device, nfs_server, permissions, auto_mount)
            rc, mknfsmnt_out, err = module.run_command(cmd)
            if rc != 0:
                module.fail_json(msg="Failed to run mknfsmnt. Error message: %s" % err)
            else:
                changed = True
                msg = "NFS file system %s created." % filesystem

                return changed, msg
        else:
            changed = True
            msg = ''

            return changed, msg

    else:
        # Creates a LVM file system.
        crfs_cmd = module.get_bin_path('crfs', True)
        if not module.check_mode:
            # Passed as a single string for the same shlex-splitting reason
            # as the mknfsmnt call above.
            cmd = "%s -v %s -m %s %s %s %s %s %s -p %s %s -a %s" % (
                crfs_cmd, fs_type, filesystem, vg, device, mount_group,
                auto_mount, account_subsystem, permissions, size, attributes)
            rc, crfs_out, err = module.run_command(cmd)

            if rc == 10:
                module.exit_json(
                    msg="When using a previously defined logical volume, "
                        "the volume group needs to be empty. %s" % err)

            elif rc != 0:
                module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err))

            else:
                changed = True
                return changed, crfs_out
        else:
            changed = True
            msg = ''

            return changed, msg
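

# For the "/newfs" example in EXAMPLES, the resulting command is roughly
# (illustrative, with default parameters):
#   crfs -v jfs2 -m /newfs -g datavg -A yes -t no -p rw -a size=1G
#        -a agblksize='4096' -a isnapshot='no'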


def remove_fs(module, filesystem, rm_mount_point):
    """ Remove an LVM file system or NFS entry. """

    # Command parameters.
    rm_mount_point_opt = {
        True: '-r',
        False: ''
    }

    rm_mount_point = rm_mount_point_opt[rm_mount_point]

    rmfs_cmd = module.get_bin_path('rmfs', True)
    if not module.check_mode:
        # '-r' must only be passed when rm_mount_point is requested; the
        # string form avoids passing an empty argv element otherwise.
        cmd = "%s %s %s" % (rmfs_cmd, rm_mount_point, filesystem)
        rc, rmfs_out, err = module.run_command(cmd)
        if rc != 0:
            module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err))
        else:
            changed = True
            msg = rmfs_out
            if not rmfs_out:
                msg = "File system %s removed." % filesystem

            return changed, msg
    else:
        changed = True
        msg = ''

        return changed, msg


def mount_fs(module, filesystem):
    """ Mount a file system. """
    mount_cmd = module.get_bin_path('mount', True)

    if not module.check_mode:
        rc, mount_out, err = module.run_command([mount_cmd, filesystem])
        if rc != 0:
            module.fail_json(msg="Failed to run mount. Error message: %s" % err)
        else:
            changed = True
            msg = "File system %s mounted." % filesystem

            return changed, msg
    else:
        changed = True
        msg = ''

        return changed, msg


def unmount_fs(module, filesystem):
    """ Unmount a file system."""
    unmount_cmd = module.get_bin_path('unmount', True)

    if not module.check_mode:
        rc, unmount_out, err = module.run_command([unmount_cmd, filesystem])
        if rc != 0:
            module.fail_json(msg="Failed to run unmount. Error message: %s" % err)
        else:
            changed = True
            msg = "File system %s unmounted." % filesystem

            return changed, msg
    else:
        changed = True
        msg = ''

        return changed, msg


def main():
    module = AnsibleModule(
        argument_spec=dict(
            account_subsystem=dict(type='bool', default=False),
            attributes=dict(type='list', elements='str', default=["agblksize='4096'", "isnapshot='no'"]),
            auto_mount=dict(type='bool', default=True),
            device=dict(type='str'),
            filesystem=dict(type='str', required=True),
            fs_type=dict(type='str', default='jfs2'),
            permissions=dict(type='str', default='rw', choices=['rw', 'ro']),
            mount_group=dict(type='str'),
            nfs_server=dict(type='str'),
            rm_mount_point=dict(type='bool', default=False),
            size=dict(type='str'),
            state=dict(type='str', default='present', choices=['absent', 'mounted', 'present', 'unmounted']),
            vg=dict(type='str'),
        ),
        supports_check_mode=True,
    )

    account_subsystem = module.params['account_subsystem']
    attributes = module.params['attributes']
    auto_mount = module.params['auto_mount']
    device = module.params['device']
    fs_type = module.params['fs_type']
    permissions = module.params['permissions']
    mount_group = module.params['mount_group']
    filesystem = module.params['filesystem']
    nfs_server = module.params['nfs_server']
    rm_mount_point = module.params['rm_mount_point']
    size = module.params['size']
    state = module.params['state']
    vg = module.params['vg']

    result = dict(
        changed=False,
        msg='',
    )

    if state == 'present':
        fs_mounted = ismount(filesystem)
        fs_exists = _fs_exists(module, filesystem)

        # Check if fs is mounted or exists.
        if fs_mounted or fs_exists:
            result['msg'] = "File system %s already exists." % filesystem
            result['changed'] = False

            # If parameter size was passed, resize fs.
            if size is not None:
                result['changed'], result['msg'] = resize_fs(module, filesystem, size)

        # If fs doesn't exist, create it.
        else:
            # Check if fs will be a NFS device.
            if nfs_server is not None:
                if device is None:
                    result['msg'] = 'Parameter "device" is required when "nfs_server" is defined.'
                    module.fail_json(**result)
                else:
                    # Create a fs from NFS export.
                    if _check_nfs_device(module, nfs_server, device):
                        result['changed'], result['msg'] = create_fs(
                            module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)

            if device is None:
                if vg is None:
                    result['msg'] = 'Required parameter "device" and/or "vg" is missing for filesystem creation.'
                    module.fail_json(**result)
                else:
                    # Create a fs from a volume group, letting LVM pick the
                    # logical volume name.
                    result['changed'], result['msg'] = create_fs(
                        module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)

            if device is not None and nfs_server is None:
                # Create a fs from a previously defined lv device.
                result['changed'], result['msg'] = create_fs(
                    module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)

    elif state == 'absent':
        if ismount(filesystem):
            result['msg'] = "File system %s is currently mounted." % filesystem

        else:
            fs_status = _fs_exists(module, filesystem)
            if not fs_status:
                result['msg'] = "File system %s does not exist." % filesystem
            else:
                result['changed'], result['msg'] = remove_fs(module, filesystem, rm_mount_point)

    elif state == 'mounted':
        if ismount(filesystem):
            result['changed'] = False
            result['msg'] = "File system %s already mounted." % filesystem
        else:
            result['changed'], result['msg'] = mount_fs(module, filesystem)

    elif state == 'unmounted':
        if not ismount(filesystem):
            result['changed'] = False
            result['msg'] = "File system %s already unmounted." % filesystem
        else:
            result['changed'], result['msg'] = unmount_fs(module, filesystem)

    else:
        # Unreachable codeblock
        result['msg'] = "Unexpected state %s." % state
        module.fail_json(**result)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
@@ -0,0 +1,248 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2017, Joris Weijters <joris.weijters@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = r'''
---
author:
- Joris Weijters (@molekuul)
module: aix_inittab
short_description: Manages the inittab on AIX
description:
- Manages the inittab on AIX.
options:
  name:
    description:
    - Name of the inittab entry.
    type: str
    required: true
    aliases: [ service ]
  runlevel:
    description:
    - Runlevel of the entry.
    type: str
    required: true
  action:
    description:
    - Action that init has to take with this entry.
    type: str
    choices:
    - boot
    - bootwait
    - hold
    - initdefault
    - 'off'
    - once
    - ondemand
    - powerfail
    - powerwait
    - respawn
    - sysinit
    - wait
  command:
    description:
    - The command to run.
    type: str
    required: true
  insertafter:
    description:
    - The inittab entry after which the new entry will be inserted.
    type: str
  state:
    description:
    - Whether the entry should be present or absent in the inittab file.
    type: str
    choices: [ absent, present ]
    default: present
notes:
  - The changes are persistent across reboots.
  - You need root rights to read or adjust the inittab with the C(lsitab), C(chitab), C(mkitab) or C(rmitab) commands.
  - Tested on AIX 7.1.
requirements:
- itertools
'''

EXAMPLES = '''
# Add service startmyservice to the inittab, directly after service existingservice.
- name: Add startmyservice to inittab
  community.general.aix_inittab:
    name: startmyservice
    runlevel: 4
    action: once
    command: echo hello
    insertafter: existingservice
    state: present
  become: true

# Change inittab entry startmyservice to runlevel "2" and processaction "wait".
- name: Change startmyservice in inittab
  community.general.aix_inittab:
    name: startmyservice
    runlevel: 2
    action: wait
    command: echo hello
    state: present
  become: true

- name: Remove startmyservice from inittab
  community.general.aix_inittab:
    name: startmyservice
    runlevel: 2
    action: wait
    command: echo hello
    state: absent
  become: true
'''

RETURN = '''
name:
  description: Name of the adjusted inittab entry.
  returned: always
  type: str
  sample: startmyservice
msg:
  description: Action done with the inittab entry.
  returned: changed
  type: str
  sample: changed inittab entry startmyservice
changed:
  description: Whether the inittab changed or not.
  returned: always
  type: bool
  sample: true
'''
|
||||
|
||||
# Import necessary libraries
try:
    # python 2
    from itertools import izip
except ImportError:
    izip = zip

from ansible.module_utils.basic import AnsibleModule

# end import modules
# start defining the functions


def check_current_entry(module):
    # Check whether the entry exists. If it does not, return a dict with
    # 'exist': False; otherwise return 'exist': True plus the parsed entry.
    existsdict = {'exist': False}
    lsitab = module.get_bin_path('lsitab')
    (rc, out, err) = module.run_command([lsitab, module.params['name']])
    if rc == 0:
        keys = ('name', 'runlevel', 'action', 'command')
        values = out.split(":")
        # strip whitespace such as the trailing newline
        values = map(lambda s: s.strip(), values)
        existsdict = dict(izip(keys, values))
        existsdict.update({'exist': True})
    return existsdict
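
# Illustrative example (not executed): for an inittab entry created as
# "startmyservice:4:once:echo hello", `lsitab startmyservice` prints that
# colon-separated line, so check_current_entry() returns
#   {'exist': True, 'name': 'startmyservice', 'runlevel': '4',
#    'action': 'once', 'command': 'echo hello'}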


def main():
    # initialize
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True, aliases=['service']),
            runlevel=dict(type='str', required=True),
            action=dict(type='str', choices=[
                'boot',
                'bootwait',
                'hold',
                'initdefault',
                'off',
                'once',
                'ondemand',
                'powerfail',
                'powerwait',
                'respawn',
                'sysinit',
                'wait',
            ]),
            command=dict(type='str', required=True),
            insertafter=dict(type='str'),
            state=dict(type='str', default='present', choices=['absent', 'present']),
        ),
        supports_check_mode=True,
    )

    result = {
        'name': module.params['name'],
        'changed': False,
        'msg': ""
    }

    # Find commandline strings
    mkitab = module.get_bin_path('mkitab')
    rmitab = module.get_bin_path('rmitab')
    chitab = module.get_bin_path('chitab')
    rc = 0

    # check if the new entry exists
    current_entry = check_current_entry(module)

    # if action is install or change,
    if module.params['state'] == 'present':

        # create new entry string
        new_entry = module.params['name'] + ":" + module.params['runlevel'] + \
            ":" + module.params['action'] + ":" + module.params['command']

        # If the entry does not exist, or an existing entry's fields differ,
        # the entry will be created or changed.
        if (not current_entry['exist']) or (
                module.params['runlevel'] != current_entry['runlevel'] or
                module.params['action'] != current_entry['action'] or
                module.params['command'] != current_entry['command']):

            # If the entry does exist then change the entry
            if current_entry['exist']:
                if not module.check_mode:
                    (rc, out, err) = module.run_command([chitab, new_entry])
                    if rc != 0:
                        module.fail_json(
                            msg="could not change inittab", rc=rc, err=err)
                result['msg'] = "changed inittab entry" + " " + current_entry['name']
                result['changed'] = True

            # If the entry does not exist create the entry
            elif not current_entry['exist']:
                if module.params['insertafter']:
                    if not module.check_mode:
                        (rc, out, err) = module.run_command(
                            [mkitab, '-i', module.params['insertafter'], new_entry])
                else:
                    if not module.check_mode:
                        (rc, out, err) = module.run_command(
                            [mkitab, new_entry])

                if rc != 0:
                    module.fail_json(msg="could not adjust inittab", rc=rc, err=err)
                result['msg'] = "add inittab entry" + " " + module.params['name']
                result['changed'] = True

    elif module.params['state'] == 'absent':
        # If the action is remove and the entry exists then remove the entry
        if current_entry['exist']:
            if not module.check_mode:
                (rc, out, err) = module.run_command(
                    [rmitab, module.params['name']])
                if rc != 0:
                    module.fail_json(
                        msg="could not remove entry from inittab", rc=rc, err=err)
            result['msg'] = "removed inittab entry" + " " + current_entry['name']
            result['changed'] = True

    module.exit_json(**result)


if __name__ == '__main__':
    main()
@@ -0,0 +1,364 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = r'''
---
author:
- Kairo Araujo (@kairoaraujo)
module: aix_lvg
short_description: Manage LVM volume groups on AIX
description:
- This module creates, removes or resizes volume groups on AIX LVM.
options:
  force:
    description:
    - Force volume group creation.
    type: bool
    default: false
  pp_size:
    description:
    - The size of the physical partition in megabytes.
    type: int
  pvs:
    description:
    - List of comma-separated devices to use as physical devices in this volume group.
    - Required when creating or extending (C(present) state) the volume group.
    - If not provided when reducing (C(absent) state), the whole volume group will be removed.
    type: list
    elements: str
  state:
    description:
    - Control if the volume group exists and its AIX activation state, varyonvg C(varyon) or varyoffvg C(varyoff).
    type: str
    choices: [ absent, present, varyoff, varyon ]
    default: present
  vg:
    description:
    - The name of the volume group.
    type: str
    required: true
  vg_type:
    description:
    - The type of the volume group.
    type: str
    choices: [ big, normal, scalable ]
    default: normal
notes:
- AIX permits removing a volume group only when none of its logical volumes/filesystems are busy.
- The module does not modify the PP size of an already existing volume group.
'''

EXAMPLES = r'''
- name: Create a volume group datavg
  community.general.aix_lvg:
    vg: datavg
    pp_size: 128
    vg_type: scalable
    state: present

- name: Removing a volume group datavg
  community.general.aix_lvg:
    vg: datavg
    state: absent

- name: Extending rootvg
  community.general.aix_lvg:
    vg: rootvg
    pvs: hdisk1
    state: present

- name: Reducing rootvg
  community.general.aix_lvg:
    vg: rootvg
    pvs: hdisk1
    state: absent
'''

RETURN = r''' # '''

from ansible.module_utils.basic import AnsibleModule

def _validate_pv(module, vg, pvs):
    """
    Function to validate if the physical volume (PV) is not already in use by
    another volume group or Oracle ASM.

    :param module: Ansible module argument spec.
    :param vg: Volume group name.
    :param pvs: Physical volume list.
    :return: [bool, message] or module.fail_json for errors.
    """

    lspv_cmd = module.get_bin_path('lspv', True)
    rc, current_lspv, stderr = module.run_command([lspv_cmd])
    if rc != 0:
        module.fail_json(msg="Failed executing 'lspv' command.", rc=rc, stdout=current_lspv, stderr=stderr)

    # Get pv list (name -> owning volume group, or 'None' when free).
    lspv_list = {}
    for line in current_lspv.splitlines():
        pv_data = line.split()
        lspv_list[pv_data[0]] = pv_data[2]

    for pv in pvs:
        # Check if pv exists and is free.
        if pv not in lspv_list.keys():
            module.fail_json(msg="Physical volume '%s' doesn't exist." % pv)

        if lspv_list[pv] == 'None':
            # Disk None, looks free.
            # Check if PV is not already in use by Oracle ASM.
            lquerypv_cmd = module.get_bin_path('lquerypv', True)
            rc, current_lquerypv, stderr = module.run_command([lquerypv_cmd, "-h", "/dev/%s" % pv, "20", "10"])
            if rc != 0:
                module.fail_json(msg="Failed executing lquerypv command.", rc=rc, stdout=current_lquerypv, stderr=stderr)

            if 'ORCLDISK' in current_lquerypv:
                module.fail_json(msg="Physical volume '%s' is already used by Oracle ASM." % pv)

            msg = "Physical volume '%s' is ok to be used." % pv
            return True, msg

        # Check if PV is already in use for the same vg.
        elif vg != lspv_list[pv]:
            module.fail_json(msg="Physical volume '%s' is in use by another volume group '%s'." % (pv, lspv_list[pv]))

    msg = "Physical volume '%s' is already used by volume group '%s'." % (pv, lspv_list[pv])
    return False, msg
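
# Illustrative `lspv` output parsed above (columns: name, PVID, volume group,
# state); the values here are hypothetical:
#   hdisk0  00f84c0d60a39d5f  rootvg  active
#   hdisk1  00f84c0d60a3a123  None
# lspv_list then maps 'hdisk0' -> 'rootvg' and 'hdisk1' -> 'None', so hdisk1
# is considered free (pending the Oracle ASM header check).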


def _validate_vg(module, vg):
    """
    Check the current state of volume group.

    :param module: Ansible module argument spec.
    :param vg: Volume Group name.
    :return: True (VG in varyon state) or False (VG in varyoff state) or
             None (VG does not exist), message.
    """
    lsvg_cmd = module.get_bin_path('lsvg', True)
    rc, current_active_vgs, err = module.run_command([lsvg_cmd, "-o"])
    if rc != 0:
        module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)

    rc, current_all_vgs, err = module.run_command([lsvg_cmd])
    if rc != 0:
        module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)

    if vg in current_all_vgs and vg not in current_active_vgs:
        msg = "Volume group '%s' is in varyoff state." % vg
        return False, msg

    if vg in current_active_vgs:
        msg = "Volume group '%s' is in varyon state." % vg
        return True, msg

    msg = "Volume group '%s' does not exist." % vg
    return None, msg
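
# Illustrative tri-state contract: `lsvg` lists all volume groups and
# `lsvg -o` lists only the active ones, so for a hypothetical 'datavg'
# _validate_vg() returns (True, ...) when it appears in both listings,
# (False, ...) when it appears only in `lsvg`, and (None, ...) when absent.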


def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation):
    """ Creates or extends a volume group. """

    # Command option parameters.
    force_opt = {
        True: '-f',
        False: ''
    }

    vg_opt = {
        'normal': '',
        'big': '-B',
        'scalable': '-S',
    }

    # Validate if PVs are not already in use.
    pv_state, msg = _validate_pv(module, vg, pvs)
    if not pv_state:
        changed = False
        return changed, msg

    vg_state, msg = vg_validation
    if vg_state is False:
        changed = False
        return changed, msg

    elif vg_state is True:
        # Volume group extension.
        changed = True
        msg = ""

        if not module.check_mode:
            extendvg_cmd = module.get_bin_path('extendvg', True)
            rc, output, err = module.run_command([extendvg_cmd, vg] + pvs)
            if rc != 0:
                changed = False
                msg = "Extending volume group '%s' has failed." % vg
                return changed, msg

        msg = "Volume group '%s' extended." % vg
        return changed, msg

    elif vg_state is None:
        # Volume group creation.
        changed = True
        msg = ''

        if not module.check_mode:
            mkvg_cmd = module.get_bin_path('mkvg', True)
            # Drop empty option strings so run_command does not receive empty
            # argv entries; pp_size is already an ['-s', '<size>'] argument
            # list (or an empty list) built in main().
            cmd = [mkvg_cmd] + [opt for opt in (vg_opt[vg_type], force_opt[force]) if opt] + pp_size + ["-y", vg] + pvs
            rc, output, err = module.run_command(cmd)
            if rc != 0:
                changed = False
                msg = "Creating volume group '%s' failed." % vg
                return changed, msg

        msg = "Volume group '%s' created." % vg
        return changed, msg


def reduce_vg(module, vg, pvs, vg_validation):
    vg_state, msg = vg_validation

    if vg_state is False:
        changed = False
        return changed, msg

    elif vg_state is None:
        changed = False
        return changed, msg

    # Define pvs_to_remove (list of physical volumes to be removed).
    if pvs is None:
        # Remove the VG if no pvs are provided.
        # Remark: AIX only permits the removal if the VG has no LVs.
        lsvg_cmd = module.get_bin_path('lsvg', True)
        rc, current_pvs, err = module.run_command([lsvg_cmd, "-p", vg])
        if rc != 0:
            module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)

        pvs_to_remove = []
        for line in current_pvs.splitlines()[2:]:
            pvs_to_remove.append(line.split()[0])

        reduce_msg = "Volume group '%s' removed." % vg
    else:
        pvs_to_remove = pvs
        reduce_msg = ("Physical volume(s) '%s' removed from Volume group '%s'." % (' '.join(pvs_to_remove), vg))

    # Reduce volume group.
    if not pvs_to_remove:
        changed = False
        msg = "No physical volumes to remove."
        return changed, msg

    changed = True
    msg = ''

    if not module.check_mode:
        reducevg_cmd = module.get_bin_path('reducevg', True)
        rc, stdout, stderr = module.run_command([reducevg_cmd, "-df", vg] + pvs_to_remove)
        if rc != 0:
            module.fail_json(msg="Unable to remove '%s'." % vg, rc=rc, stdout=stdout, stderr=stderr)

    msg = reduce_msg
    return changed, msg


def state_vg(module, vg, state, vg_validation):
    vg_state, msg = vg_validation

    if vg_state is None:
        module.fail_json(msg=msg)

    if state == 'varyon':
        if vg_state is True:
            changed = False
            return changed, msg

        changed = True
        msg = ''
        if not module.check_mode:
            varyonvg_cmd = module.get_bin_path('varyonvg', True)
            rc, varyonvg_out, err = module.run_command([varyonvg_cmd, vg])
            if rc != 0:
                module.fail_json(msg="Command 'varyonvg' failed.", rc=rc, err=err)

        msg = "Varyon volume group %s completed." % vg
        return changed, msg

    elif state == 'varyoff':
        if vg_state is False:
            changed = False
            return changed, msg

        changed = True
        msg = ''

        if not module.check_mode:
            varyoffvg_cmd = module.get_bin_path('varyoffvg', True)
            rc, varyoffvg_out, stderr = module.run_command([varyoffvg_cmd, vg])
            if rc != 0:
                module.fail_json(msg="Command 'varyoffvg' failed.", rc=rc, stdout=varyoffvg_out, stderr=stderr)

        msg = "Varyoff volume group %s completed." % vg
        return changed, msg


def main():
    module = AnsibleModule(
        argument_spec=dict(
            force=dict(type='bool', default=False),
            pp_size=dict(type='int'),
            pvs=dict(type='list', elements='str'),
            state=dict(type='str', default='present', choices=['absent', 'present', 'varyoff', 'varyon']),
            vg=dict(type='str', required=True),
            vg_type=dict(type='str', default='normal', choices=['big', 'normal', 'scalable'])
        ),
        supports_check_mode=True,
    )

    force = module.params['force']
    pp_size = module.params['pp_size']
    pvs = module.params['pvs']
    state = module.params['state']
    vg = module.params['vg']
    vg_type = module.params['vg_type']

    # Build the '-s <size>' option as an argument list so it is passed to
    # run_command as separate argv entries, not as one "-s N" string.
    if pp_size is None:
        pp_size = []
    else:
        pp_size = ["-s", str(pp_size)]

    vg_validation = _validate_vg(module, vg)

    if state == 'present':
        if not pvs:
            changed = False
            msg = "pvs is required for state 'present'."
            module.fail_json(msg=msg)
        else:
            changed, msg = create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation)

    elif state == 'absent':
        changed, msg = reduce_vg(module, vg, pvs, vg_validation)

    elif state in ('varyon', 'varyoff'):
        changed, msg = state_vg(module, vg, state, vg_validation)

    else:
        changed = False
        msg = "Unexpected state"

    module.exit_json(changed=changed, msg=msg, state=state)


if __name__ == '__main__':
    main()
@@ -0,0 +1,340 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2016, Alain Dejoux <adejoux@djouxtech.net>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = r'''
---
author:
- Alain Dejoux (@adejoux)
module: aix_lvol
short_description: Configure AIX LVM logical volumes
description:
- This module creates, removes or resizes AIX logical volumes. Inspired by the lvol module.
options:
  vg:
    description:
    - The volume group this logical volume is part of.
    type: str
    required: true
  lv:
    description:
    - The name of the logical volume.
    type: str
    required: true
  lv_type:
    description:
    - The type of the logical volume.
    type: str
    default: jfs2
  size:
    description:
    - The size of the logical volume with one of the C(M), C(G) or C(T) unit suffixes.
    type: str
  copies:
    description:
    - The number of copies of the logical volume.
    - At most 3 copies are allowed.
    type: int
    default: 1
  policy:
    description:
    - Sets the inter-physical volume allocation policy.
    - C(maximum) allocates logical partitions across the maximum number of physical volumes.
    - C(minimum) allocates logical partitions across the minimum number of physical volumes.
    type: str
    choices: [ maximum, minimum ]
    default: maximum
  state:
    description:
    - Control if the logical volume exists. If C(present) and the
      volume does not already exist then the C(size) option is required.
    type: str
    choices: [ absent, present ]
    default: present
  opts:
    description:
    - Free-form options to be passed to the mklv command.
    type: str
    default: ''
  pvs:
    description:
    - A list of physical volumes, for example C(hdisk1,hdisk2).
    type: list
    elements: str
    default: []
'''

EXAMPLES = r'''
- name: Create a logical volume of 512M
  community.general.aix_lvol:
    vg: testvg
    lv: testlv
    size: 512M

- name: Create a logical volume of 512M with disks hdisk1 and hdisk2
  community.general.aix_lvol:
    vg: testvg
    lv: test2lv
    size: 512M
    pvs: [ hdisk1, hdisk2 ]

- name: Create a logical volume of 512M mirrored
  community.general.aix_lvol:
    vg: testvg
    lv: test3lv
    size: 512M
    copies: 2

- name: Create a logical volume of 1G with a minimum placement policy
  community.general.aix_lvol:
    vg: rootvg
    lv: test4lv
    size: 1G
    policy: minimum

- name: Create a logical volume with special options like mirror pool
  community.general.aix_lvol:
    vg: testvg
    lv: testlv
    size: 512M
    opts: -p copy1=poolA -p copy2=poolB

- name: Extend the logical volume to 1200M
  community.general.aix_lvol:
    vg: testvg
    lv: test4lv
    size: 1200M

- name: Remove the logical volume
  community.general.aix_lvol:
    vg: testvg
    lv: testlv
    state: absent
'''

RETURN = r'''
msg:
  type: str
  description: A friendly message describing the task result.
  returned: always
  sample: Logical volume testlv created.
'''

import re

from ansible.module_utils.basic import AnsibleModule

def convert_size(module, size):
    # Convert a size string such as '512M' into a number of megabytes.
    unit = size[-1].upper()
    units = ['M', 'G', 'T']
    try:
        multiplier = 1024 ** units.index(unit)
    except ValueError:
        module.fail_json(msg="No valid size unit specified.")

    return int(size[:-1]) * multiplier
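
# Illustrative values: convert_size(module, "512M") -> 512,
# "2G" -> 2048 and "1T" -> 1048576, all expressed in megabytes.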


def round_ppsize(x, base=16):
    # Round x up to the nearest multiple of the physical partition size.
    new_size = int(base * round(float(x) / base))
    if new_size < x:
        new_size += base
    return new_size
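
# Illustrative values: round_ppsize(500, base=64) -> 512, and
# round_ppsize(100, base=16) -> 112 (plain rounding gives 96, which is
# below 100, so one more base unit is added).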


def parse_lv(data):
    name = None

    for line in data.splitlines():
        match = re.search(r"LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line)
        if match is not None:
            name = match.group(1)
            vg = match.group(2)
            continue
        match = re.search(r"LPs:\s+(\d+).*PPs", line)
        if match is not None:
            lps = int(match.group(1))
            continue
        match = re.search(r"PP SIZE:\s+(\d+)", line)
        if match is not None:
            pp_size = int(match.group(1))
            continue
        match = re.search(r"INTER-POLICY:\s+(\w+)", line)
        if match is not None:
            policy = match.group(1)
            continue

    if not name:
        return None

    size = lps * pp_size

    return {'name': name, 'vg': vg, 'size': size, 'policy': policy}
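
# Illustrative `lslv` lines matched above (values are hypothetical):
#   LOGICAL VOLUME:     testlv                 VOLUME GROUP:   testvg
#   LPs:                8                      PPs:            8
#   PP SIZE:            64 megabyte(s)
#   INTER-POLICY:       minimum
# parse_lv() would then return
#   {'name': 'testlv', 'vg': 'testvg', 'size': 512, 'policy': 'minimum'}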


def parse_vg(data):

    for line in data.splitlines():

        match = re.search(r"VOLUME GROUP:\s+(\w+)", line)
        if match is not None:
            name = match.group(1)
            continue

        match = re.search(r"TOTAL PP.*\((\d+)", line)
        if match is not None:
            size = int(match.group(1))
            continue

        match = re.search(r"PP SIZE:\s+(\d+)", line)
        if match is not None:
            pp_size = int(match.group(1))
            continue

        match = re.search(r"FREE PP.*\((\d+)", line)
        if match is not None:
            free = int(match.group(1))
            continue

    return {'name': name, 'size': size, 'free': free, 'pp_size': pp_size}
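
# Illustrative `lsvg` lines matched above (values are hypothetical):
#   VOLUME GROUP:   testvg
#   PP SIZE:        64 megabyte(s)
#   TOTAL PPs:      542 (34688 megabytes)
#   FREE PPs:       42 (2688 megabytes)
# parse_vg() would then return
#   {'name': 'testvg', 'size': 34688, 'free': 2688, 'pp_size': 64}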


def main():
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(type='str', required=True),
            lv=dict(type='str', required=True),
            lv_type=dict(type='str', default='jfs2'),
            size=dict(type='str'),
            opts=dict(type='str', default=''),
            copies=dict(type='int', default=1),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            policy=dict(type='str', default='maximum', choices=['maximum', 'minimum']),
            pvs=dict(type='list', elements='str', default=list())
        ),
        supports_check_mode=True,
    )

    vg = module.params['vg']
    lv = module.params['lv']
    lv_type = module.params['lv_type']
    size = module.params['size']
    opts = module.params['opts']
    copies = module.params['copies']
    policy = module.params['policy']
    state = module.params['state']
    pvs = module.params['pvs']

    pv_list = ' '.join(pvs)

    if policy == 'maximum':
        lv_policy = 'x'
    else:
        lv_policy = 'm'

    # Add echo command when running in check-mode
    if module.check_mode:
        test_opt = 'echo '
    else:
        test_opt = ''

    # check if system commands are available
    lsvg_cmd = module.get_bin_path("lsvg", required=True)
    lslv_cmd = module.get_bin_path("lslv", required=True)

    # Get information on volume group requested
    rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, msg="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, out=vg_info, err=err)

    this_vg = parse_vg(vg_info)

    if size is not None:
        # Calculate the requested size in MB and round it up to the PP size.
        lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size'])

    # Get information on logical volume requested
    rc, lv_info, err = module.run_command(
        "%s %s" % (lslv_cmd, lv))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, msg="Logical Volume %s does not exist." % lv)

    changed = False

    this_lv = parse_lv(lv_info)

    if state == 'present' and not size:
        if this_lv is None:
            module.fail_json(msg="No size given.")

    if this_lv is None:
        if state == 'present':
            if lv_size > this_vg['free']:
                module.fail_json(msg="Not enough free space in volume group %s: %s MB free." % (this_vg['name'], this_vg['free']))

            # create LV
            mklv_cmd = module.get_bin_path("mklv", required=True)

            cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list)
            rc, out, err = module.run_command(cmd)
            if rc == 0:
                module.exit_json(changed=True, msg="Logical volume %s created." % lv)
            else:
                module.fail_json(msg="Creating logical volume %s failed." % lv, rc=rc, out=out, err=err)
    else:
        if state == 'absent':
            # remove LV
            rmlv_cmd = module.get_bin_path("rmlv", required=True)
            rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=True, msg="Logical volume %s deleted." % lv)
            else:
                module.fail_json(msg="Failed to remove logical volume %s." % lv, rc=rc, out=out, err=err)
        else:
            if this_lv['policy'] != policy:
                # change lv allocation policy
                chlv_cmd = module.get_bin_path("chlv", required=True)
                rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name']))
                if rc == 0:
                    module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy))
                else:
                    module.fail_json(msg="Failed to change logical volume %s policy." % lv, rc=rc, out=out, err=err)

            if vg != this_lv['vg']:
                module.fail_json(msg="Logical volume %s already exists in volume group %s." % (lv, this_lv['vg']))

            # From here the only remaining action is resizing; if no size
            # parameter was passed we do nothing.
            if not size:
                module.exit_json(changed=False, msg="Logical volume %s already exists." % (lv))

            # resize LV based on absolute values
            if int(lv_size) > this_lv['size']:
                extendlv_cmd = module.get_bin_path("extendlv", required=True)
                cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size'])
                rc, out, err = module.run_command(cmd)
                if rc == 0:
                    module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." % (lv, lv_size))
                else:
                    module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size), rc=rc, out=out, err=err)
            elif lv_size < this_lv['size']:
                module.fail_json(msg="Shrinking of logical volume %s is not permitted. Current size: %s MB." % (lv, this_lv['size']))
            else:
                module.exit_json(changed=False, msg="Logical volume %s size is already %sMB." % (lv, lv_size))


if __name__ == '__main__':
    main()
@@ -0,0 +1,200 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2022, Christian Wollinger <@cwollinger>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: alerta_customer
short_description: Manage customers in Alerta
version_added: 4.8.0
description:
- Create or delete customers in Alerta with the REST API.
author: Christian Wollinger (@cwollinger)
seealso:
- name: API documentation
  description: Documentation for the Alerta API
  link: https://docs.alerta.io/api/reference.html#customers
options:
  customer:
    description:
    - Name of the customer.
    required: true
    type: str
  match:
    description:
    - The matching logged-in user for the customer.
    required: true
    type: str
  alerta_url:
    description:
    - The Alerta API endpoint.
    required: true
    type: str
  api_username:
    description:
    - The username for the API using basic auth.
    type: str
  api_password:
    description:
    - The password for the API using basic auth.
    type: str
  api_key:
    description:
    - The access token for the API.
    type: str
  state:
    description:
    - Whether the customer should exist or not.
    - Both I(customer) and I(match) identify a customer that should be added or removed.
    type: str
    choices: [ absent, present ]
    default: present
'''
EXAMPLES = """
|
||||
- name: Create customer
|
||||
community.general.alerta_customer:
|
||||
alerta_url: https://alerta.example.com
|
||||
api_username: admin@example.com
|
||||
api_password: password
|
||||
customer: Developer
|
||||
match: dev@example.com
|
||||
|
||||
- name: Delete customer
|
||||
community.general.alerta_customer:
|
||||
alerta_url: https://alerta.example.com
|
||||
api_username: admin@example.com
|
||||
api_password: password
|
||||
customer: Developer
|
||||
match: dev@example.com
|
||||
state: absent
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
msg:
|
||||
description:
|
||||
- Success or failure message.
|
||||
returned: always
|
||||
type: str
|
||||
sample: Customer customer1 created
|
||||
response:
|
||||
description:
|
||||
- The response from the API.
|
||||
returned: always
|
||||
type: dict
|
||||
"""
|
||||
|
||||
from ansible.module_utils.urls import fetch_url, basic_auth_header
from ansible.module_utils.basic import AnsibleModule


class AlertaInterface(object):

    def __init__(self, module):
        self.module = module
        self.state = module.params['state']
        self.customer = module.params['customer']
        self.match = module.params['match']
        self.alerta_url = module.params['alerta_url']
        self.headers = {"Content-Type": "application/json"}

        if module.params.get('api_key', None):
            self.headers["Authorization"] = "Key %s" % module.params['api_key']
        else:
            self.headers["Authorization"] = basic_auth_header(module.params['api_username'], module.params['api_password'])

    def send_request(self, url, data=None, method="GET"):
        response, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method)

        status_code = info["status"]
        if status_code == 401:
            self.module.fail_json(failed=True, response=info, msg="Unauthorized to request '%s' on '%s'" % (method, url))
        elif status_code == 403:
            self.module.fail_json(failed=True, response=info, msg="Permission Denied for '%s' on '%s'" % (method, url))
        elif status_code == 404:
            self.module.fail_json(failed=True, response=info, msg="Not found for request '%s' on '%s'" % (method, url))
        elif status_code in (200, 201):
            return self.module.from_json(response.read())
        self.module.fail_json(failed=True, response=info, msg="Alerta API error with HTTP %d for %s" % (status_code, url))

    def get_customers(self):
        url = "%s/api/customers" % self.alerta_url
        response = self.send_request(url)
        pages = response["pages"]
        if pages > 1:
            for page in range(2, pages + 1):
                page_url = url + '?page=' + str(page)
                new_results = self.send_request(page_url)
                # Merge the customer lists; a plain dict.update() would
                # overwrite the 'customers' key with the last page only.
                response["customers"].extend(new_results.get("customers", []))
        return response
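
    # Illustrative paginated response shape (field values are hypothetical):
    #   {"customers": [{"id": "abc123", "customer": "Developer",
    #                   "match": "dev@example.com"}],
    #    "pages": 2}
    # find_customer_id() below walks the merged 'customers' list.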

    def create_customer(self):
        url = "%s/api/customer" % self.alerta_url

        payload = {
            'customer': self.customer,
            'match': self.match,
        }

        payload = self.module.jsonify(payload)
        response = self.send_request(url, payload, 'POST')
        return response

    def delete_customer(self, customer_id):
        # 'customer_id' avoids shadowing the built-in id().
        url = "%s/api/customer/%s" % (self.alerta_url, customer_id)

        response = self.send_request(url, None, 'DELETE')
        return response

    def find_customer_id(self, customer):
        for i in customer['customers']:
            if self.customer == i['customer'] and self.match == i['match']:
                return i['id']
        return None
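
    # Illustrative lookup: with self.customer == 'Developer' and
    # self.match == 'dev@example.com', find_customer_id() returns 'abc123'
    # for the hypothetical response shown above, or None when no entry
    # matches both fields.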


def main():
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(choices=['present', 'absent'], default='present'),
            customer=dict(type='str', required=True),
            match=dict(type='str', required=True),
            alerta_url=dict(type='str', required=True),
            api_username=dict(type='str'),
            api_password=dict(type='str', no_log=True),
            api_key=dict(type='str', no_log=True),
        ),
        required_together=[['api_username', 'api_password']],
        mutually_exclusive=[['api_username', 'api_key']],
        supports_check_mode=True
    )

    alerta_iface = AlertaInterface(module)

    if alerta_iface.state == 'present':
        response = alerta_iface.get_customers()
        if alerta_iface.find_customer_id(response):
            module.exit_json(changed=False, response=response, msg="Customer %s already exists" % alerta_iface.customer)
        else:
            if not module.check_mode:
                response = alerta_iface.create_customer()
            module.exit_json(changed=True, response=response, msg="Customer %s created" % alerta_iface.customer)
    else:
        response = alerta_iface.get_customers()
        customer_id = alerta_iface.find_customer_id(response)
        if customer_id:
            if not module.check_mode:
                alerta_iface.delete_customer(customer_id)
            module.exit_json(changed=True, response=response, msg="Customer %s with id %s deleted" % (alerta_iface.customer, customer_id))
        else:
            module.exit_json(changed=False, response=response, msg="Customer %s does not exist" % alerta_iface.customer)


if __name__ == "__main__":
    main()
File diff suppressed because it is too large
@@ -0,0 +1,402 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see http://www.gnu.org/licenses/.

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

DOCUMENTATION = '''
---
module: ali_instance_info
short_description: Gather information on instances of Alibaba Cloud ECS
description:
- This module fetches data from the Open API in Alicloud.
  The module must be called from within the ECS instance itself.
- This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change.

options:
  name_prefix:
    description:
    - Use an instance name prefix to filter ECS instances.
    type: str
    version_added: '0.2.0'
  tags:
    description:
    - A hash/dictionary of instance tags, for example C({"key":"value"}).
    aliases: ["instance_tags"]
    type: dict
  filters:
    description:
    - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be
      any of the request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details.
      Filter keys can be the same as the request parameter name, or be lower case and use underscore ("_") or dash ("-") to
      connect different words in one parameter. C(InstanceIds) should be a list.
      C(Tag.n.Key) and C(Tag.n.Value) should be a dict; use I(tags) instead.
    type: dict
    version_added: '0.2.0'
author:
- "He Guimin (@xiaozhu36)"
requirements:
- "python >= 3.6"
- "footmark >= 1.13.0"
extends_documentation_fragment:
- community.general.alicloud
- community.general.attributes
- community.general.attributes.info_module
'''
EXAMPLES = '''
# Fetch instances details according to setting different filters

- name: Find all instances in the specified region
  community.general.ali_instance_info:
  register: all_instances

- name: Find all instances based on the specified ids
  community.general.ali_instance_info:
    instance_ids:
      - "i-35b333d9"
      - "i-ddav43kd"
  register: instances_by_ids

- name: Find all instances based on the specified name_prefix
  community.general.ali_instance_info:
    name_prefix: "ecs_instance_"
  register: instances_by_name_prefix

- name: Find instances based on tags
  community.general.ali_instance_info:
    tags:
      Test: "add"
'''

RETURN = '''
instances:
    description: List of ECS instances
    returned: always
    type: complex
    contains:
        availability_zone:
            description: The availability zone the instance is in.
            returned: always
            type: str
            sample: cn-beijing-a
        block_device_mappings:
            description: Any block device mapping entries for the instance.
            returned: always
            type: complex
            contains:
                device_name:
                    description: The device name exposed to the instance (for example, /dev/xvda).
                    returned: always
                    type: str
                    sample: /dev/xvda
                attach_time:
                    description: The time stamp when the attachment initiated.
                    returned: always
                    type: str
                    sample: "2018-06-25T04:08:26Z"
                delete_on_termination:
                    description: Indicates whether the volume is deleted on instance termination.
                    returned: always
                    type: bool
                    sample: true
                status:
                    description: The attachment state.
                    returned: always
                    type: str
                    sample: in_use
                volume_id:
                    description: The ID of the cloud disk.
                    returned: always
                    type: str
                    sample: d-2zei53pjsi117y6gf9t6
        cpu:
            description: The CPU core count of the instance.
            returned: always
            type: int
            sample: 4
        creation_time:
            description: The time the instance was created.
            returned: always
            type: str
            sample: "2018-06-25T04:08Z"
        description:
            description: The instance description.
            returned: always
            type: str
            sample: "my ansible instance"
        eip:
            description: The attributes of the EIP associated with the instance.
            returned: always
            type: complex
            contains:
                allocation_id:
                    description: The ID of the EIP.
                    returned: always
                    type: str
                    sample: eip-12345
                internet_charge_type:
                    description: The internet charge type of the EIP.
                    returned: always
                    type: str
                    sample: "paybybandwidth"
                ip_address:
                    description: EIP address.
                    returned: always
                    type: str
                    sample: 42.10.2.2
        expired_time:
            description: The time the instance will expire.
            returned: always
            type: str
            sample: "2099-12-31T15:59Z"
        gpu:
            description: The attributes of the instance GPU.
            returned: always
            type: complex
            contains:
                amount:
                    description: The count of the GPU.
                    returned: always
                    type: int
                    sample: 0
                spec:
                    description: The specification of the GPU.
                    returned: always
                    type: str
                    sample: ""
        host_name:
            description: The host name of the instance.
            returned: always
            type: str
            sample: iZ2zewaoZ
        id:
            description: Alias of instance_id.
            returned: always
            type: str
            sample: i-abc12345
        instance_id:
            description: ECS instance resource ID.
            returned: always
            type: str
            sample: i-abc12345
        image_id:
            description: The ID of the image used to launch the instance.
            returned: always
            type: str
            sample: m-0011223344
        inner_ip_address:
            description: The inner IPv4 address of the classic instance.
            returned: always
            type: str
            sample: 10.0.0.2
        instance_charge_type:
            description: The instance charge type.
            returned: always
            type: str
            sample: PostPaid
        instance_name:
            description: The name of the instance.
            returned: always
            type: str
            sample: my-ecs
        instance_type_family:
            description: The instance type family to which the instance belongs.
            returned: always
            type: str
            sample: ecs.sn1ne
        instance_type:
            description: The instance type of the running instance.
            returned: always
            type: str
            sample: ecs.sn1ne.xlarge
        internet_charge_type:
            description: The billing method of the network bandwidth.
            returned: always
            type: str
            sample: PayByBandwidth
        internet_max_bandwidth_in:
            description: Maximum incoming bandwidth from the internet network.
            returned: always
            type: int
            sample: 200
        internet_max_bandwidth_out:
            description: Maximum outgoing bandwidth to the internet network.
            returned: always
            type: int
            sample: 20
        io_optimized:
            description: Indicates whether the instance is optimized for EBS I/O.
            returned: always
            type: bool
            sample: false
        memory:
            description: Memory size of the instance.
            returned: always
            type: int
            sample: 8192
        network_interfaces:
            description: One or more network interfaces for the instance.
            returned: always
            type: complex
            contains:
                mac_address:
                    description: The MAC address.
                    returned: always
                    type: str
                    sample: "00:11:22:33:44:55"
                network_interface_id:
                    description: The ID of the network interface.
                    returned: always
                    type: str
                    sample: eni-01234567
                primary_ip_address:
                    description: The primary IPv4 address of the network interface within the vswitch.
                    returned: always
                    type: str
                    sample: 10.0.0.1
        osname:
            description: The operating system name of the instance.
            returned: always
            type: str
            sample: CentOS
        ostype:
            description: The operating system type of the instance.
            returned: always
            type: str
            sample: linux
        private_ip_address:
            description: The IPv4 address of the network interface within the subnet.
            returned: always
            type: str
            sample: 10.0.0.1
        public_ip_address:
            description: The public IPv4 address assigned to the instance, or the EIP address.
            returned: always
            type: str
            sample: 43.0.0.1
        resource_group_id:
            description: The ID of the resource group to which the instance belongs.
            returned: always
            type: str
            sample: my-ecs-group
        security_groups:
            description: One or more security groups for the instance.
            returned: always
            type: list
            elements: dict
            contains:
                group_id:
                    description: The ID of the security group.
                    returned: always
                    type: str
                    sample: sg-0123456
                group_name:
                    description: The name of the security group.
                    returned: always
                    type: str
                    sample: my-security-group
        status:
            description: The current status of the instance.
            returned: always
            type: str
            sample: running
        tags:
            description: Any tags assigned to the instance.
            returned: always
            type: dict
            sample:
        vswitch_id:
            description: The ID of the vswitch in which the instance is running.
            returned: always
            type: str
            sample: vsw-dew00abcdef
        vpc_id:
            description: The ID of the VPC the instance is in.
            returned: always
            type: str
            sample: vpc-0011223344
ids:
    description: List of ECS instance IDs
    returned: always
    type: list
    sample: [i-12345er, i-3245fs]
'''

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import (
    ecs_argument_spec, ecs_connect, FOOTMARK_IMP_ERR, HAS_FOOTMARK
)


def main():
    argument_spec = ecs_argument_spec()
    argument_spec.update(dict(
        name_prefix=dict(type='str'),
        tags=dict(type='dict', aliases=['instance_tags']),
        filters=dict(type='dict')
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    if HAS_FOOTMARK is False:
        module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)

    ecs = ecs_connect(module)

    instances = []
    instance_ids = []
    ids = []
    name_prefix = module.params['name_prefix']

    filters = module.params['filters']
    if not filters:
        filters = {}
    # Normalize any user-supplied instance-ID filter key into a single
    # deduplicated 'instance_ids' list.
    for key, value in list(filters.items()):
        if key in ["InstanceIds", "instance_ids", "instance-ids"] and isinstance(value, list):
            for inst_id in value:
                if inst_id not in ids:
                    ids.append(inst_id)
    if ids:
        filters['instance_ids'] = ids
    if module.params['tags']:
        filters['tags'] = module.params['tags']
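
    # Illustrative normalization (values are hypothetical): a caller passing
    #   filters={'instance-ids': ['i-35b333d9', 'i-ddav43kd']}
    # ends up with
    #   filters={'instance_ids': ['i-35b333d9', 'i-ddav43kd']}
    # before the describe_instances() call below.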

    for inst in ecs.describe_instances(**filters):
        if name_prefix:
            if not str(inst.instance_name).startswith(name_prefix):
                continue
        volumes = ecs.describe_disks(instance_id=inst.id)
        setattr(inst, 'block_device_mappings', volumes)
        setattr(inst, 'user_data', inst.describe_user_data())
        instances.append(inst.read())
        instance_ids.append(inst.id)

    module.exit_json(changed=False, ids=instance_ids, instances=instances)


if __name__ == '__main__':
    main()
@@ -0,0 +1,398 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2014, Gabe Mulley <gabe.mulley@gmail.com>
# Copyright (c) 2015, David Wittman <dwittman@gmail.com>
# Copyright (c) 2022, Marius Rieder <marius.rieder@scs.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = r'''
---
module: alternatives
short_description: Manages alternative programs for common commands
description:
- Manages symbolic links using the 'update-alternatives' tool.
- Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
author:
- Marius Rieder (@jiuka)
- David Wittman (@DavidWittman)
- Gabe Mulley (@mulby)
options:
  name:
    description:
    - The generic name of the link.
    type: str
    required: true
  path:
    description:
    - The path to the real executable that the link should point to.
    type: path
    required: true
  link:
    description:
    - The path to the symbolic link that should point to the real executable.
    - This option is always required on RHEL-based distributions. On Debian-based distributions this option is
      required when the alternative I(name) is unknown to the system.
    type: path
  priority:
    description:
    - The priority of the alternative. If no priority is given for creation C(50) is used as a fallback.
    type: int
  state:
    description:
    - C(present) - install the alternative (if not already installed), but do
      not set it as the currently selected alternative for the group.
    - C(selected) - install the alternative (if not already installed), and
      set it as the currently selected alternative for the group.
    - C(auto) - install the alternative (if not already installed), and
      set the group to auto mode. Added in community.general 5.1.0.
    - C(absent) - removes the alternative. Added in community.general 5.1.0.
    choices: [ present, selected, auto, absent ]
    default: selected
    type: str
    version_added: 4.8.0
  subcommands:
    description:
    - A list of subcommands.
    - Each subcommand needs a name, a link and a path parameter.
    type: list
    elements: dict
    aliases: ['slaves']
    suboptions:
      name:
        description:
        - The generic name of the subcommand.
        type: str
        required: true
      path:
        description:
        - The path to the real executable that the subcommand should point to.
        type: path
        required: true
      link:
        description:
        - The path to the symbolic link that should point to the real subcommand executable.
        type: path
        required: true
    version_added: 5.1.0
requirements: [ update-alternatives ]
'''

EXAMPLES = r'''
- name: Correct java version selected
  community.general.alternatives:
    name: java
    path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java

- name: Alternatives link created
  community.general.alternatives:
    name: hadoop-conf
    link: /etc/hadoop/conf
    path: /etc/hadoop/conf.ansible

- name: Make java 32 bit an alternative with low priority
  community.general.alternatives:
    name: java
    path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java
    priority: -10

- name: Install Python 3.5 but do not select it
  community.general.alternatives:
    name: python
    path: /usr/bin/python3.5
    link: /usr/bin/python
    state: present

- name: Install Python 3.5 and reset selection to auto
  community.general.alternatives:
    name: python
    path: /usr/bin/python3.5
    link: /usr/bin/python
    state: auto

- name: keytool is a subcommand of java
  community.general.alternatives:
    name: java
    link: /usr/bin/java
    path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
    subcommands:
      - name: keytool
        link: /usr/bin/keytool
        path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/keytool
'''

import os
import re

from ansible.module_utils.basic import AnsibleModule


class AlternativeState:
    PRESENT = "present"
    SELECTED = "selected"
    ABSENT = "absent"
    AUTO = "auto"

    @classmethod
    def to_list(cls):
        return [cls.PRESENT, cls.SELECTED, cls.ABSENT, cls.AUTO]


class AlternativesModule(object):
    _UPDATE_ALTERNATIVES = None

    def __init__(self, module):
        self.module = module
        self.result = dict(changed=False, diff=dict(before=dict(), after=dict()))
        self.module.run_command_environ_update = {'LC_ALL': 'C'}
        self.messages = []
        self.run()

    @property
    def mode_present(self):
        return self.module.params.get('state') in [AlternativeState.PRESENT, AlternativeState.SELECTED, AlternativeState.AUTO]

    @property
    def mode_selected(self):
        return self.module.params.get('state') == AlternativeState.SELECTED

    @property
    def mode_auto(self):
        return self.module.params.get('state') == AlternativeState.AUTO

    def run(self):
        self.parse()

        if self.mode_present:
            # Check if we need to (re)install
            subcommands_parameter = self.module.params['subcommands']
            priority_parameter = self.module.params['priority']
            if (
                self.path not in self.current_alternatives or
                (priority_parameter is not None and self.current_alternatives[self.path].get('priority') != priority_parameter) or
                (subcommands_parameter is not None and (
                    not all(s in subcommands_parameter for s in self.current_alternatives[self.path].get('subcommands')) or
                    not all(s in self.current_alternatives[self.path].get('subcommands') for s in subcommands_parameter)
                ))
            ):
                self.install()

            # Check if we need to set the preference
            if self.mode_selected and self.current_path != self.path:
                self.set()

            # Check if we need to reset to auto
            if self.mode_auto and self.current_mode == 'manual':
                self.auto()
        else:
            # Check if we need to uninstall
            if self.path in self.current_alternatives:
                self.remove()

        self.result['msg'] = ' '.join(self.messages)
        self.module.exit_json(**self.result)
|
||||
|
||||
def install(self):
|
||||
if not os.path.exists(self.path):
|
||||
self.module.fail_json(msg="Specified path %s does not exist" % self.path)
|
||||
if not self.link:
|
||||
self.module.fail_json(msg='Needed to install the alternative, but unable to do so as we are missing the link')
|
||||
|
||||
cmd = [self.UPDATE_ALTERNATIVES, '--install', self.link, self.name, self.path, str(self.priority)]
|
||||
|
||||
if self.module.params['subcommands'] is not None:
|
||||
subcommands = [['--slave', subcmd['link'], subcmd['name'], subcmd['path']] for subcmd in self.subcommands]
|
||||
cmd += [item for sublist in subcommands for item in sublist]
|
||||
|
||||
self.result['changed'] = True
|
||||
self.messages.append("Install alternative '%s' for '%s'." % (self.path, self.name))
|
||||
|
||||
if not self.module.check_mode:
|
||||
self.module.run_command(cmd, check_rc=True)
|
||||
|
||||
if self.module._diff:
|
||||
self.result['diff']['after'] = dict(
|
||||
state=AlternativeState.PRESENT,
|
||||
path=self.path,
|
||||
priority=self.priority,
|
||||
link=self.link,
|
||||
)
|
||||
if self.subcommands:
|
||||
self.result['diff']['after'].update(dict(
|
||||
subcommands=self.subcommands
|
||||
))
|
||||
|
||||
def remove(self):
|
||||
cmd = [self.UPDATE_ALTERNATIVES, '--remove', self.name, self.path]
|
||||
self.result['changed'] = True
|
||||
self.messages.append("Remove alternative '%s' from '%s'." % (self.path, self.name))
|
||||
|
||||
if not self.module.check_mode:
|
||||
self.module.run_command(cmd, check_rc=True)
|
||||
|
||||
if self.module._diff:
|
||||
self.result['diff']['after'] = dict(state=AlternativeState.ABSENT)
|
||||
|
||||
def set(self):
|
||||
cmd = [self.UPDATE_ALTERNATIVES, '--set', self.name, self.path]
|
||||
self.result['changed'] = True
|
||||
self.messages.append("Set alternative '%s' for '%s'." % (self.path, self.name))
|
||||
|
||||
if not self.module.check_mode:
|
||||
self.module.run_command(cmd, check_rc=True)
|
||||
|
||||
if self.module._diff:
|
||||
self.result['diff']['after']['state'] = AlternativeState.SELECTED
|
||||
|
||||
def auto(self):
|
||||
cmd = [self.UPDATE_ALTERNATIVES, '--auto', self.name]
|
||||
self.messages.append("Set alternative to auto for '%s'." % (self.name))
|
||||
self.result['changed'] = True
|
||||
|
||||
if not self.module.check_mode:
|
||||
self.module.run_command(cmd, check_rc=True)
|
||||
|
||||
if self.module._diff:
|
||||
self.result['diff']['after']['state'] = AlternativeState.PRESENT
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self.module.params.get('name')
|
||||
|
||||
@property
|
||||
def path(self):
|
||||
return self.module.params.get('path')
|
||||
|
||||
@property
|
||||
def link(self):
|
||||
return self.module.params.get('link') or self.current_link
|
||||
|
||||
@property
|
||||
def priority(self):
|
||||
if self.module.params.get('priority') is not None:
|
||||
return self.module.params.get('priority')
|
||||
return self.current_alternatives.get(self.path, {}).get('priority', 50)
|
||||
|
||||
@property
|
||||
def subcommands(self):
|
||||
if self.module.params.get('subcommands') is not None:
|
||||
return self.module.params.get('subcommands')
|
||||
elif self.path in self.current_alternatives and self.current_alternatives[self.path].get('subcommands'):
|
||||
return self.current_alternatives[self.path].get('subcommands')
|
||||
return None
|
||||
|
||||
@property
|
||||
def UPDATE_ALTERNATIVES(self):
|
||||
if self._UPDATE_ALTERNATIVES is None:
|
||||
self._UPDATE_ALTERNATIVES = self.module.get_bin_path('update-alternatives', True)
|
||||
return self._UPDATE_ALTERNATIVES
|
||||
|
||||
def parse(self):
|
||||
self.current_mode = None
|
||||
self.current_path = None
|
||||
self.current_link = None
|
||||
self.current_alternatives = {}
|
||||
|
||||
# Run `update-alternatives --display <name>` to find existing alternatives
|
||||
(rc, display_output, dummy) = self.module.run_command(
|
||||
[self.UPDATE_ALTERNATIVES, '--display', self.name]
|
||||
)
|
||||
|
||||
if rc != 0:
|
||||
self.module.debug("No current alternative found. '%s' exited with %s" % (self.UPDATE_ALTERNATIVES, rc))
|
||||
return
|
||||
|
||||
current_mode_regex = re.compile(r'\s-\s(?:status\sis\s)?(\w*)(?:\smode|.)$', re.MULTILINE)
|
||||
current_path_regex = re.compile(r'^\s*link currently points to (.*)$', re.MULTILINE)
|
||||
current_link_regex = re.compile(r'^\s*link \w+ is (.*)$', re.MULTILINE)
|
||||
subcmd_path_link_regex = re.compile(r'^\s*slave (\S+) is (.*)$', re.MULTILINE)
|
||||
|
||||
alternative_regex = re.compile(r'^(\/.*)\s-\s(?:family\s\S+\s)?priority\s(\d+)((?:\s+slave.*)*)', re.MULTILINE)
|
||||
subcmd_regex = re.compile(r'^\s+slave (.*): (.*)$', re.MULTILINE)
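        # Assumed sample of `update-alternatives --display editor` output these
        # regexes were written against (exact wording varies by distribution):
        #   editor - auto mode
        #     link currently points to /usr/bin/vim.basic
        #     link editor is /usr/bin/editor
        #     slave editor.1.gz is /usr/share/man/man1/editor.1.gz
        #   /usr/bin/vim.basic - priority 30
        #     slave editor.1.gz: /usr/share/man/man1/vim.1.gz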

        match = current_mode_regex.search(display_output)
        if not match:
            self.module.debug("No current mode found in output")
            return
        self.current_mode = match.group(1)

        match = current_path_regex.search(display_output)
        if not match:
            self.module.debug("No current path found in output")
        else:
            self.current_path = match.group(1)

        match = current_link_regex.search(display_output)
        if not match:
            self.module.debug("No current link found in output")
        else:
            self.current_link = match.group(1)

        subcmd_path_map = dict(subcmd_path_link_regex.findall(display_output))
        if not subcmd_path_map and self.subcommands:
            subcmd_path_map = dict((s['name'], s['link']) for s in self.subcommands)

        for path, prio, subcmd in alternative_regex.findall(display_output):
            self.current_alternatives[path] = dict(
                priority=int(prio),
                subcommands=[dict(
                    name=name,
                    path=spath,
                    link=subcmd_path_map.get(name)
                ) for name, spath in subcmd_regex.findall(subcmd) if spath != '(null)']
            )

        if self.module._diff:
            if self.path in self.current_alternatives:
                self.result['diff']['before'].update(dict(
                    state=AlternativeState.PRESENT,
                    path=self.path,
                    priority=self.current_alternatives[self.path].get('priority'),
                    link=self.current_link,
                ))
                if self.current_alternatives[self.path].get('subcommands'):
                    self.result['diff']['before'].update(dict(
                        subcommands=self.current_alternatives[self.path].get('subcommands')
                    ))
                if self.current_mode == 'manual' and self.current_path != self.path:
                    self.result['diff']['before'].update(dict(
                        state=AlternativeState.SELECTED
                    ))
            else:
                self.result['diff']['before'].update(dict(
                    state=AlternativeState.ABSENT
                ))


def main():

    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            path=dict(type='path', required=True),
            link=dict(type='path'),
            priority=dict(type='int'),
            state=dict(
                type='str',
                choices=AlternativeState.to_list(),
                default=AlternativeState.SELECTED,
            ),
            subcommands=dict(type='list', elements='dict', aliases=['slaves'], options=dict(
                name=dict(type='str', required=True),
                path=dict(type='path', required=True),
                link=dict(type='path', required=True),
            )),
        ),
        supports_check_mode=True,
    )

    AlternativesModule(module)


if __name__ == '__main__':
    main()
@@ -0,0 +1,350 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Alexei Znamensky <russoz@gmail.com>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = """
module: ansible_galaxy_install
author:
  - "Alexei Znamensky (@russoz)"
short_description: Install Ansible roles or collections using ansible-galaxy
version_added: 3.5.0
description:
  - This module allows the installation of Ansible collections or roles using C(ansible-galaxy).
notes:
  - >
    B(Ansible 2.9/2.10): The C(ansible-galaxy) command changed significantly between Ansible 2.9 and
    ansible-base 2.10 (later ansible-core 2.11). See comments in the parameters.
requirements:
  - Ansible 2.9, ansible-base 2.10, or ansible-core 2.11 or newer
options:
  type:
    description:
      - The type of installation performed by C(ansible-galaxy).
      - If I(type) is C(both), then I(requirements_file) must be passed and it may contain both roles and collections.
      - "Note however that the opposite is not true: if using a I(requirements_file), then I(type) can be any of the three choices."
      - "B(Ansible 2.9): The option C(both) will have the same effect as C(role)."
    type: str
    choices: [collection, role, both]
    required: true
  name:
    description:
      - Name of the collection or role being installed.
      - >
        Versions can be specified in the usual C(ansible-galaxy) formats.
        For example, the collection C(community.docker:1.6.1) or the role C(ansistrano.deploy,3.8.0).
      - I(name) and I(requirements_file) are mutually exclusive.
    type: str
  requirements_file:
    description:
      - Path to a file containing a list of requirements to be installed.
      - It works when I(type) equals C(collection) or C(role).
      - I(name) and I(requirements_file) are mutually exclusive.
      - "B(Ansible 2.9): It can only be used to install either I(type=role) or I(type=collection), but not both in the same run."
    type: path
  dest:
    description:
      - The path to the directory containing your collections or roles, according to the value of I(type).
      - >
        Please note that C(ansible-galaxy) will not install collections with I(type=both), when I(requirements_file)
        contains both roles and collections and I(dest) is specified.
    type: path
  no_deps:
    description:
      - Refrain from installing dependencies.
    version_added: 4.5.0
    type: bool
    default: false
  force:
    description:
      - Force overwriting an existing role or collection.
      - Using I(force=true) is mandatory when downgrading.
      - "B(Ansible 2.9 and 2.10): Must be C(true) to upgrade roles and collections."
    type: bool
    default: false
  ack_ansible29:
    description:
      - Acknowledge using Ansible 2.9 with its limitations, and prevent the module from generating warnings about them.
      - This option is completely ignored if using a version of Ansible greater than C(2.9.x).
      - Note that this option will be removed without any further deprecation warning once support
        for Ansible 2.9 is removed from this module.
    type: bool
    default: false
  ack_min_ansiblecore211:
    description:
      - Acknowledge the module is deprecating support for Ansible 2.9 and ansible-base 2.10.
      - Support for those versions will be removed in community.general 8.0.0.
        At the same time, this option will be removed without any deprecation warning!
      - This option is completely ignored if using a version of ansible-core/ansible-base/Ansible greater than C(2.11).
      - For the sake of conciseness, setting this parameter to C(true) implies I(ack_ansible29=true).
    type: bool
    default: false
"""

EXAMPLES = """
- name: Install collection community.network
  community.general.ansible_galaxy_install:
    type: collection
    name: community.network

- name: Install role at specific path
  community.general.ansible_galaxy_install:
    type: role
    name: ansistrano.deploy
    dest: /ansible/roles

- name: Install collections and roles together
  community.general.ansible_galaxy_install:
    type: both
    requirements_file: requirements.yml

- name: Force-install collection community.network at specific version
  community.general.ansible_galaxy_install:
    type: collection
    name: community.network:3.0.2
    force: true
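
# A minimal sketch of the no_deps option documented above (collection name is
# only an assumed example): skip installing the collection's dependencies.
- name: Install collection community.docker without its dependencies
  community.general.ansible_galaxy_install:
    type: collection
    name: community.docker
    no_deps: true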

"""

RETURN = """
type:
  description: The value of the I(type) parameter.
  type: str
  returned: always
name:
  description: The value of the I(name) parameter.
  type: str
  returned: always
dest:
  description: The value of the I(dest) parameter.
  type: str
  returned: always
requirements_file:
  description: The value of the I(requirements_file) parameter.
  type: str
  returned: always
force:
  description: The value of the I(force) parameter.
  type: bool
  returned: always
installed_roles:
  description:
    - If I(requirements_file) is specified, returns a dictionary with all the roles installed per path.
    - If I(name) is specified, returns that role name and the version installed per path.
    - "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand."
  type: dict
  returned: always when installing roles
  contains:
    "<path>":
      description: Roles and versions for that path.
      type: dict
  sample:
    /home/user42/.ansible/roles:
      ansistrano.deploy: 3.9.0
      baztian.xfce: v0.0.3
    /custom/ansible/roles:
      ansistrano.deploy: 3.8.0
installed_collections:
  description:
    - If I(requirements_file) is specified, returns a dictionary with all the collections installed per path.
    - If I(name) is specified, returns that collection name and the version installed per path.
    - "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand."
  type: dict
  returned: always when installing collections
  contains:
    "<path>":
      description: Collections and versions for that path.
      type: dict
  sample:
    /home/az/.ansible/collections/ansible_collections:
      community.docker: 1.6.0
      community.general: 3.0.2
    /custom/ansible/ansible_collections:
      community.general: 3.1.0
new_collections:
  description: New collections installed by this module.
  returned: success
  type: dict
  sample:
    community.general: 3.1.0
    community.docker: 1.6.1
new_roles:
  description: New roles installed by this module.
  returned: success
  type: dict
  sample:
    ansistrano.deploy: 3.8.0
    baztian.xfce: v0.0.3
"""

import re

from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt
from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper


class AnsibleGalaxyInstall(ModuleHelper):
    _RE_GALAXY_VERSION = re.compile(r'^ansible-galaxy(?: \[core)? (?P<version>\d+\.\d+\.\d+)(?:\.\w+)?(?:\])?')
    _RE_LIST_PATH = re.compile(r'^# (?P<path>.*)$')
    _RE_LIST_COLL = re.compile(r'^(?P<elem>\w+\.\w+)\s+(?P<version>[\d\.]+)\s*$')
    _RE_LIST_ROLE = re.compile(r'^- (?P<elem>\w+\.\w+),\s+(?P<version>[\d\.]+)\s*$')
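    # Assumed samples of the ansible-galaxy output lines the regexes above match:
    #   _RE_GALAXY_VERSION: "ansible-galaxy 2.9.27" or "ansible-galaxy [core 2.12.1]"
    #   _RE_LIST_PATH:      "# /home/user/.ansible/collections/ansible_collections"
    #   _RE_LIST_COLL:      "community.general 3.1.0"
    #   _RE_LIST_ROLE:      "- ansistrano.deploy, 3.8.0"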
    _RE_INSTALL_OUTPUT = None  # Set after determining the ansible version, see __init_module__()
    ansible_version = None
    is_ansible29 = None

    output_params = ('type', 'name', 'dest', 'requirements_file', 'force', 'no_deps')
    module = dict(
        argument_spec=dict(
            type=dict(type='str', choices=('collection', 'role', 'both'), required=True),
            name=dict(type='str'),
            requirements_file=dict(type='path'),
            dest=dict(type='path'),
            force=dict(type='bool', default=False),
            no_deps=dict(type='bool', default=False),
            ack_ansible29=dict(type='bool', default=False),
            ack_min_ansiblecore211=dict(type='bool', default=False),
        ),
        mutually_exclusive=[('name', 'requirements_file')],
        required_one_of=[('name', 'requirements_file')],
        required_if=[('type', 'both', ['requirements_file'])],
        supports_check_mode=False,
    )

    command = 'ansible-galaxy'
    command_args_formats = dict(
        type=fmt.as_func(lambda v: [] if v == 'both' else [v]),
        galaxy_cmd=fmt.as_list(),
        requirements_file=fmt.as_opt_val('-r'),
        dest=fmt.as_opt_val('-p'),
        force=fmt.as_bool("--force"),
        no_deps=fmt.as_bool("--no-deps"),
        version=fmt.as_bool("--version"),
        name=fmt.as_list(),
    )
    force_lang = "en_US.UTF-8"
    check_rc = True

    def _get_ansible_galaxy_version(self):
        def process(rc, out, err):
            line = out.splitlines()[0]
            match = self._RE_GALAXY_VERSION.match(line)
            if not match:
                self.do_raise("Unable to determine ansible-galaxy version from: {0}".format(line))
            version = match.group("version")
            version = tuple(int(x) for x in version.split('.')[:3])
            return version

        with self.runner("version", check_rc=True, output_process=process) as ctx:
            return ctx.run(version=True)

    def __init_module__(self):
        self.runner = CmdRunner(self.module, command=self.command, arg_formats=self.command_args_formats, force_lang=self.force_lang)
        self.ansible_version = self._get_ansible_galaxy_version()
        if self.ansible_version < (2, 11) and not self.vars.ack_min_ansiblecore211:
            self.module.deprecate(
                "Support for Ansible 2.9 and ansible-base 2.10 is being deprecated. "
                "When support for them ends, the ack_ansible29 option will be removed as well. "
                "Upgrading is strongly recommended, or set 'ack_min_ansiblecore211' to suppress this message.",
                version="8.0.0",
                collection_name="community.general",
            )
        self.is_ansible29 = self.ansible_version < (2, 10)
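        # Assumed samples of Ansible 2.9 'ansible-galaxy install' output matched
        # by the regex below (collections and roles print different lines):
        #   Installing 'community.docker:1.6.1' to '/home/user/.ansible/...'
        #   - ansistrano.deploy (3.8.0) was installed successfully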
        if self.is_ansible29:
            self._RE_INSTALL_OUTPUT = re.compile(r"^(?:.*Installing '(?P<collection>\w+\.\w+):(?P<cversion>[\d\.]+)'.*"
                                                 r'|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\)'
                                                 r' was installed successfully)$')
        else:
            # Collection install output changed:
            # ansible-base 2.10: "coll.name (x.y.z)"
            # ansible-core 2.11+: "coll.name:x.y.z"
            self._RE_INSTALL_OUTPUT = re.compile(r'^(?:(?P<collection>\w+\.\w+)(?: \(|:)(?P<cversion>[\d\.]+)\)?'
                                                 r'|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\))'
                                                 r' was installed successfully$')

    def _list_element(self, _type, path_re, elem_re):
        def process(rc, out, err):
            return [] if "None of the provided paths were usable" in out else out.splitlines()

        with self.runner('type galaxy_cmd dest', output_process=process, check_rc=False) as ctx:
            elems = ctx.run(type=_type, galaxy_cmd='list')

        elems_dict = {}
        current_path = None
        for line in elems:
            if line.startswith("#"):
                match = path_re.match(line)
                if not match:
                    continue
                if self.vars.dest is not None and match.group('path') != self.vars.dest:
                    current_path = None
                    continue
                current_path = match.group('path') if match else None
                elems_dict[current_path] = {}

            elif current_path is not None:
                match = elem_re.match(line)
                if not match or (self.vars.name is not None and match.group('elem') != self.vars.name):
                    continue
                elems_dict[current_path][match.group('elem')] = match.group('version')
        return elems_dict

    def _list_collections(self):
        return self._list_element('collection', self._RE_LIST_PATH, self._RE_LIST_COLL)

    def _list_roles(self):
        return self._list_element('role', self._RE_LIST_PATH, self._RE_LIST_ROLE)

    def _setup29(self):
        self.vars.set("new_collections", {})
        self.vars.set("new_roles", {})
        self.vars.set("ansible29_change", False, change=True, output=False)
        if not (self.vars.ack_ansible29 or self.vars.ack_min_ansiblecore211):
            self.warn("Ansible 2.9 or older: unable to retrieve lists of roles and collections already installed")
            if self.vars.requirements_file is not None and self.vars.type == 'both':
                self.warn("Ansible 2.9 or older: will install only roles from requirements files")

    def _setup210plus(self):
        self.vars.set("new_collections", {}, change=True)
        self.vars.set("new_roles", {}, change=True)
        if self.vars.type != "collection":
            self.vars.installed_roles = self._list_roles()
        if self.vars.type != "role":
            self.vars.installed_collections = self._list_collections()

    def __run__(self):
        def process(rc, out, err):
            for line in out.splitlines():
                match = self._RE_INSTALL_OUTPUT.match(line)
                if not match:
                    continue
                if match.group("collection"):
                    self.vars.new_collections[match.group("collection")] = match.group("cversion")
                    if self.is_ansible29:
                        self.vars.ansible29_change = True
                elif match.group("role"):
                    self.vars.new_roles[match.group("role")] = match.group("rversion")
                    if self.is_ansible29:
                        self.vars.ansible29_change = True

        if self.is_ansible29:
            if self.vars.type == 'both':
                raise ValueError("Type 'both' not supported in Ansible 2.9")
            self._setup29()
        else:
            self._setup210plus()
        with self.runner("type galaxy_cmd force no_deps dest requirements_file name", output_process=process) as ctx:
            ctx.run(galaxy_cmd="install")


def main():
    galaxy = AnsibleGalaxyInstall()
    galaxy.run()


if __name__ == '__main__':
    main()
@@ -0,0 +1,445 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2016, Olivier Boukili <boukili.olivier@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: apache2_mod_proxy
author: Olivier Boukili (@oboukili)
short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool
description:
  - Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer
    pool, using HTTP POST and GET requests. The httpd mod_proxy balancer-member
    status page has to be enabled and accessible, as this module relies on parsing
    this page. This module supports Ansible check mode and requires the BeautifulSoup
    Python module.
options:
  balancer_url_suffix:
    type: str
    description:
      - Suffix of the balancer pool URL required to access the balancer pool
        status page (for example balancer_vhost[:port]/balancer_url_suffix).
    default: /balancer-manager/
  balancer_vhost:
    type: str
    description:
      - (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool.
    required: true
  member_host:
    type: str
    description:
      - (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to.
        The port number is autodetected and should not be specified here.
        If undefined, the apache2_mod_proxy module will return a list of
        dictionaries with all the current balancer pool members' attributes.
  state:
    type: str
    description:
      - Desired state of the member host.
        The states C(drained), C(hot_standby) and C(ignore_errors) can be combined
        with C(absent) or C(disabled) by separating them with a comma (for example C(state=drained,ignore_errors)).
      - 'Accepted state values: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"]'
  tls:
    description:
      - Use HTTPS to access the balancer management page.
    type: bool
    default: false
  validate_certs:
    description:
      - Validate SSL/TLS certificates.
    type: bool
    default: true
'''

EXAMPLES = '''
- name: Get all current balancer pool members' attributes
  community.general.apache2_mod_proxy:
    balancer_vhost: 10.0.0.2

- name: Get a specific member's attributes
  community.general.apache2_mod_proxy:
    balancer_vhost: myws.mydomain.org
    balancer_url_suffix: /lb/
    member_host: node1.myws.mydomain.org

# Enable all balancer pool members:
- name: Get attributes
  community.general.apache2_mod_proxy:
    balancer_vhost: '{{ myloadbalancer_host }}'
  register: result

- name: Enable all balancer pool members
  community.general.apache2_mod_proxy:
    balancer_vhost: '{{ myloadbalancer_host }}'
    member_host: '{{ item.host }}'
    state: present
  with_items: '{{ result.members }}'

# Gracefully disable a member from a loadbalancer node:
- name: Drain the member so it stops accepting new connections
  community.general.apache2_mod_proxy:
    balancer_vhost: '{{ vhost_host }}'
    member_host: '{{ member.host }}'
    state: drained
  delegate_to: myloadbalancernode

- name: Wait for the member's active connections to drain
  ansible.builtin.wait_for:
    host: '{{ member.host }}'
    port: '{{ member.port }}'
    state: drained
  delegate_to: myloadbalancernode

- name: Remove the drained member from the balancer pool
  community.general.apache2_mod_proxy:
    balancer_vhost: '{{ vhost_host }}'
    member_host: '{{ member.host }}'
    state: absent
  delegate_to: myloadbalancernode
'''

RETURN = '''
member:
  description: Specific balancer member information dictionary, returned when the module is invoked with the member_host parameter.
  type: dict
  returned: success
  sample:
    {"attributes":
      {"Busy": "0",
       "Elected": "42",
       "Factor": "1",
       "From": "136K",
       "Load": "0",
       "Route": null,
       "RouteRedir": null,
       "Set": "0",
       "Status": "Init Ok ",
       "To": " 47K",
       "Worker URL": null
      },
     "balancer_url": "http://10.10.0.2/balancer-manager/",
     "host": "10.10.0.20",
     "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
     "path": "/ws",
     "port": 8080,
     "protocol": "http",
     "status": {
        "disabled": false,
        "drained": false,
        "hot_standby": false,
        "ignore_errors": false
      }
    }
members:
  description: List of member (defined above) dictionaries, returned when the module is invoked with no member_host and state args.
  returned: success
  type: list
  sample:
    [{"attributes": {
        "Busy": "0",
        "Elected": "42",
        "Factor": "1",
        "From": "136K",
        "Load": "0",
        "Route": null,
        "RouteRedir": null,
        "Set": "0",
        "Status": "Init Ok ",
        "To": " 47K",
        "Worker URL": null
      },
      "balancer_url": "http://10.10.0.2/balancer-manager/",
      "host": "10.10.0.20",
      "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
      "path": "/ws",
      "port": 8080,
      "protocol": "http",
      "status": {
         "disabled": false,
         "drained": false,
         "hot_standby": false,
         "ignore_errors": false
      }
     },
     {"attributes": {
        "Busy": "0",
        "Elected": "42",
        "Factor": "1",
        "From": "136K",
        "Load": "0",
        "Route": null,
        "RouteRedir": null,
        "Set": "0",
        "Status": "Init Ok ",
        "To": " 47K",
        "Worker URL": null
      },
      "balancer_url": "http://10.10.0.2/balancer-manager/",
      "host": "10.10.0.21",
      "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
      "path": "/ws",
      "port": 8080,
      "protocol": "http",
      "status": {
         "disabled": false,
         "drained": false,
         "hot_standby": false,
         "ignore_errors": false}
     }
    ]
'''

import re
import traceback

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six import iteritems

BEAUTIFUL_SOUP_IMP_ERR = None
try:
    from BeautifulSoup import BeautifulSoup
except ImportError:
    BEAUTIFUL_SOUP_IMP_ERR = traceback.format_exc()
    HAS_BEAUTIFULSOUP = False
else:
    HAS_BEAUTIFULSOUP = True

# balancer member attributes extraction regexp:
EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)"
# Apache2 server version extraction regexp:
APACHE_VERSION_EXPRESSION = r"SERVER VERSION: APACHE/([\d.]+)"
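# Assumed sample of a member management URL matched by EXPRESSION (group
# numbers as used by BalancerMember below):
#   ...?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=...
#   group 2: balancer name, 3: protocol, 4: host, 5: port, 6: path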


def regexp_extraction(string, _regexp, groups=1):
    """ Returns the capture group (default=1) specified in the regexp, applied to the string """
    regexp_search = re.search(string=str(string), pattern=str(_regexp))
    if regexp_search:
        if regexp_search.group(groups) != '':
            return str(regexp_search.group(groups))
    return None


class BalancerMember(object):
    """ Apache 2.4 mod_proxy LB balancer member.
    attributes:
        read-only:
            host -> member host (string),
            management_url -> member management url (string),
            protocol -> member protocol (string)
            port -> member port (string),
            path -> member location (string),
            balancer_url -> url of this member's parent balancer (string),
            attributes -> whole member attributes (dictionary)
            module -> ansible module instance (AnsibleModule object).
        writable:
            status -> status of the member (dictionary)
    """

    def __init__(self, management_url, balancer_url, module):
        self.host = regexp_extraction(management_url, str(EXPRESSION), 4)
        self.management_url = str(management_url)
        self.protocol = regexp_extraction(management_url, EXPRESSION, 3)
        self.port = regexp_extraction(management_url, EXPRESSION, 5)
        self.path = regexp_extraction(management_url, EXPRESSION, 6)
        self.balancer_url = str(balancer_url)
        self.module = module

    def get_member_attributes(self):
        """ Returns a dictionary of a balancer member's attributes."""

        balancer_member_page = fetch_url(self.module, self.management_url)

        if balancer_member_page[1]['status'] != 200:
            self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! " + str(balancer_member_page[1]))
        else:
            try:
                soup = BeautifulSoup(balancer_member_page[0])
            except TypeError as exc:
                self.module.fail_json(msg="Cannot parse balancer_member_page HTML! " + str(exc))
            else:
                subsoup = soup.findAll('table')[1].findAll('tr')
                keys = subsoup[0].findAll('th')
                for valuesset in subsoup[1::1]:
                    if re.search(pattern=self.host, string=str(valuesset)):
                        values = valuesset.findAll('td')
                        return dict((keys[x].string, values[x].string) for x in range(0, len(keys)))

    def get_member_status(self):
        """ Returns a dictionary of a balancer member's status attributes."""
        status_mapping = {'disabled': 'Dis',
                          'drained': 'Drn',
                          'hot_standby': 'Stby',
                          'ignore_errors': 'Ign'}
        actual_status = str(self.attributes['Status'])
        status = dict((mode, patt in actual_status) for mode, patt in iteritems(status_mapping))
        return status

    def set_member_status(self, values):
        """ Sets a balancer member's status attributes amongst pre-mapped values."""
        values_mapping = {'disabled': '&w_status_D',
                          'drained': '&w_status_N',
                          'hot_standby': '&w_status_H',
                          'ignore_errors': '&w_status_I'}

        request_body = regexp_extraction(self.management_url, EXPRESSION, 1)
        values_url = "".join("{0}={1}".format(url_param, 1 if values[mode] else 0) for mode, url_param in iteritems(values_mapping))
        request_body = "{0}{1}".format(request_body, values_url)

        response = fetch_url(self.module, self.management_url, data=request_body)
        if response[1]['status'] != 200:
            self.module.fail_json(msg="Could not set the member status! " + self.host + " " + str(response[1]['status']))

    attributes = property(get_member_attributes)
    status = property(get_member_status, set_member_status)


class Balancer(object):
    """ Apache httpd 2.4 mod_proxy balancer object"""

    def __init__(self, host, suffix, module, members=None, tls=False):
        if tls:
            self.base_url = 'https://' + str(host)
            self.url = 'https://' + str(host) + str(suffix)
        else:
            self.base_url = 'http://' + str(host)
            self.url = 'http://' + str(host) + str(suffix)
        self.module = module
        self.page = self.fetch_balancer_page()
        if members is None:
            self._members = []

    def fetch_balancer_page(self):
        """ Returns the balancer management html page as a string for later parsing."""
        page = fetch_url(self.module, str(self.url))
        if page[1]['status'] != 200:
            self.module.fail_json(msg="Could not get balancer page! HTTP status response: " + str(page[1]['status']))
        else:
            content = page[0].read()
            apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1)
            if apache_version:
                if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version):
                    self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version))
                return content
            else:
                self.module.fail_json(msg="Could not get the Apache server version from the balancer-manager")

    def get_balancer_members(self):
        """ Returns members of the balancer as a generator object for later iteration."""
        try:
            soup = BeautifulSoup(self.page)
        except TypeError:
            self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page))
        else:
            for element in soup.findAll('a')[1::1]:
                balancer_member_suffix = str(element.get('href'))
                if not balancer_member_suffix:
                    self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!")
                else:
                    yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module)

    members = property(get_balancer_members)


def main():
    """ Initiates module."""
    module = AnsibleModule(
        argument_spec=dict(
            balancer_vhost=dict(required=True, type='str'),
            balancer_url_suffix=dict(default="/balancer-manager/", type='str'),
            member_host=dict(type='str'),
            state=dict(type='str'),
            tls=dict(default=False, type='bool'),
            validate_certs=dict(default=True, type='bool')
        ),
        supports_check_mode=True
    )

    if HAS_BEAUTIFULSOUP is False:
        module.fail_json(msg=missing_required_lib('BeautifulSoup'), exception=BEAUTIFUL_SOUP_IMP_ERR)

    if module.params['state'] is not None:
        states = module.params['state'].split(',')
        if (len(states) > 1) and (("present" in states) or ("enabled" in states)):
            module.fail_json(msg="state present/enabled is mutually exclusive with other states!")
        else:
            for _state in states:
                if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']:
                    module.fail_json(
                        msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'."
                    )
    else:
        states = ['None']

    mybalancer = Balancer(module.params['balancer_vhost'],
                          module.params['balancer_url_suffix'],
                          module=module,
                          tls=module.params['tls'])

    if module.params['member_host'] is None:
        json_output_list = []
        for member in mybalancer.members:
            json_output_list.append({
                "host": member.host,
                "status": member.status,
                "protocol": member.protocol,
                "port": member.port,
                "path": member.path,
                "attributes": member.attributes,
                "management_url": member.management_url,
                "balancer_url": member.balancer_url
            })
        module.exit_json(
            changed=False,
            members=json_output_list
        )
    else:
        changed = False
        member_exists = False
        member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False}
        for mode in member_status.keys():
            for state in states:
                if mode == state:
                    member_status[mode] = True
                elif mode == 'disabled' and state == 'absent':
                    member_status[mode] = True

        for member in mybalancer.members:
            if str(member.host) == str(module.params['member_host']):
                member_exists = True
                if module.params['state'] is not None:
                    member_status_before = member.status
                    if not module.check_mode:
                        member_status_after = member.status = member_status
                    else:
                        member_status_after = member_status
                    if member_status_before != member_status_after:
                        changed = True
                json_output = {
                    "host": member.host,
                    "status": member.status,
                    "protocol": member.protocol,
                    "port": member.port,
                    "path": member.path,
                    "attributes": member.attributes,
                    "management_url": member.management_url,
                    "balancer_url": member.balancer_url
                }
        if member_exists:
            module.exit_json(
                changed=changed,
                member=json_output
            )
        else:
            module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!')


if __name__ == '__main__':
    main()
@@ -0,0 +1,270 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2013-2014, Christian Berendt <berendt@b1-systems.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: apache2_module
author:
  - Christian Berendt (@berendt)
  - Ralf Hertel (@n0trax)
  - Robin Roth (@robinro)
short_description: Enables/disables a module of the Apache2 webserver
description:
  - Enables or disables a specified module of the Apache2 webserver.
options:
  name:
    type: str
    description:
      - Name of the module to enable/disable as given to C(a2enmod/a2dismod).
    required: true
  identifier:
    type: str
    description:
      - Identifier of the module as listed by C(apache2ctl -M).
        This is optional and usually determined automatically by the common convention of
        appending C(_module) to I(name) as well as custom exceptions for popular modules.
    required: false
  force:
    description:
      - Force disabling of default modules and override Debian warnings.
    required: false
    type: bool
    default: false
  state:
    type: str
    description:
      - Desired state of the module.
    choices: ['present', 'absent']
    default: present
  ignore_configcheck:
    description:
      - Ignore configuration checks about inconsistent module configuration. Especially useful for mpm_* modules.
    type: bool
    default: false
requirements: ["a2enmod", "a2dismod"]
notes:
  - This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions.
    Whether it works on others depends on whether the C(a2enmod) and C(a2dismod) tools are available.
'''

EXAMPLES = '''
- name: Enable the Apache2 module wsgi
  community.general.apache2_module:
    state: present
    name: wsgi

- name: Disable the Apache2 module wsgi
  community.general.apache2_module:
    state: absent
    name: wsgi

- name: Disable default modules for Debian
  community.general.apache2_module:
    state: absent
    name: autoindex
    force: true

- name: Disable mpm_worker and ignore warnings about missing mpm module
  community.general.apache2_module:
    state: absent
    name: mpm_worker
    ignore_configcheck: true

- name: Enable dump_io module, which is identified as dumpio_module inside apache2
  community.general.apache2_module:
    state: present
    name: dump_io
    identifier: dumpio_module
'''

RETURN = '''
result:
  description: Message about the action taken.
  returned: always
  type: str
warnings:
  description: List of warning messages.
  returned: when needed
  type: list
rc:
  description: Return code of the underlying command.
  returned: failed
  type: int
stdout:
  description: Stdout of the underlying command.
  returned: failed
  type: str
stderr:
  description: Stderr of the underlying command.
  returned: failed
  type: str
'''

import re

# import module snippets
from ansible.module_utils.basic import AnsibleModule

_re_threaded = re.compile(r'threaded: *yes')
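# Matches the MPM capability line in `apachectl -V` output, which typically
# looks like "threaded:     yes (fixed thread count)" (assumed format).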


def _run_threaded(module):
    control_binary = _get_ctl_binary(module)
    result, stdout, stderr = module.run_command([control_binary, "-V"])

    return bool(_re_threaded.search(stdout))


def _get_ctl_binary(module):
    for command in ['apache2ctl', 'apachectl']:
        ctl_binary = module.get_bin_path(command)
        if ctl_binary is not None:
            return ctl_binary

    module.fail_json(msg="Neither apache2ctl nor apachectl was found. At least one Apache control binary is necessary.")


def _module_is_enabled(module):
    control_binary = _get_ctl_binary(module)
    result, stdout, stderr = module.run_command([control_binary, "-M"])

    if result != 0:
        error_msg = "Error executing %s: %s" % (control_binary, stderr)
        if module.params['ignore_configcheck']:
            if 'AH00534' in stderr and 'mpm_' in module.params['name']:
                module.warnings.append(
                    "No MPM module loaded! apache2 reload AND other module actions"
                    " will fail if no MPM module is loaded immediately."
                )
            else:
                module.warnings.append(error_msg)
            return False
        else:
            module.fail_json(msg=error_msg)
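
    # `apache2ctl -M` lists identifiers indented, e.g. " wsgi_module (shared)"
    # (assumed format), so the leading space avoids matching substrings of
    # longer module names.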
    searchstring = ' ' + module.params['identifier']
    return searchstring in stdout


def create_apache_identifier(name):
    """
    By convention if a module is loaded via name, it appears in apache2ctl -M as
    name_module.

    Some modules don't follow this convention and we use replacements for those."""
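    # For example (assumed): 'wsgi' -> 'wsgi_module', 'php7.4' -> 'php7_module'
    # (via the re workaround below), and 'shib2' -> 'mod_shib' (text workaround).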

    # a2enmod name replacement to apache2ctl -M names
    text_workarounds = [
        ('shib', 'mod_shib'),
        ('shib2', 'mod_shib'),
        ('evasive', 'evasive20_module'),
    ]

    # re expressions to extract subparts of names
    re_workarounds = [
        ('php', re.compile(r'^(php\d)\.')),
    ]

    for a2enmod_spelling, module_name in text_workarounds:
        if a2enmod_spelling in name:
            return module_name

    for search, reexpr in re_workarounds:
        if search in name:
            try:
                rematch = reexpr.search(name)
                return rematch.group(1) + '_module'
            except AttributeError:
                pass

    return name + '_module'


def _set_state(module, state):
    name = module.params['name']
    force = module.params['force']

    want_enabled = state == 'present'
    state_string = {'present': 'enabled', 'absent': 'disabled'}[state]
    a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state]
    success_msg = "Module %s %s" % (name, state_string)

    if _module_is_enabled(module) != want_enabled:
        if module.check_mode:
            module.exit_json(changed=True,
                             result=success_msg,
                             warnings=module.warnings)

        a2mod_binary_path = module.get_bin_path(a2mod_binary)
        if a2mod_binary_path is None:
            module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache." % (a2mod_binary, a2mod_binary))

        a2mod_binary_cmd = [a2mod_binary_path]

        if not want_enabled and force:
            # force exists only for a2dismod on debian
            a2mod_binary_cmd.append('-f')

        result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name])

        if _module_is_enabled(module) == want_enabled:
            module.exit_json(changed=True,
                             result=success_msg,
                             warnings=module.warnings)
        else:
            msg = (
                'Failed to set module {name} to {state}:\n'
                '{stdout}\n'
                'Maybe the module identifier ({identifier}) was guessed incorrectly. '
                'Consider setting the "identifier" option.'
            ).format(
                name=name,
                state=state_string,
                stdout=stdout,
                identifier=module.params['identifier']
            )
            module.fail_json(msg=msg,
                             rc=result,
                             stdout=stdout,
                             stderr=stderr)
    else:
        module.exit_json(changed=False,
                         result=success_msg,
                         warnings=module.warnings)


def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            identifier=dict(type='str'),
            force=dict(type='bool', default=False),
            state=dict(default='present', choices=['absent', 'present']),
            ignore_configcheck=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
    )

    module.warnings = []

    name = module.params['name']
    if name == 'cgi' and _run_threaded(module):
        module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module cgi possible.")

    if not module.params['identifier']:
        module.params['identifier'] = create_apache_identifier(module.params['name'])

    if module.params['state'] in ['present', 'absent']:
        _set_state(module, module.params['state'])


if __name__ == '__main__':
    main()
@@ -0,0 +1,371 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2015, Kevin Brebanov <https://github.com/kbrebanov>
# Based on pacman (Afterburn <https://github.com/afterburn>, Aaron Bull Schaefer <aaron@elasticdog.com>)
# and apt (Matthew Williams <matthew@flowroute.com>) modules.
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: apk
short_description: Manages apk packages
description:
  - Manages I(apk) packages for Alpine Linux.
author: "Kevin Brebanov (@kbrebanov)"
options:
  available:
    description:
      - During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them)
        if the currently installed package is no longer available from any repository.
    type: bool
    default: false
  name:
    description:
      - A package name, like C(foo), or multiple packages, like C(foo, bar).
    type: list
    elements: str
  no_cache:
    description:
      - Do not use any local cache path.
    type: bool
    default: false
    version_added: 1.0.0
  repository:
    description:
      - A package repository or multiple repositories.
        Unlike with the underlying apk command, this list will override the system repositories rather than supplement them.
    type: list
    elements: str
  state:
    description:
      - Indicates the desired package(s) state.
      - C(present) ensures the package(s) is/are present. C(installed) can be used as an alias.
      - C(absent) ensures the package(s) is/are absent. C(removed) can be used as an alias.
      - C(latest) ensures the package(s) is/are present and the latest version(s).
    default: present
    choices: [ "present", "absent", "latest", "installed", "removed" ]
    type: str
  update_cache:
    description:
      - Update repository indexes. Can be run with other steps or on its own.
    type: bool
    default: false
  upgrade:
    description:
      - Upgrade all installed packages to their latest version.
    type: bool
    default: false
  world:
    description:
      - Use a custom world file when checking for explicitly installed packages.
    type: str
    default: /etc/apk/world
    version_added: 5.4.0
notes:
  - 'I(name) and I(upgrade) are mutually exclusive.'
  - When used with a C(loop:), each package is processed individually; it is much more efficient to pass the list directly to the I(name) option.
'''

EXAMPLES = '''
- name: Update repositories and install foo package
  community.general.apk:
    name: foo
    update_cache: true

- name: Update repositories and install foo and bar packages
  community.general.apk:
    name: foo,bar
    update_cache: true

- name: Remove foo package
  community.general.apk:
    name: foo
    state: absent

- name: Remove foo and bar packages
  community.general.apk:
    name: foo,bar
    state: absent

- name: Install the package foo
  community.general.apk:
    name: foo
    state: present

- name: Install the packages foo and bar
  community.general.apk:
    name: foo,bar
    state: present

- name: Update repositories and update package foo to latest version
  community.general.apk:
    name: foo
    state: latest
    update_cache: true

- name: Update repositories and update packages foo and bar to latest versions
  community.general.apk:
    name: foo,bar
    state: latest
    update_cache: true

- name: Update all installed packages to the latest versions
  community.general.apk:
    upgrade: true

- name: Upgrade / replace / downgrade / uninstall all installed packages to the latest versions available
  community.general.apk:
    available: true
    upgrade: true

- name: Update repositories as a separate step
  community.general.apk:
    update_cache: true

- name: Install package from a specific repository
  community.general.apk:
    name: foo
    state: latest
    update_cache: true
    repository: http://dl-3.alpinelinux.org/alpine/edge/main

- name: Install package without using cache
  community.general.apk:
    name: foo
    state: latest
    no_cache: true

- name: Install package checking a custom world
  community.general.apk:
    name: foo
    state: latest
    world: /etc/apk/world.custom
'''

RETURN = '''
packages:
  description: A list of packages that have been changed.
  returned: when packages have changed
  type: list
  sample: ['package', 'other-package']
'''

import re
# Import module snippets.
from ansible.module_utils.basic import AnsibleModule


def parse_for_packages(stdout):
    packages = []
    data = stdout.split('\n')
    regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)')
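    # apk prints one progress line per package, e.g. (assumed format):
    #   (1/3) Installing busybox (1.35.0-r17)
    # The capture group extracts the package name ("busybox").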
    for line in data:
        p = regex.search(line)
        if p:
            packages.append(p.group(1))
    return packages


def update_package_db(module, exit):
    cmd = "%s update" % (APK_PATH)
    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
    if rc != 0:
        module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr)
    elif exit:
        module.exit_json(changed=True, msg='updated repository indexes', stdout=stdout, stderr=stderr)
    else:
        return True


def query_toplevel(module, name, world):
    # world contains a list of top-level packages separated by ' ' or \n
    # packages may contain repository (@) or version (=<>~) separator characters or start with negation !
    regex = re.compile(r'^' + re.escape(name) + r'([@=<>~].+)?$')
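    # For name "foo" this matches world entries like "foo", "foo=1.2.3-r0" or
    # "foo@edge" (assumed examples), but not "other-foo" or "foo-doc".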
    with open(world) as f:
        content = f.read().split()
    for p in content:
        if regex.search(p):
            return True
    return False


def query_package(module, name):
    cmd = "%s -v info --installed %s" % (APK_PATH, name)
    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
    if rc == 0:
        return True
    else:
        return False


def query_latest(module, name):
    cmd = "%s version %s" % (APK_PATH, name)
    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
    search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name))
    match = re.search(search_pattern, stdout)
    if match and match.group(2) == "<":
        return False
    return True


def query_virtual(module, name):
    cmd = "%s -v info --description %s" % (APK_PATH, name)
    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
    search_pattern = r"^%s: virtual meta package" % (re.escape(name))
    if re.search(search_pattern, stdout):
        return True
    return False


def get_dependencies(module, name):
    cmd = "%s -v info --depends %s" % (APK_PATH, name)
    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
    dependencies = stdout.split()
    if len(dependencies) > 1:
        return dependencies[1:]
    else:
        return []


def upgrade_packages(module, available):
    if module.check_mode:
        cmd = "%s upgrade --simulate" % (APK_PATH)
    else:
        cmd = "%s upgrade" % (APK_PATH)
    if available:
        cmd = "%s --available" % cmd
    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
    packagelist = parse_for_packages(stdout)
    if rc != 0:
        module.fail_json(msg="failed to upgrade packages", stdout=stdout, stderr=stderr, packages=packagelist)
    if re.search(r'^OK', stdout):
        module.exit_json(changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist)
    module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist)


def install_packages(module, names, state, world):
    upgrade = False
    to_install = []
    to_upgrade = []
    for name in names:
        # Check if virtual package
        if query_virtual(module, name):
            # Get virtual package dependencies
            dependencies = get_dependencies(module, name)
            for dependency in dependencies:
                if state == 'latest' and not query_latest(module, dependency):
                    to_upgrade.append(dependency)
        else:
            if not query_toplevel(module, name, world):
                to_install.append(name)
            elif state == 'latest' and not query_latest(module, name):
                to_upgrade.append(name)
    if to_upgrade:
        upgrade = True
    if not to_install and not upgrade:
        module.exit_json(changed=False, msg="package(s) already installed")
    packages = " ".join(to_install + to_upgrade)
    if upgrade:
        if module.check_mode:
            cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages)
        else:
            cmd = "%s add --upgrade %s" % (APK_PATH, packages)
    else:
        if module.check_mode:
            cmd = "%s add --simulate %s" % (APK_PATH, packages)
        else:
            cmd = "%s add %s" % (APK_PATH, packages)
    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
    packagelist = parse_for_packages(stdout)
    if rc != 0:
        module.fail_json(msg="failed to install %s" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
    module.exit_json(changed=True, msg="installed %s package(s)" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)


def remove_packages(module, names):
    installed = []
    for name in names:
        if query_package(module, name):
            installed.append(name)
    if not installed:
        module.exit_json(changed=False, msg="package(s) already removed")
    names = " ".join(installed)
    if module.check_mode:
        cmd = "%s del --purge --simulate %s" % (APK_PATH, names)
    else:
        cmd = "%s del --purge %s" % (APK_PATH, names)
    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
    packagelist = parse_for_packages(stdout)
    # Check to see if packages are still present because of dependencies
    for name in installed:
        if query_package(module, name):
            rc = 1
            break
    if rc != 0:
        module.fail_json(msg="failed to remove %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
    module.exit_json(changed=True, msg="removed %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)

# ==========================================
# Main control flow.


def main():
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']),
            name=dict(type='list', elements='str'),
            no_cache=dict(default=False, type='bool'),
            repository=dict(type='list', elements='str'),
            update_cache=dict(default=False, type='bool'),
            upgrade=dict(default=False, type='bool'),
            available=dict(default=False, type='bool'),
            world=dict(default='/etc/apk/world', type='str'),
        ),
        required_one_of=[['name', 'update_cache', 'upgrade']],
        mutually_exclusive=[['name', 'upgrade']],
        supports_check_mode=True
    )

    # Set LANG env since we parse stdout
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    global APK_PATH
    APK_PATH = module.get_bin_path('apk', required=True)

    p = module.params

    if p['no_cache']:
        APK_PATH = "%s --no-cache" % (APK_PATH, )

    # add repositories to the APK_PATH
|
||||
if p['repository']:
|
||||
for r in p['repository']:
|
||||
APK_PATH = "%s --repository %s --repositories-file /dev/null" % (APK_PATH, r)
|
||||
|
||||
# normalize the state parameter
|
||||
if p['state'] in ['present', 'installed']:
|
||||
p['state'] = 'present'
|
||||
if p['state'] in ['absent', 'removed']:
|
||||
p['state'] = 'absent'
|
||||
|
||||
if p['update_cache']:
|
||||
update_package_db(module, not p['name'] and not p['upgrade'])
|
||||
|
||||
if p['upgrade']:
|
||||
upgrade_packages(module, p['available'])
|
||||
|
||||
if p['state'] in ['present', 'latest']:
|
||||
install_packages(module, p['name'], p['state'], p['world'])
|
||||
elif p['state'] == 'absent':
|
||||
remove_packages(module, p['name'])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,147 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2018, Mikhail Gordeev
|
||||
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: apt_repo
|
||||
short_description: Manage APT repositories via apt-repo
|
||||
description:
|
||||
    - Manages APT repositories using the apt-repo tool.
    - See U(https://www.altlinux.org/Apt-repo) for details about apt-repo.
|
||||
notes:
|
||||
    - This module works on ALT-based distros.
    - Does NOT support check mode, due to a limitation of the apt-repo tool.
|
||||
options:
|
||||
repo:
|
||||
description:
|
||||
- Name of the repository to add or remove.
|
||||
required: true
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Indicates the desired repository state.
|
||||
choices: [ absent, present ]
|
||||
default: present
|
||||
type: str
|
||||
remove_others:
|
||||
description:
|
||||
            - Remove all repositories other than the one being added.
            - Used if I(state=present).
|
||||
type: bool
|
||||
default: false
|
||||
update:
|
||||
description:
|
||||
- Update the package database after changing repositories.
|
||||
type: bool
|
||||
default: false
|
||||
author:
|
||||
- Mikhail Gordeev (@obirvalger)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Remove all repositories
|
||||
community.general.apt_repo:
|
||||
repo: all
|
||||
state: absent
|
||||
|
||||
- name: Add repository `Sisyphus` and remove other repositories
|
||||
community.general.apt_repo:
|
||||
    repo: Sisyphus
|
||||
state: present
|
||||
remove_others: true
|
||||
|
||||
- name: Add local repository `/space/ALT/Sisyphus` and update package cache
|
||||
community.general.apt_repo:
|
||||
repo: copy:///space/ALT/Sisyphus
|
||||
state: present
|
||||
update: true
|
||||
'''
|
||||
|
||||
RETURN = ''' # '''
|
||||
|
||||
import os
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
APT_REPO_PATH = "/usr/bin/apt-repo"
|
||||
|
||||
|
||||
def apt_repo(module, *args):
|
||||
"""run apt-repo with args and return its output"""
|
||||
# make args list to use in concatenation
|
||||
args = list(args)
|
||||
rc, out, err = module.run_command([APT_REPO_PATH] + args)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg="'%s' failed: %s" % (' '.join(['apt-repo'] + args), err))
|
||||
|
||||
return out
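# Illustrative usage (repository value from the examples above):
#   apt_repo(module, 'add', 'copy:///space/ALT/Sisyphus')
# runs "/usr/bin/apt-repo add copy:///space/ALT/Sisyphus" and fails the task on a
# non-zero exit code.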
|
||||
|
||||
|
||||
def add_repo(module, repo):
|
||||
"""add a repository"""
|
||||
apt_repo(module, 'add', repo)
|
||||
|
||||
|
||||
def rm_repo(module, repo):
|
||||
"""remove a repository"""
|
||||
apt_repo(module, 'rm', repo)
|
||||
|
||||
|
||||
def set_repo(module, repo):
|
||||
"""add a repository and remove other repositories"""
|
||||
# first add to validate repository
|
||||
apt_repo(module, 'add', repo)
|
||||
apt_repo(module, 'rm', 'all')
|
||||
apt_repo(module, 'add', repo)
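    # The first add validates the repository before 'rm all' clears the list; the
    # second add then restores it as the only configured repository.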
|
||||
|
||||
|
||||
def update(module):
|
||||
"""update package cache"""
|
||||
apt_repo(module, 'update')
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
repo=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['absent', 'present']),
|
||||
remove_others=dict(type='bool', default=False),
|
||||
update=dict(type='bool', default=False),
|
||||
),
|
||||
)
|
||||
|
||||
if not os.path.exists(APT_REPO_PATH):
|
||||
module.fail_json(msg='cannot find /usr/bin/apt-repo')
|
||||
|
||||
params = module.params
|
||||
repo = params['repo']
|
||||
state = params['state']
|
||||
old_repositories = apt_repo(module)
|
||||
|
||||
if state == 'present':
|
||||
if params['remove_others']:
|
||||
set_repo(module, repo)
|
||||
else:
|
||||
add_repo(module, repo)
|
||||
elif state == 'absent':
|
||||
rm_repo(module, repo)
|
||||
|
||||
if params['update']:
|
||||
update(module)
|
||||
|
||||
new_repositories = apt_repo(module)
|
||||
changed = old_repositories != new_repositories
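    # Change detection compares the full "apt-repo" listing captured before and
    # after the operation; any difference marks the task as changed.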
|
||||
module.exit_json(changed=changed, repo=repo, state=state)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,180 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2013, Evgenii Terechkov
|
||||
# Written by Evgenii Terechkov <evg@altlinux.org>
|
||||
# Based on urpmi module written by Philippe Makowski <philippem@mageia.org>
|
||||
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: apt_rpm
|
||||
short_description: APT-RPM package manager
|
||||
description:
|
||||
    - Manages packages with I(apt-rpm). Both the low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries are required.
|
||||
options:
|
||||
package:
|
||||
description:
|
||||
            - List of packages to install, upgrade, or remove.
|
||||
required: true
|
||||
aliases: [ name, pkg ]
|
||||
type: list
|
||||
elements: str
|
||||
state:
|
||||
description:
|
||||
- Indicates the desired package state.
|
||||
choices: [ absent, present, installed, removed ]
|
||||
default: present
|
||||
type: str
|
||||
update_cache:
|
||||
description:
|
||||
            - Update the package database first (runs C(apt-get update)).
|
||||
type: bool
|
||||
default: false
|
||||
author:
|
||||
- Evgenii Terechkov (@evgkrsk)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Install package foo
|
||||
community.general.apt_rpm:
|
||||
pkg: foo
|
||||
state: present
|
||||
|
||||
- name: Install packages foo and bar
|
||||
community.general.apt_rpm:
|
||||
pkg:
|
||||
- foo
|
||||
- bar
|
||||
state: present
|
||||
|
||||
- name: Remove package foo
|
||||
community.general.apt_rpm:
|
||||
pkg: foo
|
||||
state: absent
|
||||
|
||||
- name: Remove packages foo and bar
|
||||
community.general.apt_rpm:
|
||||
pkg: foo,bar
|
||||
state: absent
|
||||
|
||||
# bar will be updated if a newer version exists
|
||||
- name: Update the package database and install bar
|
||||
community.general.apt_rpm:
|
||||
name: bar
|
||||
state: present
|
||||
update_cache: true
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
APT_PATH = "/usr/bin/apt-get"
|
||||
RPM_PATH = "/usr/bin/rpm"
|
||||
|
||||
|
||||
def query_package(module, name):
|
||||
# rpm -q returns 0 if the package is installed,
|
||||
# 1 if it is not installed
|
||||
rc, out, err = module.run_command("%s -q %s" % (RPM_PATH, name))
|
||||
if rc == 0:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def query_package_provides(module, name):
|
||||
# rpm -q returns 0 if the package is installed,
|
||||
# 1 if it is not installed
|
||||
rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH, name))
|
||||
return rc == 0
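# Illustrative: query_package_provides(module, 'foo') runs "rpm -q --provides foo",
# which exits 0 only for installed packages; install_packages() below uses this to
# skip packages that are already present.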
|
||||
|
||||
|
||||
def update_package_db(module):
|
||||
rc, out, err = module.run_command("%s update" % APT_PATH)
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg="could not update package db: %s" % err)
|
||||
|
||||
|
||||
def remove_packages(module, packages):
|
||||
|
||||
remove_c = 0
|
||||
    # Use a for loop so that, on error, we can report which package failed
|
||||
for package in packages:
|
||||
# Query the package first, to see if we even need to remove
|
||||
if not query_package(module, package):
|
||||
continue
|
||||
|
||||
rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package))
|
||||
|
||||
if rc != 0:
|
||||
module.fail_json(msg="failed to remove %s: %s" % (package, err))
|
||||
|
||||
remove_c += 1
|
||||
|
||||
if remove_c > 0:
|
||||
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
|
||||
|
||||
module.exit_json(changed=False, msg="package(s) already absent")
|
||||
|
||||
|
||||
def install_packages(module, pkgspec):
|
||||
|
||||
packages = ""
|
||||
for package in pkgspec:
|
||||
if not query_package_provides(module, package):
|
||||
packages += "'%s' " % package
|
||||
|
||||
if len(packages) != 0:
|
||||
|
||||
rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages))
|
||||
|
||||
installed = True
|
||||
        for package in pkgspec:
|
||||
if not query_package_provides(module, package):
|
||||
installed = False
|
||||
|
||||
        # apt-rpm always has exit code 0 if --force is used
|
||||
if rc or not installed:
|
||||
module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err))
|
||||
else:
|
||||
module.exit_json(changed=True, msg="%s present(s)" % packages)
|
||||
else:
|
||||
module.exit_json(changed=False)
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed']),
|
||||
update_cache=dict(type='bool', default=False),
|
||||
package=dict(type='list', elements='str', required=True, aliases=['name', 'pkg']),
|
||||
),
|
||||
)
|
||||
|
||||
if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH):
|
||||
module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
|
||||
|
||||
p = module.params
|
||||
|
||||
if p['update_cache']:
|
||||
update_package_db(module)
|
||||
|
||||
packages = p['package']
|
||||
|
||||
if p['state'] in ['installed', 'present']:
|
||||
install_packages(module, packages)
|
||||
|
||||
elif p['state'] in ['absent', 'removed']:
|
||||
remove_packages(module, packages)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,669 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2016, Ben Doherty <bendohmv@gmail.com>
|
||||
# Sponsored by Oomph, Inc. http://www.oomphinc.com
|
||||
# Copyright (c) 2017, Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: archive
|
||||
short_description: Creates a compressed archive of one or more files or trees
|
||||
extends_documentation_fragment: files
|
||||
description:
|
||||
- Creates or extends an archive.
|
||||
- The source and archive are on the remote host, and the archive I(is not) copied to the local host.
|
||||
- Source files can be deleted after archival by specifying I(remove=True).
|
||||
options:
|
||||
path:
|
||||
description:
|
||||
- Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive.
|
||||
type: list
|
||||
elements: path
|
||||
required: true
|
||||
format:
|
||||
description:
|
||||
- The type of compression to use.
|
||||
- Support for xz was added in Ansible 2.5.
|
||||
type: str
|
||||
choices: [ bz2, gz, tar, xz, zip ]
|
||||
default: gz
|
||||
dest:
|
||||
description:
|
||||
            - The file name of the destination archive. The parent directory must exist on the remote host.
            - This is required when C(path) refers to multiple files by specifying a glob, a directory, or multiple paths in a list.
|
||||
- If the destination archive already exists, it will be truncated and overwritten.
|
||||
type: path
|
||||
exclude_path:
|
||||
description:
|
||||
- Remote absolute path, glob, or list of paths or globs for the file or files to exclude from I(path) list and glob expansion.
|
||||
- Use I(exclusion_patterns) to instead exclude files or subdirectories below any of the paths from the I(path) list.
|
||||
type: list
|
||||
elements: path
|
||||
default: []
|
||||
exclusion_patterns:
|
||||
description:
|
||||
- Glob style patterns to exclude files or directories from the resulting archive.
|
||||
- This differs from I(exclude_path) which applies only to the source paths from I(path).
|
||||
type: list
|
||||
elements: path
|
||||
version_added: 3.2.0
|
||||
force_archive:
|
||||
description:
|
||||
- Allows you to force the module to treat this as an archive even if only a single file is specified.
|
||||
- By default when a single file is specified it is compressed only (not archived).
|
||||
- Enable this if you want to use M(ansible.builtin.unarchive) on an archive of a single file created with this module.
|
||||
type: bool
|
||||
default: false
|
||||
remove:
|
||||
description:
|
||||
- Remove any added source files and trees after adding to archive.
|
||||
type: bool
|
||||
default: false
|
||||
notes:
|
||||
- Can produce I(gzip), I(bzip2), I(lzma), and I(zip) compressed files or archives.
|
||||
- This module uses C(tarfile), C(zipfile), C(gzip), and C(bz2) packages on the target host to create archives.
|
||||
These are part of the Python standard library for Python 2 and 3.
|
||||
requirements:
|
||||
- Requires C(lzma) (standard library of Python 3) or L(backports.lzma, https://pypi.org/project/backports.lzma/) (Python 2) if using C(xz) format.
|
||||
seealso:
|
||||
- module: ansible.builtin.unarchive
|
||||
author:
|
||||
- Ben Doherty (@bendoh)
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Compress directory /path/to/foo/ into /path/to/foo.tgz
|
||||
community.general.archive:
|
||||
path: /path/to/foo
|
||||
dest: /path/to/foo.tgz
|
||||
|
||||
- name: Compress regular file /path/to/foo into /path/to/foo.gz and remove it
|
||||
community.general.archive:
|
||||
path: /path/to/foo
|
||||
remove: true
|
||||
|
||||
- name: Create a zip archive of /path/to/foo
|
||||
community.general.archive:
|
||||
path: /path/to/foo
|
||||
format: zip
|
||||
|
||||
- name: Create a bz2 archive of multiple files, rooted at /path
|
||||
community.general.archive:
|
||||
path:
|
||||
- /path/to/foo
|
||||
- /path/wong/foo
|
||||
dest: /path/file.tar.bz2
|
||||
format: bz2
|
||||
|
||||
- name: Create a bz2 archive of a globbed path, while excluding specific dirnames
|
||||
community.general.archive:
|
||||
path:
|
||||
- /path/to/foo/*
|
||||
dest: /path/file.tar.bz2
|
||||
exclude_path:
|
||||
- /path/to/foo/bar
|
||||
- /path/to/foo/baz
|
||||
format: bz2
|
||||
|
||||
- name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames
|
||||
community.general.archive:
|
||||
path:
|
||||
- /path/to/foo/*
|
||||
dest: /path/file.tar.bz2
|
||||
exclude_path:
|
||||
- /path/to/foo/ba*
|
||||
format: bz2
|
||||
|
||||
- name: Use gzip to compress a single archive (i.e. don't archive it first with tar)
|
||||
community.general.archive:
|
||||
path: /path/to/foo/single.file
|
||||
dest: /path/file.gz
|
||||
format: gz
|
||||
|
||||
- name: Create a tar.gz archive of a single file.
|
||||
community.general.archive:
|
||||
path: /path/to/foo/single.file
|
||||
dest: /path/file.tar.gz
|
||||
format: gz
|
||||
force_archive: true
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
state:
|
||||
description:
|
||||
The state of the input C(path).
|
||||
type: str
|
||||
returned: always
|
||||
dest_state:
|
||||
description:
|
||||
- The state of the I(dest) file.
|
||||
- C(absent) when the file does not exist.
|
||||
- C(archive) when the file is an archive.
|
||||
- C(compress) when the file is compressed, but not an archive.
|
||||
- C(incomplete) when the file is an archive, but some files under I(path) were not found.
|
||||
type: str
|
||||
returned: success
|
||||
version_added: 3.4.0
|
||||
missing:
|
||||
description: Any files that were missing from the source.
|
||||
type: list
|
||||
returned: success
|
||||
archived:
|
||||
description: Any files that were compressed or added to the archive.
|
||||
type: list
|
||||
returned: success
|
||||
arcroot:
|
||||
description: The archive root.
|
||||
type: str
|
||||
returned: always
|
||||
expanded_paths:
|
||||
description: The list of matching paths from paths argument.
|
||||
type: list
|
||||
returned: always
|
||||
expanded_exclude_paths:
|
||||
description: The list of matching exclude paths from the exclude_path argument.
|
||||
type: list
|
||||
returned: always
|
||||
'''
|
||||
|
||||
import abc
|
||||
import bz2
|
||||
import glob
|
||||
import gzip
|
||||
import io
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import tarfile
|
||||
import zipfile
|
||||
from fnmatch import fnmatch
|
||||
from sys import version_info
|
||||
from traceback import format_exc
|
||||
from zlib import crc32
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_native
|
||||
from ansible.module_utils import six
|
||||
|
||||
|
||||
LZMA_IMP_ERR = None
|
||||
if six.PY3:
|
||||
try:
|
||||
import lzma
|
||||
HAS_LZMA = True
|
||||
except ImportError:
|
||||
LZMA_IMP_ERR = format_exc()
|
||||
HAS_LZMA = False
|
||||
else:
|
||||
try:
|
||||
from backports import lzma
|
||||
HAS_LZMA = True
|
||||
except ImportError:
|
||||
LZMA_IMP_ERR = format_exc()
|
||||
HAS_LZMA = False
|
||||
|
||||
PY27 = version_info[0:2] >= (2, 7)
|
||||
|
||||
STATE_ABSENT = 'absent'
|
||||
STATE_ARCHIVED = 'archive'
|
||||
STATE_COMPRESSED = 'compress'
|
||||
STATE_INCOMPLETE = 'incomplete'
|
||||
|
||||
|
||||
def common_path(paths):
|
||||
empty = b'' if paths and isinstance(paths[0], six.binary_type) else ''
|
||||
|
||||
return os.path.join(
|
||||
os.path.dirname(os.path.commonprefix([os.path.join(os.path.dirname(p), empty) for p in paths])), empty
|
||||
)
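# Illustrative: common_path([b'/data/logs/a.log', b'/data/logs/b.log']) returns
# b'/data/logs/', while common_path([b'/data/log1/a', b'/data/log2/b']) returns
# b'/data/', i.e. the deepest directory prefix shared by every input path.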
|
||||
|
||||
|
||||
def expand_paths(paths):
|
||||
expanded_path = []
|
||||
is_globby = False
|
||||
for path in paths:
|
||||
b_path = _to_bytes(path)
|
||||
if b'*' in b_path or b'?' in b_path:
|
||||
e_paths = glob.glob(b_path)
|
||||
is_globby = True
|
||||
else:
|
||||
e_paths = [b_path]
|
||||
expanded_path.extend(e_paths)
|
||||
return expanded_path, is_globby
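# Illustrative (assuming /etc/hosts and /etc/hostname exist):
# expand_paths(['/etc/host*']) would return something like
# ([b'/etc/hostname', b'/etc/hosts'], True); the boolean records that globbing
# occurred, which later forces archive mode even if only one path matched.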
|
||||
|
||||
|
||||
def matches_exclusion_patterns(path, exclusion_patterns):
|
||||
return any(fnmatch(path, p) for p in exclusion_patterns)
|
||||
|
||||
|
||||
def is_archive(path):
|
||||
return re.search(br'\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$', os.path.basename(path), re.IGNORECASE)
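# Matches common archive suffixes (.tar, .tar.gz, .tar.bz2, .tar.xz, .tgz, .tbz2,
# .zip) case-insensitively; used to classify a pre-existing destination file when
# no source paths were found.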
|
||||
|
||||
|
||||
def strip_prefix(prefix, string):
|
||||
return string[len(prefix):] if string.startswith(prefix) else string
|
||||
|
||||
|
||||
def _to_bytes(s):
|
||||
return to_bytes(s, errors='surrogate_or_strict')
|
||||
|
||||
|
||||
def _to_native(s):
|
||||
return to_native(s, errors='surrogate_or_strict')
|
||||
|
||||
|
||||
def _to_native_ascii(s):
|
||||
return to_native(s, errors='surrogate_or_strict', encoding='ascii')
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class Archive(object):
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
|
||||
self.destination = _to_bytes(module.params['dest']) if module.params['dest'] else None
|
||||
self.exclusion_patterns = module.params['exclusion_patterns'] or []
|
||||
self.format = module.params['format']
|
||||
self.must_archive = module.params['force_archive']
|
||||
self.remove = module.params['remove']
|
||||
|
||||
self.changed = False
|
||||
self.destination_state = STATE_ABSENT
|
||||
self.errors = []
|
||||
self.file = None
|
||||
self.successes = []
|
||||
self.targets = []
|
||||
self.not_found = []
|
||||
|
||||
paths = module.params['path']
|
||||
self.expanded_paths, has_globs = expand_paths(paths)
|
||||
self.expanded_exclude_paths = expand_paths(module.params['exclude_path'])[0]
|
||||
|
||||
self.paths = sorted(set(self.expanded_paths) - set(self.expanded_exclude_paths))
|
||||
|
||||
if not self.paths:
|
||||
module.fail_json(
|
||||
path=', '.join(paths),
|
||||
expanded_paths=_to_native(b', '.join(self.expanded_paths)),
|
||||
expanded_exclude_paths=_to_native(b', '.join(self.expanded_exclude_paths)),
|
||||
msg='Error, no source paths were found'
|
||||
)
|
||||
|
||||
self.root = common_path(self.paths)
|
||||
|
||||
if not self.must_archive:
|
||||
self.must_archive = any([has_globs, os.path.isdir(self.paths[0]), len(self.paths) > 1])
|
||||
|
||||
if not self.destination and not self.must_archive:
|
||||
self.destination = b'%s.%s' % (self.paths[0], _to_bytes(self.format))
|
||||
|
||||
if self.must_archive and not self.destination:
|
||||
module.fail_json(
|
||||
dest=_to_native(self.destination),
|
||||
path=', '.join(paths),
|
||||
msg='Error, must specify "dest" when archiving multiple files or trees'
|
||||
)
|
||||
|
||||
if self.remove:
|
||||
self._check_removal_safety()
|
||||
|
||||
self.original_checksums = self.destination_checksums()
|
||||
self.original_size = self.destination_size()
|
||||
|
||||
def add(self, path, archive_name):
|
||||
try:
|
||||
self._add(_to_native_ascii(path), _to_native(archive_name))
|
||||
if self.contains(_to_native(archive_name)):
|
||||
self.successes.append(path)
|
||||
except Exception as e:
|
||||
self.errors.append('%s: %s' % (_to_native_ascii(path), _to_native(e)))
|
||||
|
||||
def add_single_target(self, path):
|
||||
if self.format in ('zip', 'tar'):
|
||||
self.open()
|
||||
self.add(path, strip_prefix(self.root, path))
|
||||
self.close()
|
||||
self.destination_state = STATE_ARCHIVED
|
||||
else:
|
||||
try:
|
||||
f_out = self._open_compressed_file(_to_native_ascii(self.destination), 'wb')
|
||||
with open(path, 'rb') as f_in:
|
||||
shutil.copyfileobj(f_in, f_out)
|
||||
f_out.close()
|
||||
self.successes.append(path)
|
||||
self.destination_state = STATE_COMPRESSED
|
||||
except (IOError, OSError) as e:
|
||||
self.module.fail_json(
|
||||
path=_to_native(path),
|
||||
dest=_to_native(self.destination),
|
||||
msg='Unable to write to compressed file: %s' % _to_native(e), exception=format_exc()
|
||||
)
|
||||
|
||||
def add_targets(self):
|
||||
self.open()
|
||||
try:
|
||||
for target in self.targets:
|
||||
if os.path.isdir(target):
|
||||
for directory_path, directory_names, file_names in os.walk(target, topdown=True):
|
||||
for directory_name in directory_names:
|
||||
full_path = os.path.join(directory_path, directory_name)
|
||||
self.add(full_path, strip_prefix(self.root, full_path))
|
||||
|
||||
for file_name in file_names:
|
||||
full_path = os.path.join(directory_path, file_name)
|
||||
self.add(full_path, strip_prefix(self.root, full_path))
|
||||
else:
|
||||
self.add(target, strip_prefix(self.root, target))
|
||||
except Exception as e:
|
||||
if self.format in ('zip', 'tar'):
|
||||
archive_format = self.format
|
||||
else:
|
||||
archive_format = 'tar.' + self.format
|
||||
self.module.fail_json(
|
||||
msg='Error when writing %s archive at %s: %s' % (
|
||||
archive_format, _to_native(self.destination), _to_native(e)
|
||||
),
|
||||
exception=format_exc()
|
||||
)
|
||||
self.close()
|
||||
|
||||
if self.errors:
|
||||
self.module.fail_json(
|
||||
msg='Errors when writing archive at %s: %s' % (_to_native(self.destination), '; '.join(self.errors))
|
||||
)
|
||||
|
||||
def is_different_from_original(self):
|
||||
if self.original_checksums is None:
|
||||
return self.original_size != self.destination_size()
|
||||
else:
|
||||
return self.original_checksums != self.destination_checksums()
|
||||
|
||||
def destination_checksums(self):
|
||||
if self.destination_exists() and self.destination_readable():
|
||||
return self._get_checksums(self.destination)
|
||||
return None
|
||||
|
||||
def destination_exists(self):
|
||||
return self.destination and os.path.exists(self.destination)
|
||||
|
||||
def destination_readable(self):
|
||||
return self.destination and os.access(self.destination, os.R_OK)
|
||||
|
||||
def destination_size(self):
|
||||
return os.path.getsize(self.destination) if self.destination_exists() else 0
|
||||
|
||||
def find_targets(self):
|
||||
for path in self.paths:
|
||||
if not os.path.lexists(path):
|
||||
self.not_found.append(path)
|
||||
else:
|
||||
self.targets.append(path)
|
||||
|
||||
def has_targets(self):
|
||||
return bool(self.targets)
|
||||
|
||||
def has_unfound_targets(self):
|
||||
return bool(self.not_found)
|
||||
|
||||
def remove_single_target(self, path):
|
||||
try:
|
||||
os.remove(path)
|
||||
except OSError as e:
|
||||
self.module.fail_json(
|
||||
path=_to_native(path),
|
||||
msg='Unable to remove source file: %s' % _to_native(e), exception=format_exc()
|
||||
)
|
||||
|
||||
def remove_targets(self):
|
||||
for path in self.successes:
|
||||
if os.path.exists(path):
|
||||
try:
|
||||
if os.path.isdir(path):
|
||||
shutil.rmtree(path)
|
||||
else:
|
||||
os.remove(path)
|
||||
except OSError:
|
||||
self.errors.append(_to_native(path))
|
||||
for path in self.paths:
|
||||
try:
|
||||
if os.path.isdir(path):
|
||||
shutil.rmtree(path)
|
||||
except OSError:
|
||||
self.errors.append(_to_native(path))
|
||||
|
||||
if self.errors:
|
||||
self.module.fail_json(
|
||||
dest=_to_native(self.destination), msg='Error deleting some source files: ', files=self.errors
|
||||
)
|
||||
|
||||
def update_permissions(self):
|
||||
file_args = self.module.load_file_common_arguments(self.module.params, path=self.destination)
|
||||
self.changed = self.module.set_fs_attributes_if_different(file_args, self.changed)
|
||||
|
||||
@property
|
||||
def result(self):
|
||||
return {
|
||||
'archived': [_to_native(p) for p in self.successes],
|
||||
'dest': _to_native(self.destination),
|
||||
'dest_state': self.destination_state,
|
||||
'changed': self.changed,
|
||||
'arcroot': _to_native(self.root),
|
||||
'missing': [_to_native(p) for p in self.not_found],
|
||||
'expanded_paths': [_to_native(p) for p in self.expanded_paths],
|
||||
'expanded_exclude_paths': [_to_native(p) for p in self.expanded_exclude_paths],
|
||||
}
|
||||
|
||||
def _check_removal_safety(self):
|
||||
for path in self.paths:
|
||||
if os.path.isdir(path) and self.destination.startswith(os.path.join(path, b'')):
|
||||
self.module.fail_json(
|
||||
path=b', '.join(self.paths),
|
||||
msg='Error, created archive can not be contained in source paths when remove=true'
|
||||
)
|
||||
|
||||
def _open_compressed_file(self, path, mode):
|
||||
f = None
|
||||
if self.format == 'gz':
|
||||
f = gzip.open(path, mode)
|
||||
elif self.format == 'bz2':
|
||||
f = bz2.BZ2File(path, mode)
|
||||
elif self.format == 'xz':
|
||||
f = lzma.LZMAFile(path, mode)
|
||||
else:
|
||||
self.module.fail_json(msg="%s is not a valid format" % self.format)
|
||||
|
||||
return f
|
||||
|
||||
@abc.abstractmethod
|
||||
def close(self):
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def contains(self, name):
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def open(self):
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def _add(self, path, archive_name):
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def _get_checksums(self, path):
|
||||
pass
|
||||
|
||||
|
||||
class ZipArchive(Archive):
|
||||
def __init__(self, module):
|
||||
super(ZipArchive, self).__init__(module)
|
||||
|
||||
def close(self):
|
||||
self.file.close()
|
||||
|
||||
def contains(self, name):
|
||||
try:
|
||||
self.file.getinfo(name)
|
||||
except KeyError:
|
||||
return False
|
||||
return True
|
||||
|
||||
def open(self):
|
||||
self.file = zipfile.ZipFile(_to_native_ascii(self.destination), 'w', zipfile.ZIP_DEFLATED, True)
|
||||
|
||||
def _add(self, path, archive_name):
|
||||
if not matches_exclusion_patterns(path, self.exclusion_patterns):
|
||||
self.file.write(path, archive_name)
|
||||
|
||||
def _get_checksums(self, path):
|
||||
try:
|
||||
archive = zipfile.ZipFile(_to_native_ascii(path), 'r')
|
||||
checksums = set((info.filename, info.CRC) for info in archive.infolist())
|
||||
archive.close()
|
||||
except zipfile.BadZipfile:
|
||||
checksums = set()
|
||||
return checksums
|
||||
|
||||
|
||||
class TarArchive(Archive):
|
||||
def __init__(self, module):
|
||||
super(TarArchive, self).__init__(module)
|
||||
self.fileIO = None
|
||||
|
||||
def close(self):
|
||||
self.file.close()
|
||||
if self.format == 'xz':
|
||||
with lzma.open(_to_native(self.destination), 'wb') as f:
|
||||
f.write(self.fileIO.getvalue())
|
||||
self.fileIO.close()
|
||||
|
||||
def contains(self, name):
|
||||
try:
|
||||
self.file.getmember(name)
|
||||
except KeyError:
|
||||
return False
|
||||
return True
|
||||
|
||||
def open(self):
|
||||
if self.format in ('gz', 'bz2'):
|
||||
self.file = tarfile.open(_to_native_ascii(self.destination), 'w|' + self.format)
|
||||
# python3 tarfile module allows xz format but for python2 we have to create the tarfile
|
||||
# in memory and then compress it with lzma.
|
||||
elif self.format == 'xz':
|
||||
self.fileIO = io.BytesIO()
|
||||
self.file = tarfile.open(fileobj=self.fileIO, mode='w')
|
||||
elif self.format == 'tar':
|
||||
self.file = tarfile.open(_to_native_ascii(self.destination), 'w')
|
||||
else:
|
||||
self.module.fail_json(msg="%s is not a valid archive format" % self.format)
|
||||
|
||||
def _add(self, path, archive_name):
|
||||
def py27_filter(tarinfo):
|
||||
return None if matches_exclusion_patterns(tarinfo.name, self.exclusion_patterns) else tarinfo
|
||||
|
||||
def py26_filter(path):
|
||||
return matches_exclusion_patterns(path, self.exclusion_patterns)
|
||||
|
||||
if PY27:
|
||||
self.file.add(path, archive_name, recursive=False, filter=py27_filter)
|
||||
else:
|
||||
self.file.add(path, archive_name, recursive=False, exclude=py26_filter)
|
||||
|
||||
def _get_checksums(self, path):
|
||||
if HAS_LZMA:
|
||||
LZMAError = lzma.LZMAError
|
||||
else:
|
||||
# Just picking another exception that's also listed below
|
||||
LZMAError = tarfile.ReadError
|
||||
try:
|
||||
if self.format == 'xz':
|
||||
with lzma.open(_to_native_ascii(path), 'r') as f:
|
||||
archive = tarfile.open(fileobj=f)
|
||||
checksums = set((info.name, info.chksum) for info in archive.getmembers())
|
||||
archive.close()
|
||||
else:
|
||||
archive = tarfile.open(_to_native_ascii(path), 'r|' + self.format)
|
||||
checksums = set((info.name, info.chksum) for info in archive.getmembers())
|
||||
archive.close()
|
||||
except (LZMAError, tarfile.ReadError, tarfile.CompressionError):
|
||||
try:
|
||||
# The python implementations of gzip, bz2, and lzma do not support restoring compressed files
|
||||
# to their original names so only file checksum is returned
|
||||
f = self._open_compressed_file(_to_native_ascii(path), 'r')
|
||||
checksums = set([(b'', crc32(f.read()))])
|
||||
f.close()
|
||||
except Exception:
|
||||
checksums = set()
|
||||
return checksums
|
||||
|
||||
|
||||
def get_archive(module):
|
||||
if module.params['format'] == 'zip':
|
||||
return ZipArchive(module)
|
||||
else:
|
||||
return TarArchive(module)
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
path=dict(type='list', elements='path', required=True),
|
||||
format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']),
|
||||
dest=dict(type='path'),
|
||||
exclude_path=dict(type='list', elements='path', default=[]),
|
||||
exclusion_patterns=dict(type='list', elements='path'),
|
||||
force_archive=dict(type='bool', default=False),
|
||||
remove=dict(type='bool', default=False),
|
||||
),
|
||||
add_file_common_args=True,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
if not HAS_LZMA and module.params['format'] == 'xz':
|
||||
module.fail_json(
|
||||
msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"), exception=LZMA_IMP_ERR
|
||||
)
|
||||
|
||||
check_mode = module.check_mode
|
||||
|
||||
archive = get_archive(module)
|
||||
archive.find_targets()
|
||||
|
||||
if not archive.has_targets():
|
||||
if archive.destination_exists():
|
||||
archive.destination_state = STATE_ARCHIVED if is_archive(archive.destination) else STATE_COMPRESSED
|
||||
elif archive.has_targets() and archive.must_archive:
|
||||
if check_mode:
|
||||
archive.changed = True
|
||||
else:
|
||||
archive.add_targets()
|
||||
archive.destination_state = STATE_INCOMPLETE if archive.has_unfound_targets() else STATE_ARCHIVED
|
||||
archive.changed |= archive.is_different_from_original()
|
||||
if archive.remove:
|
||||
archive.remove_targets()
|
||||
else:
|
||||
if check_mode:
|
||||
if not archive.destination_exists():
|
||||
archive.changed = True
|
||||
else:
|
||||
path = archive.paths[0]
|
||||
archive.add_single_target(path)
|
||||
archive.changed |= archive.is_different_from_original()
|
||||
if archive.remove:
|
||||
archive.remove_single_target(path)
|
||||
|
||||
if archive.destination_exists():
|
||||
archive.update_permissions()
|
||||
|
||||
module.exit_json(**archive.result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,210 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: atomic_container
|
||||
short_description: Manage the containers on the atomic host platform
|
||||
description:
|
||||
- Manage the containers on the atomic host platform.
|
||||
    - Allows managing the lifecycle of a container on the atomic host platform.
|
||||
author: "Giuseppe Scrivano (@giuseppe)"
|
||||
notes:
|
||||
    - Host should support the C(atomic) command.
|
||||
requirements:
|
||||
- atomic
|
||||
- "python >= 2.6"
|
||||
options:
|
||||
backend:
|
||||
description:
|
||||
- Define the backend to use for the container.
|
||||
required: true
|
||||
choices: ["docker", "ostree"]
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Name of the container.
|
||||
required: true
|
||||
type: str
|
||||
image:
|
||||
description:
|
||||
- The image to use to install the container.
|
||||
required: true
|
||||
type: str
|
||||
rootfs:
|
||||
description:
|
||||
- Define the rootfs of the image.
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- State of the container.
|
||||
choices: ["absent", "latest", "present", "rollback"]
|
||||
default: "latest"
|
||||
type: str
|
||||
mode:
|
||||
description:
|
||||
            - Define whether it is a user or a system container.
|
||||
choices: ["user", "system"]
|
||||
type: str
|
||||
values:
|
||||
description:
|
||||
- Values for the installation of the container.
|
||||
- This option is permitted only with mode 'user' or 'system'.
|
||||
- The values specified here will be used at installation time as --set arguments for atomic install.
|
||||
type: list
|
||||
elements: str
|
||||
default: []
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
|
||||
- name: Install the etcd system container
|
||||
community.general.atomic_container:
|
||||
name: etcd
|
||||
image: rhel/etcd
|
||||
backend: ostree
|
||||
state: latest
|
||||
mode: system
|
||||
values:
|
||||
- ETCD_NAME=etcd.server
|
||||
|
||||
- name: Uninstall the etcd system container
|
||||
community.general.atomic_container:
|
||||
name: etcd
|
||||
image: rhel/etcd
|
||||
backend: ostree
|
||||
state: absent
|
||||
mode: system
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
msg:
|
||||
    description: The command's standard output.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'Using default tag: latest ...'
|
||||
'''
|
||||
|
||||
# import module snippets
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
|
||||
def do_install(module, mode, rootfs, container, image, values_list, backend):
|
||||
system_list = ["--system"] if mode == 'system' else []
|
||||
user_list = ["--user"] if mode == 'user' else []
|
||||
rootfs_list = ["--rootfs=%s" % rootfs] if rootfs else []
|
||||
atomic_bin = module.get_bin_path('atomic')
|
||||
args = [atomic_bin, 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image]
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
if rc != 0:
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
else:
|
||||
changed = "Extracting" in out or "Copying blob" in out
|
||||
module.exit_json(msg=out, changed=changed)
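# Illustrative command line (values from the etcd example in the documentation):
#   atomic install --storage=ostree --name=etcd --system --set=ETCD_NAME=etcd.server rhel/etcd
# changed is reported only when the output shows image data actually being fetched.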
|
||||
|
||||
|
||||
def do_update(module, container, image, values_list):
|
||||
atomic_bin = module.get_bin_path('atomic')
|
||||
args = [atomic_bin, 'containers', 'update', "--rebase=%s" % image] + values_list + [container]
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
if rc != 0:
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
else:
|
||||
changed = "Extracting" in out or "Copying blob" in out
|
||||
module.exit_json(msg=out, changed=changed)
|
||||
|
||||
|
||||
def do_uninstall(module, name, backend):
|
||||
atomic_bin = module.get_bin_path('atomic')
|
||||
args = [atomic_bin, 'uninstall', "--storage=%s" % backend, name]
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
if rc != 0:
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
module.exit_json(msg=out, changed=True)
|
||||
|
||||
|
||||
def do_rollback(module, name):
|
||||
atomic_bin = module.get_bin_path('atomic')
|
||||
args = [atomic_bin, 'containers', 'rollback', name]
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
if rc != 0:
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
else:
|
||||
changed = "Rolling back" in out
|
||||
module.exit_json(msg=out, changed=changed)
|
||||
|
||||
|
||||
def core(module):
|
||||
mode = module.params['mode']
|
||||
name = module.params['name']
|
||||
image = module.params['image']
|
||||
rootfs = module.params['rootfs']
|
||||
values = module.params['values']
|
||||
backend = module.params['backend']
|
||||
state = module.params['state']
|
||||
|
||||
atomic_bin = module.get_bin_path('atomic')
|
||||
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
|
||||
|
||||
values_list = ["--set=%s" % x for x in values] if values else []
|
||||
|
||||
args = [atomic_bin, 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name]
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
if rc != 0:
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
return
|
||||
present = name in out
|
||||
|
||||
if state == 'present' and present:
|
||||
module.exit_json(msg=out, changed=False)
|
||||
elif (state in ['latest', 'present']) and not present:
|
||||
do_install(module, mode, rootfs, name, image, values_list, backend)
|
||||
elif state == 'latest':
|
||||
do_update(module, name, image, values_list)
|
||||
elif state == 'absent':
|
||||
if not present:
|
||||
module.exit_json(msg="The container is not present", changed=False)
|
||||
else:
|
||||
do_uninstall(module, name, backend)
|
||||
elif state == 'rollback':
|
||||
do_rollback(module, name)
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
mode=dict(choices=['user', 'system']),
|
||||
name=dict(required=True),
|
||||
image=dict(required=True),
|
||||
rootfs=dict(),
|
||||
state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']),
|
||||
backend=dict(required=True, choices=['docker', 'ostree']),
|
||||
values=dict(type='list', default=[], elements='str'),
|
||||
),
|
||||
)
|
||||
|
||||
    if module.params['values'] and module.params['mode'] is None:
|
||||
module.fail_json(msg="values is supported only with user or system mode")
|
||||
|
||||
# Verify that the platform supports atomic command
|
||||
dummy = module.get_bin_path('atomic', required=True)
|
||||
|
||||
try:
|
||||
core(module)
|
||||
except Exception as e:
|
||||
module.fail_json(msg='Unanticipated error running atomic: %s' % to_native(e), exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,98 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: atomic_host
|
||||
short_description: Manage the atomic host platform
|
||||
description:
|
||||
- Manage the atomic host platform.
|
||||
    - Rebooting the Atomic host platform should be done outside this module.
|
||||
author:
|
||||
- Saravanan KR (@krsacme)
|
||||
notes:
|
||||
- Host should be an atomic platform (verified by existence of '/run/ostree-booted' file).
|
||||
requirements:
|
||||
- atomic
|
||||
- python >= 2.6
|
||||
options:
|
||||
revision:
|
||||
description:
|
||||
- The version number of the atomic host to be deployed.
|
||||
- Providing C(latest) will upgrade to the latest available version.
|
||||
default: 'latest'
|
||||
aliases: [ version ]
|
||||
type: str
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Upgrade the atomic host platform to the latest version (atomic host upgrade)
|
||||
community.general.atomic_host:
|
||||
revision: latest
|
||||
|
||||
- name: Deploy a specific revision as the atomic host (atomic host deploy 23.130)
|
||||
community.general.atomic_host:
|
||||
revision: 23.130
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
msg:
|
||||
    description: The command's standard output.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'Already on latest'
|
||||
'''
|
||||
import os
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
|
||||
def core(module):
|
||||
revision = module.params['revision']
|
||||
atomic_bin = module.get_bin_path('atomic', required=True)
|
||||
|
||||
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
|
||||
|
||||
if revision == 'latest':
|
||||
args = [atomic_bin, 'host', 'upgrade']
|
||||
else:
|
||||
args = [atomic_bin, 'host', 'deploy', revision]
|
||||
|
||||
rc, out, err = module.run_command(args, check_rc=False)
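    # "atomic host upgrade" signals "already on the latest deployment" with exit
    # code 77, which is treated below as an unchanged result rather than a failure.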
|
||||
|
||||
if rc == 77 and revision == 'latest':
|
||||
module.exit_json(msg="Already on latest", changed=False)
|
||||
elif rc != 0:
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
else:
|
||||
module.exit_json(msg=out, changed=True)
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
revision=dict(type='str', default='latest', aliases=["version"]),
|
||||
),
|
||||
)
|
||||
|
||||
# Verify that the platform is atomic host
|
||||
if not os.path.exists("/run/ostree-booted"):
|
||||
module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only")
|
||||
|
||||
try:
|
||||
core(module)
|
||||
except Exception as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,170 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: atomic_image
|
||||
short_description: Manage the container images on the atomic host platform
|
||||
description:
|
||||
- Manage the container images on the atomic host platform.
|
||||
    - Allows executing the commands specified by the RUN label of the container image, when present.
|
||||
author:
|
||||
- Saravanan KR (@krsacme)
|
||||
notes:
|
||||
- Host should support C(atomic) command.
|
||||
requirements:
|
||||
- atomic
|
||||
- python >= 2.6
|
||||
options:
|
||||
backend:
|
||||
description:
|
||||
- Define the backend where the image is pulled.
|
||||
choices: [ 'docker', 'ostree' ]
|
||||
type: str
|
||||
name:
|
||||
description:
|
||||
- Name of the container image.
|
||||
required: true
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- The state of the container image.
|
||||
            - The state C(latest) ensures the container image is upgraded to the latest version and forcefully restarts the container, if running.
|
||||
choices: [ 'absent', 'latest', 'present' ]
|
||||
default: 'latest'
|
||||
type: str
|
||||
started:
|
||||
description:
|
||||
            - Start or stop the container.
|
||||
type: bool
|
||||
default: true
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog)
|
||||
community.general.atomic_image:
|
||||
name: rhel7/rsyslog
|
||||
state: latest
|
||||
|
||||
- name: Pull busybox to the OSTree backend
|
||||
community.general.atomic_image:
|
||||
name: busybox
|
||||
state: latest
|
||||
backend: ostree
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
msg:
|
||||
    description: The command's standard output.
|
||||
returned: always
|
||||
type: str
|
||||
sample: 'Using default tag: latest ...'
|
||||
'''
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.common.text.converters import to_native
|
||||
|
||||
|
||||
def do_upgrade(module, image):
|
||||
atomic_bin = module.get_bin_path('atomic')
|
||||
args = [atomic_bin, 'update', '--force', image]
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
    if rc != 0:  # something went wrong; emit the error message
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
elif 'Image is up to date' in out:
|
||||
return False
|
||||
|
||||
return True
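# Illustrative: do_upgrade(module, 'rhel7/rsyslog') runs
# "atomic update --force rhel7/rsyslog" and returns False when stdout reports
# "Image is up to date", letting the caller report changed accurately.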
|
||||
|
||||
|
||||
def core(module):
|
||||
image = module.params['name']
|
||||
state = module.params['state']
|
||||
started = module.params['started']
|
||||
backend = module.params['backend']
|
||||
is_upgraded = False
|
||||
|
||||
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
|
||||
atomic_bin = module.get_bin_path('atomic')
|
||||
out = {}
|
||||
err = {}
|
||||
rc = 0
|
||||
|
||||
if backend:
|
||||
if state == 'present' or state == 'latest':
|
||||
args = [atomic_bin, 'pull', "--storage=%s" % backend, image]
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
if rc < 0:
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
else:
|
||||
out_run = ""
|
||||
if started:
|
||||
args = [atomic_bin, 'run', "--storage=%s" % backend, image]
|
||||
rc, out_run, err = module.run_command(args, check_rc=False)
|
||||
if rc < 0:
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
|
||||
changed = "Extracting" in out or "Copying blob" in out
|
||||
module.exit_json(msg=(out + out_run), changed=changed)
|
||||
elif state == 'absent':
|
||||
args = [atomic_bin, 'images', 'delete', "--storage=%s" % backend, image]
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
if rc < 0:
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
else:
|
||||
changed = "Unable to find" not in out
|
||||
module.exit_json(msg=out, changed=changed)
|
||||
return
|
||||
|
||||
if state == 'present' or state == 'latest':
|
||||
if state == 'latest':
|
||||
is_upgraded = do_upgrade(module, image)
|
||||
|
||||
if started:
|
||||
args = [atomic_bin, 'run', image]
|
||||
else:
|
||||
args = [atomic_bin, 'install', image]
|
||||
elif state == 'absent':
|
||||
args = [atomic_bin, 'uninstall', image]
|
||||
|
||||
rc, out, err = module.run_command(args, check_rc=False)
|
||||
|
||||
if rc < 0:
|
||||
module.fail_json(rc=rc, msg=err)
|
||||
elif rc == 1 and 'already present' in err:
|
||||
        module.exit_json(result=err, changed=is_upgraded)
|
||||
elif started and 'Container is running' in out:
|
||||
module.exit_json(result=out, changed=is_upgraded)
|
||||
else:
|
||||
module.exit_json(msg=out, changed=True)
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
backend=dict(type='str', choices=['docker', 'ostree']),
|
||||
name=dict(type='str', required=True),
|
||||
state=dict(type='str', default='latest', choices=['absent', 'latest', 'present']),
|
||||
started=dict(type='bool', default=True),
|
||||
),
|
||||
)
|
||||
|
||||
# Verify that the platform supports atomic command
|
||||
dummy = module.get_bin_path('atomic', required=True)
|
||||
|
||||
try:
|
||||
core(module)
|
||||
except Exception as e:
|
||||
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,154 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2017, Ted Trask <ttrask01@yahoo.com>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: awall
|
||||
short_description: Manage awall policies
|
||||
author: Ted Trask (@tdtrask) <ttrask01@yahoo.com>
|
||||
description:
|
||||
    - This module allows enabling, disabling, and activating I(awall) policies.
|
||||
- Alpine Wall (I(awall)) generates a firewall configuration from the enabled policy files
|
||||
and activates the configuration on the system.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- One or more policy names.
|
||||
type: list
|
||||
elements: str
|
||||
state:
|
||||
description:
|
||||
- Whether the policies should be enabled or disabled.
|
||||
type: str
|
||||
choices: [ disabled, enabled ]
|
||||
default: enabled
|
||||
activate:
|
||||
description:
|
||||
- Activate the new firewall rules.
|
||||
- Can be run with other steps or on its own.
|
||||
type: bool
|
||||
default: false
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Enable "foo" and "bar" policy
|
||||
community.general.awall:
|
||||
name: [ foo bar ]
|
||||
state: enabled
|
||||
|
||||
- name: Disable "foo" and "bar" policy and activate new rules
|
||||
community.general.awall:
|
||||
name:
|
||||
- foo
|
||||
- bar
|
||||
state: disabled
|
||||
    activate: true
|
||||
|
||||
- name: Activate currently enabled firewall rules
|
||||
community.general.awall:
|
||||
activate: true
|
||||
'''
|
||||
|
||||
RETURN = ''' # '''
|
||||
|
||||
import re
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def activate(module):
|
||||
cmd = "%s activate --force" % (AWALL_PATH)
|
||||
rc, stdout, stderr = module.run_command(cmd)
|
||||
if rc == 0:
|
||||
return True
|
||||
else:
|
||||
module.fail_json(msg="could not activate new rules", stdout=stdout, stderr=stderr)
|
||||
|
||||
|
||||
def is_policy_enabled(module, name):
|
||||
cmd = "%s list" % (AWALL_PATH)
|
||||
rc, stdout, stderr = module.run_command(cmd)
|
||||
if re.search(r"^%s\s+enabled" % name, stdout, re.MULTILINE):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def enable_policy(module, names, act):
|
||||
policies = []
|
||||
for name in names:
|
||||
if not is_policy_enabled(module, name):
|
||||
policies.append(name)
|
||||
if not policies:
|
||||
module.exit_json(changed=False, msg="policy(ies) already enabled")
|
||||
names = " ".join(policies)
|
||||
if module.check_mode:
|
||||
cmd = "%s list" % (AWALL_PATH)
|
||||
else:
|
||||
cmd = "%s enable %s" % (AWALL_PATH, names)
|
||||
rc, stdout, stderr = module.run_command(cmd)
|
||||
if rc != 0:
|
||||
module.fail_json(msg="failed to enable %s" % names, stdout=stdout, stderr=stderr)
|
||||
if act and not module.check_mode:
|
||||
activate(module)
|
||||
module.exit_json(changed=True, msg="enabled awall policy(ies): %s" % names)
|
||||
|
||||
|
||||
def disable_policy(module, names, act):
|
||||
policies = []
|
||||
for name in names:
|
||||
if is_policy_enabled(module, name):
|
||||
policies.append(name)
|
||||
if not policies:
|
||||
module.exit_json(changed=False, msg="policy(ies) already disabled")
|
||||
names = " ".join(policies)
|
||||
if module.check_mode:
|
||||
cmd = "%s list" % (AWALL_PATH)
|
||||
else:
|
||||
cmd = "%s disable %s" % (AWALL_PATH, names)
|
||||
rc, stdout, stderr = module.run_command(cmd)
|
||||
if rc != 0:
|
||||
module.fail_json(msg="failed to disable %s" % names, stdout=stdout, stderr=stderr)
|
||||
if act and not module.check_mode:
|
||||
activate(module)
|
||||
module.exit_json(changed=True, msg="disabled awall policy(ies): %s" % names)
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
state=dict(type='str', default='enabled', choices=['disabled', 'enabled']),
|
||||
name=dict(type='list', elements='str'),
|
||||
activate=dict(type='bool', default=False),
|
||||
),
|
||||
required_one_of=[['name', 'activate']],
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
global AWALL_PATH
|
||||
AWALL_PATH = module.get_bin_path('awall', required=True)
|
||||
|
||||
p = module.params
|
||||
|
||||
if p['name']:
|
||||
if p['state'] == 'enabled':
|
||||
enable_policy(module, p['name'], p['activate'])
|
||||
elif p['state'] == 'disabled':
|
||||
disable_policy(module, p['name'], p['activate'])
|
||||
|
||||
if p['activate']:
|
||||
if not module.check_mode:
|
||||
activate(module)
|
||||
module.exit_json(changed=True, msg="activated awall rules")
|
||||
|
||||
module.fail_json(msg="no action defined")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,408 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2016, Adam Števko <adam.stevko@gmail.com>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = r'''
|
||||
---
|
||||
module: beadm
|
||||
short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems
|
||||
description:
|
||||
- Create, delete or activate ZFS boot environments.
|
||||
- Mount and unmount ZFS boot environments.
|
||||
author: Adam Števko (@xen0l)
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- ZFS boot environment name.
|
||||
type: str
|
||||
required: true
|
||||
aliases: [ "be" ]
|
||||
snapshot:
|
||||
description:
|
||||
- If specified, the new boot environment will be cloned from the given
|
||||
snapshot or inactive boot environment.
|
||||
type: str
|
||||
description:
|
||||
description:
|
||||
- Associate a description with a new boot environment. This option is
|
||||
available only on Solarish platforms.
|
||||
type: str
|
||||
options:
|
||||
description:
|
||||
            - Create the datasets for the new BE with specific ZFS properties.
|
||||
- Multiple options can be specified.
|
||||
- This option is available only on Solarish platforms.
|
||||
type: str
|
||||
mountpoint:
|
||||
description:
|
||||
            - Path where the ZFS boot environment should be mounted.
|
||||
type: path
|
||||
state:
|
||||
description:
|
||||
- Create or delete ZFS boot environment.
|
||||
type: str
|
||||
choices: [ absent, activated, mounted, present, unmounted ]
|
||||
default: present
|
||||
force:
|
||||
description:
|
||||
- Specifies if the unmount should be forced.
|
||||
type: bool
|
||||
default: false
|
||||
'''
|
||||
|
||||
EXAMPLES = r'''
|
||||
- name: Create ZFS boot environment
|
||||
community.general.beadm:
|
||||
name: upgrade-be
|
||||
state: present
|
||||
|
||||
- name: Create ZFS boot environment from existing inactive boot environment
|
||||
community.general.beadm:
|
||||
name: upgrade-be
|
||||
snapshot: be@old
|
||||
state: present
|
||||
|
||||
- name: Create ZFS boot environment with compression enabled and description "upgrade"
|
||||
community.general.beadm:
|
||||
name: upgrade-be
|
||||
options: "compression=on"
|
||||
description: upgrade
|
||||
state: present
|
||||
|
||||
- name: Delete ZFS boot environment
|
||||
community.general.beadm:
|
||||
name: old-be
|
||||
state: absent
|
||||
|
||||
- name: Mount ZFS boot environment on /tmp/be
|
||||
community.general.beadm:
|
||||
name: BE
|
||||
mountpoint: /tmp/be
|
||||
state: mounted
|
||||
|
||||
- name: Unmount ZFS boot environment
|
||||
community.general.beadm:
|
||||
name: BE
|
||||
state: unmounted
|
||||
|
||||
- name: Activate ZFS boot environment
|
||||
community.general.beadm:
|
||||
name: upgrade-be
|
||||
state: activated
|
||||
'''
|
||||
|
||||
RETURN = r'''
|
||||
name:
|
||||
description: BE name
|
||||
returned: always
|
||||
type: str
|
||||
sample: pre-upgrade
|
||||
snapshot:
|
||||
description: ZFS snapshot to create BE from
|
||||
returned: always
|
||||
type: str
|
||||
sample: rpool/ROOT/oi-hipster@fresh
|
||||
description:
|
||||
description: BE description
|
||||
returned: always
|
||||
type: str
|
||||
sample: Upgrade from 9.0 to 10.0
|
||||
options:
|
||||
description: BE additional options
|
||||
returned: always
|
||||
type: str
|
||||
sample: compression=on
|
||||
mountpoint:
|
||||
description: BE mountpoint
|
||||
returned: always
|
||||
type: str
|
||||
sample: /mnt/be
|
||||
state:
|
||||
description: state of the target
|
||||
returned: always
|
||||
type: str
|
||||
sample: present
|
||||
force:
|
||||
description: If forced action is wanted
|
||||
returned: always
|
||||
type: bool
|
||||
sample: false
|
||||
'''
|
||||
|
||||
import os
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
class BE(object):
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
|
||||
self.name = module.params['name']
|
||||
self.snapshot = module.params['snapshot']
|
||||
self.description = module.params['description']
|
||||
self.options = module.params['options']
|
||||
self.mountpoint = module.params['mountpoint']
|
||||
self.state = module.params['state']
|
||||
self.force = module.params['force']
|
||||
self.is_freebsd = os.uname()[0] == 'FreeBSD'
|
||||
|
||||
def _beadm_list(self):
|
||||
cmd = [self.module.get_bin_path('beadm'), 'list', '-H']
|
||||
if '@' in self.name:
|
||||
cmd.append('-s')
|
||||
return self.module.run_command(cmd)
|
||||
|
||||
def _find_be_by_name(self, out):
|
||||
if '@' in self.name:
|
||||
for line in out.splitlines():
|
||||
if self.is_freebsd:
|
||||
check = line.split()
|
||||
if check == []:
|
||||
continue
|
||||
full_name = check[0].split('/')
|
||||
if full_name == []:
|
||||
continue
|
||||
check[0] = full_name[len(full_name) - 1]
|
||||
if check[0] == self.name:
|
||||
return check
|
||||
else:
|
||||
check = line.split(';')
|
||||
if check[0] == self.name:
|
||||
return check
|
||||
else:
|
||||
for line in out.splitlines():
|
||||
if self.is_freebsd:
|
||||
check = line.split()
|
||||
if check[0] == self.name:
|
||||
return check
|
||||
else:
|
||||
check = line.split(';')
|
||||
if check[0] == self.name:
|
||||
return check
|
||||
return None
|
||||
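
    # Parsing note (illustrative samples, not taken from this changeset):
    # `beadm list -H` prints one BE per line. Solarish systems separate the
    # fields with semicolons, e.g.
    #   openindiana;385dff...;NR;/;3.5G;static;2019-03-23 10:15
    # while FreeBSD separates them with whitespace, e.g.
    #   default NR / 1.2G 2019-03-23 10:15
    # which is why _find_be_by_name() splits on ';' or on whitespace above.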

    def exists(self):
        (rc, out, dummy) = self._beadm_list()

        if rc == 0:
            if self._find_be_by_name(out):
                return True
            else:
                return False
        else:
            return False

    def is_activated(self):
        (rc, out, dummy) = self._beadm_list()

        if rc == 0:
            line = self._find_be_by_name(out)
            if line is None:
                return False
            if self.is_freebsd:
                if 'R' in line[1]:
                    return True
            else:
                if 'R' in line[2]:
                    return True

        return False

    def activate_be(self):
        cmd = [self.module.get_bin_path('beadm'), 'activate', self.name]
        return self.module.run_command(cmd)

    def create_be(self):
        cmd = [self.module.get_bin_path('beadm'), 'create']

        if self.snapshot:
            cmd.extend(['-e', self.snapshot])
        if not self.is_freebsd:
            if self.description:
                cmd.extend(['-d', self.description])
            if self.options:
                cmd.extend(['-o', self.options])

        cmd.append(self.name)

        return self.module.run_command(cmd)

    def destroy_be(self):
        cmd = [self.module.get_bin_path('beadm'), 'destroy', '-F', self.name]
        return self.module.run_command(cmd)

    def is_mounted(self):
        (rc, out, dummy) = self._beadm_list()

        if rc == 0:
            line = self._find_be_by_name(out)
            if line is None:
                return False
            if self.is_freebsd:
                # On FreeBSD, we exclude currently mounted BE on /, as it is
                # special and can be activated even if it is mounted. That is not
                # possible with non-root BEs.
                if line[2] != '-' and line[2] != '/':
                    return True
            else:
                if line[3]:
                    return True

        return False

    def mount_be(self):
        cmd = [self.module.get_bin_path('beadm'), 'mount', self.name]

        if self.mountpoint:
            cmd.append(self.mountpoint)

        return self.module.run_command(cmd)

    def unmount_be(self):
        cmd = [self.module.get_bin_path('beadm'), 'unmount']
        if self.force:
            cmd.append('-f')
        cmd.append(self.name)

        return self.module.run_command(cmd)


def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True, aliases=['be']),
            snapshot=dict(type='str'),
            description=dict(type='str'),
            options=dict(type='str'),
            mountpoint=dict(type='path'),
            state=dict(type='str', default='present', choices=['absent', 'activated', 'mounted', 'present', 'unmounted']),
            force=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
    )

    be = BE(module)

    rc = None
    out = ''
    err = ''
    result = {}
    result['name'] = be.name
    result['state'] = be.state

    if be.snapshot:
        result['snapshot'] = be.snapshot

    if be.description:
        result['description'] = be.description

    if be.options:
        result['options'] = be.options

    if be.mountpoint:
        result['mountpoint'] = be.mountpoint

    if be.state == 'absent':
        # beadm on FreeBSD and Solarish systems differs in delete behaviour in
        # that we are not allowed to delete activated BE on FreeBSD while on
        # Solarish systems we cannot delete BE if it is mounted. We add mount
        # check for both platforms as BE should be explicitly unmounted before
        # being deleted. On FreeBSD, we also check if the BE is activated.
        if be.exists():
            if not be.is_mounted():
                if module.check_mode:
                    module.exit_json(changed=True)

                if be.is_freebsd:
                    if be.is_activated():
                        module.fail_json(msg='Unable to remove active BE!')

                (rc, out, err) = be.destroy_be()

                if rc != 0:
                    module.fail_json(msg='Error while destroying BE: "%s"' % err,
                                     name=be.name,
                                     stderr=err,
                                     rc=rc)
            else:
                module.fail_json(msg='Unable to remove BE as it is mounted!')

    elif be.state == 'present':
        if not be.exists():
            if module.check_mode:
                module.exit_json(changed=True)

            (rc, out, err) = be.create_be()

            if rc != 0:
                module.fail_json(msg='Error while creating BE: "%s"' % err,
                                 name=be.name,
                                 stderr=err,
                                 rc=rc)

    elif be.state == 'activated':
        if not be.is_activated():
            if module.check_mode:
                module.exit_json(changed=True)

            # On FreeBSD, beadm is unable to activate mounted BEs, so we add
            # an explicit check for that case.
            if be.is_freebsd:
                if be.is_mounted():
                    module.fail_json(msg='Unable to activate mounted BE!')

            (rc, out, err) = be.activate_be()

            if rc != 0:
                module.fail_json(msg='Error while activating BE: "%s"' % err,
                                 name=be.name,
                                 stderr=err,
                                 rc=rc)
    elif be.state == 'mounted':
        if not be.is_mounted():
            if module.check_mode:
                module.exit_json(changed=True)

            (rc, out, err) = be.mount_be()

            if rc != 0:
                module.fail_json(msg='Error while mounting BE: "%s"' % err,
                                 name=be.name,
                                 stderr=err,
                                 rc=rc)

    elif be.state == 'unmounted':
        if be.is_mounted():
            if module.check_mode:
                module.exit_json(changed=True)

            (rc, out, err) = be.unmount_be()

            if rc != 0:
                module.fail_json(msg='Error while unmounting BE: "%s"' % err,
                                 name=be.name,
                                 stderr=err,
                                 rc=rc)

    if rc is None:
        result['changed'] = False
    else:
        result['changed'] = True

    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err

    module.exit_json(**result)


if __name__ == '__main__':
    main()
@@ -0,0 +1,168 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Jiangge Zhang <tonyseek@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
module: bearychat
short_description: Send BearyChat notifications
description:
    - The M(community.general.bearychat) module sends notifications to U(https://bearychat.com)
      via the Incoming Robot integration.
author: "Jiangge Zhang (@tonyseek)"
options:
  url:
    type: str
    description:
      - BearyChat WebHook URL. This authenticates you to the BearyChat
        service. It looks like
        C(https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60).
    required: true
  text:
    type: str
    description:
      - Message to send.
  markdown:
    description:
      - If C(true), text will be parsed as Markdown.
    default: true
    type: bool
  channel:
    type: str
    description:
      - Channel to send the message to. If absent, the message goes to the
        default channel selected by the I(url).
  attachments:
    type: list
    elements: dict
    description:
      - Define a list of attachments. For more information, see
        U(https://github.com/bearyinnovative/bearychat-tutorial/blob/master/robots/incoming.md#attachments).
'''

EXAMPLES = """
- name: Send notification message via BearyChat
  local_action:
    module: bearychat
    url: |
      https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
    text: "{{ inventory_hostname }} completed"

- name: Send notification message via BearyChat (all options)
  local_action:
    module: bearychat
    url: |
      https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
    text: "{{ inventory_hostname }} completed"
    markdown: false
    channel: "#ansible"
    attachments:
      - title: "Ansible on {{ inventory_hostname }}"
        text: "May the Force be with you."
        color: "#ffffff"
        images:
          - http://example.com/index.png
"""

RETURN = """
msg:
    description: execution result
    returned: success
    type: str
    sample: "OK"
"""

try:
    from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
    HAS_URLPARSE = True
except Exception:
    HAS_URLPARSE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url


def build_payload_for_bearychat(module, text, markdown, channel, attachments):
    payload = {}
    if text is not None:
        payload['text'] = text
    if markdown is not None:
        payload['markdown'] = markdown
    if channel is not None:
        payload['channel'] = channel
    if attachments is not None:
        payload.setdefault('attachments', []).extend(
            build_payload_for_bearychat_attachment(
                module, item.get('title'), item.get('text'), item.get('color'),
                item.get('images'))
            for item in attachments)
    payload = 'payload=%s' % module.jsonify(payload)
    return payload
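
# Illustration (hypothetical values): build_payload_for_bearychat(module,
# 'deploy done', False, '#ops', None) returns the form-encoded string
#   payload={"text": "deploy done", "markdown": false, "channel": "#ops"}
# (key order depends on module.jsonify()).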


def build_payload_for_bearychat_attachment(module, title, text, color, images):
    attachment = {}
    if title is not None:
        attachment['title'] = title
    if text is not None:
        attachment['text'] = text
    if color is not None:
        attachment['color'] = color
    if images is not None:
        target_images = attachment.setdefault('images', [])
        if not isinstance(images, (list, tuple)):
            images = [images]
        for image in images:
            if isinstance(image, dict) and 'url' in image:
                image = {'url': image['url']}
            elif hasattr(image, 'startswith') and image.startswith('http'):
                image = {'url': image}
            else:
                module.fail_json(
                    msg="BearyChat doesn't have support for this kind of "
                        "attachment image")
            target_images.append(image)
    return attachment
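
# Illustration (hypothetical values): every entry of `images` is normalized to
# a dict with a single 'url' key, so these two calls produce the same result:
#   build_payload_for_bearychat_attachment(module, None, None, None, 'http://example.com/a.png')
#   build_payload_for_bearychat_attachment(module, None, None, None, [{'url': 'http://example.com/a.png'}])
# both return {'images': [{'url': 'http://example.com/a.png'}]}.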


def do_notify_bearychat(module, url, payload):
    response, info = fetch_url(module, url, data=payload)
    if info['status'] != 200:
        url_info = urlparse(url)
        obscured_incoming_webhook = urlunparse(
            (url_info.scheme, url_info.netloc, '[obscured]', '', '', ''))
        module.fail_json(
            msg="failed to send %s to %s: %s" % (
                payload, obscured_incoming_webhook, info['msg']))


def main():
    module = AnsibleModule(argument_spec={
        'url': dict(type='str', required=True, no_log=True),
        'text': dict(type='str'),
        'markdown': dict(default=True, type='bool'),
        'channel': dict(type='str'),
        'attachments': dict(type='list', elements='dict'),
    })

    if not HAS_URLPARSE:
        module.fail_json(msg='urlparse is not installed')

    url = module.params['url']
    text = module.params['text']
    markdown = module.params['markdown']
    channel = module.params['channel']
    attachments = module.params['attachments']

    payload = build_payload_for_bearychat(
        module, text, markdown, channel, attachments)
    do_notify_bearychat(module, url, payload)

    module.exit_json(msg="OK")


if __name__ == '__main__':
    main()
@@ -0,0 +1,219 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: bigpanda
author: "Hagai Kariti (@hkariti)"
short_description: Notify BigPanda about deployments
description:
  - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls.
options:
  component:
    type: str
    description:
      - "The name of the component being deployed. Ex: billing"
    required: true
    aliases: ['name']
  version:
    type: str
    description:
      - The deployment version.
    required: true
  token:
    type: str
    description:
      - API token.
    required: true
  state:
    type: str
    description:
      - State of the deployment.
    required: true
    choices: ['started', 'finished', 'failed']
  hosts:
    type: str
    description:
      - Name of the affected host. Can be a list.
      - If not specified, it defaults to the remote system's hostname.
    required: false
    aliases: ['host']
  env:
    type: str
    description:
      - The environment name, typically 'production', 'staging', etc.
    required: false
  owner:
    type: str
    description:
      - The person responsible for the deployment.
    required: false
  description:
    type: str
    description:
      - Free text description of the deployment.
    required: false
  url:
    type: str
    description:
      - Base URL of the API server.
    required: false
    default: https://api.bigpanda.io
  validate_certs:
    description:
      - If C(false), SSL certificates for the target url will not be validated. This should only be used
        on personally controlled sites using self-signed certificates.
    required: false
    default: true
    type: bool
  deployment_message:
    type: str
    description:
      - Message about the deployment.
    version_added: '0.2.0'
  source_system:
    type: str
    description:
      - Source system used in the requests to the API.
    default: ansible

# informational: requirements for nodes
requirements: [ ]
'''

EXAMPLES = '''
- name: Notify BigPanda about a deployment
  community.general.bigpanda:
    component: myapp
    version: '1.3'
    token: '{{ bigpanda_token }}'
    state: started

- name: Notify BigPanda about a deployment
  community.general.bigpanda:
    component: myapp
    version: '1.3'
    token: '{{ bigpanda_token }}'
    state: finished

# If outside servers aren't reachable from your machine, use delegate_to and override hosts:
- name: Notify BigPanda about a deployment
  community.general.bigpanda:
    component: myapp
    version: '1.3'
    token: '{{ bigpanda_token }}'
    hosts: '{{ ansible_hostname }}'
    state: started
  delegate_to: localhost
  register: deployment

- name: Notify BigPanda about a deployment
  community.general.bigpanda:
    component: '{{ deployment.component }}'
    version: '{{ deployment.version }}'
    token: '{{ deployment.token }}'
    state: finished
  delegate_to: localhost
'''

# ===========================================
# Module execution.
#
import json
import socket
import traceback

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.urls import fetch_url


def main():

    module = AnsibleModule(
        argument_spec=dict(
            component=dict(required=True, aliases=['name']),
            version=dict(required=True),
            token=dict(required=True, no_log=True),
            state=dict(required=True, choices=['started', 'finished', 'failed']),
            hosts=dict(required=False, aliases=['host']),
            env=dict(required=False),
            owner=dict(required=False),
            description=dict(required=False),
            deployment_message=dict(required=False),
            source_system=dict(required=False, default='ansible'),
            validate_certs=dict(default=True, type='bool'),
            url=dict(required=False, default='https://api.bigpanda.io'),
        ),
        supports_check_mode=True,
    )

    token = module.params['token']
    state = module.params['state']
    url = module.params['url']

    # Build the common request body
    body = dict()
    for k in ('component', 'version', 'hosts'):
        v = module.params[k]
        if v is not None:
            body[k] = v
    if body.get('hosts') is None:
        body['hosts'] = [socket.gethostname()]

    if not isinstance(body['hosts'], list):
        body['hosts'] = [body['hosts']]
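
    # Illustration (hypothetical values): with component=myapp, version=1.3 and
    # no explicit hosts, the common body built above looks like
    #   {'component': 'myapp', 'version': '1.3', 'hosts': ['web-01.example.com']}
    # where 'web-01.example.com' stands in for socket.gethostname().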

    # Insert state-specific attributes to body
    if state == 'started':
        for k in ('source_system', 'env', 'owner', 'description'):
            v = module.params[k]
            if v is not None:
                body[k] = v

        request_url = url + '/data/events/deployments/start'
    else:
        message = module.params['deployment_message']
        if message is not None:
            body['errorMessage'] = message

        if state == 'finished':
            body['status'] = 'success'
        else:
            body['status'] = 'failure'

        request_url = url + '/data/events/deployments/end'

    # Build the deployment object we return
    deployment = dict(token=token, url=url)
    deployment.update(body)
    if 'errorMessage' in deployment:
        message = deployment.pop('errorMessage')
        deployment['message'] = message

    # If we're in check mode, just exit pretending like we succeeded
    if module.check_mode:
        module.exit_json(changed=True, **deployment)

    # Send the data to bigpanda
    data = json.dumps(body)
    headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
    try:
        response, info = fetch_url(module, request_url, data=data, headers=headers)
        if info['status'] == 200:
            module.exit_json(changed=True, **deployment)
        else:
            module.fail_json(msg=json.dumps(info))
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())


if __name__ == '__main__':
    main()
@@ -0,0 +1,275 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = r'''
---
module: bitbucket_access_key
short_description: Manages Bitbucket repository access keys
description:
  - Manages Bitbucket repository access keys (also called deploy keys).
author:
  - Evgeniy Krysanov (@catcombo)
extends_documentation_fragment:
  - community.general.bitbucket
options:
  repository:
    description:
      - The repository name.
    type: str
    required: true
  workspace:
    description:
      - The repository owner.
      - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user).
    type: str
    required: true
  key:
    description:
      - The SSH public key.
    type: str
  label:
    description:
      - The key label.
    type: str
    required: true
  state:
    description:
      - Indicates desired state of the access key.
    type: str
    required: true
    choices: [ absent, present ]
notes:
  - Bitbucket OAuth consumer or App password should have permissions to read and administrate account repositories.
  - Check mode is supported.
'''

EXAMPLES = r'''
- name: Create access key
  community.general.bitbucket_access_key:
    repository: 'bitbucket-repo'
    workspace: bitbucket_workspace
    key: '{{lookup("file", "bitbucket.pub") }}'
    label: 'Bitbucket'
    state: present

- name: Delete access key
  community.general.bitbucket_access_key:
    repository: bitbucket-repo
    workspace: bitbucket_workspace
    label: Bitbucket
    state: absent
'''

RETURN = r''' # '''

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper

error_messages = {
    'required_key': '`key` is required when the `state` is `present`',
    'required_permission': 'OAuth consumer `client_id` should have permissions to read and administrate the repository',
    'invalid_workspace_or_repo': 'Invalid `repository` or `workspace`',
    'invalid_key': 'Invalid SSH key or key is already in use',
}

BITBUCKET_API_ENDPOINTS = {
    'deploy-key-list': '%s/2.0/repositories/{workspace}/{repo_slug}/deploy-keys/' % BitbucketHelper.BITBUCKET_API_URL,
    'deploy-key-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/deploy-keys/{key_id}' % BitbucketHelper.BITBUCKET_API_URL,
}


def get_existing_deploy_key(module, bitbucket):
    """
    Search for an existing deploy key on Bitbucket
    with the label specified in module param `label`

    :param module: instance of the :class:`AnsibleModule`
    :param bitbucket: instance of the :class:`BitbucketHelper`
    :return: existing deploy key or None if not found
    :rtype: dict or None

    Return example::

        {
            "id": 123,
            "label": "mykey",
            "created_on": "2019-03-23T10:15:21.517377+00:00",
            "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
            "type": "deploy_key",
            "comment": "",
            "last_used": None,
            "repository": {
                "links": {
                    "self": {
                        "href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
                    },
                    "html": {
                        "href": "https://bitbucket.org/mleu/test"
                    },
                    "avatar": {
                        "href": "..."
                    }
                },
                "type": "repository",
                "name": "test",
                "full_name": "mleu/test",
                "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
            },
            "links": {
                "self": {
                    "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
                }
            },
        }
    """
    content = {
        'next': BITBUCKET_API_ENDPOINTS['deploy-key-list'].format(
            workspace=module.params['workspace'],
            repo_slug=module.params['repository'],
        )
    }

    # Look through all response pages in search of the deploy key we need
    while 'next' in content:
        info, content = bitbucket.request(
            api_url=content['next'],
            method='GET',
        )

        if info['status'] == 404:
            module.fail_json(msg=error_messages['invalid_workspace_or_repo'])

        if info['status'] == 403:
            module.fail_json(msg=error_messages['required_permission'])

        if info['status'] != 200:
            module.fail_json(msg='Failed to retrieve the list of deploy keys: {0}'.format(info))

        res = next(iter(filter(lambda v: v['label'] == module.params['label'], content['values'])), None)

        if res is not None:
            return res

    return None
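
# Pagination note (as implemented in the loop above): Bitbucket v2 list
# responses carry a 'values' array plus, while more pages remain, a 'next' key
# with the URL of the following page; the loop keeps following 'next' until
# the key disappears or the wanted label turns up.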


def create_deploy_key(module, bitbucket):
    info, content = bitbucket.request(
        api_url=BITBUCKET_API_ENDPOINTS['deploy-key-list'].format(
            workspace=module.params['workspace'],
            repo_slug=module.params['repository'],
        ),
        method='POST',
        data={
            'key': module.params['key'],
            'label': module.params['label'],
        },
    )

    if info['status'] == 404:
        module.fail_json(msg=error_messages['invalid_workspace_or_repo'])

    if info['status'] == 403:
        module.fail_json(msg=error_messages['required_permission'])

    if info['status'] == 400:
        module.fail_json(msg=error_messages['invalid_key'])

    if info['status'] != 200:
        module.fail_json(msg='Failed to create deploy key `{label}`: {info}'.format(
            label=module.params['label'],
            info=info,
        ))


def delete_deploy_key(module, bitbucket, key_id):
    info, content = bitbucket.request(
        api_url=BITBUCKET_API_ENDPOINTS['deploy-key-detail'].format(
            workspace=module.params['workspace'],
            repo_slug=module.params['repository'],
            key_id=key_id,
        ),
        method='DELETE',
    )

    if info['status'] == 404:
        module.fail_json(msg=error_messages['invalid_workspace_or_repo'])

    if info['status'] == 403:
        module.fail_json(msg=error_messages['required_permission'])

    if info['status'] != 204:
        module.fail_json(msg='Failed to delete deploy key `{label}`: {info}'.format(
            label=module.params['label'],
            info=info,
        ))


def main():
    argument_spec = BitbucketHelper.bitbucket_argument_spec()
    argument_spec.update(
        repository=dict(type='str', required=True),
        workspace=dict(type='str', required=True),
        key=dict(type='str', no_log=False),
        label=dict(type='str', required=True),
        state=dict(type='str', choices=['present', 'absent'], required=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=BitbucketHelper.bitbucket_required_one_of(),
        required_together=BitbucketHelper.bitbucket_required_together(),
    )

    bitbucket = BitbucketHelper(module)

    key = module.params['key']
    state = module.params['state']

    # Check parameters
    if (key is None) and (state == 'present'):
        module.fail_json(msg=error_messages['required_key'])

    # Retrieve access token for authorized API requests
    bitbucket.fetch_access_token()

    # Retrieve existing deploy key (if any)
    existing_deploy_key = get_existing_deploy_key(module, bitbucket)
    changed = False

    # Create new deploy key in case it doesn't exist yet
    if not existing_deploy_key and (state == 'present'):
        if not module.check_mode:
            create_deploy_key(module, bitbucket)
        changed = True

    # Update deploy key if the old value does not match the new one
    elif existing_deploy_key and (state == 'present'):
        if not key.startswith(existing_deploy_key.get('key')):
            if not module.check_mode:
                # Bitbucket doesn't support updating a key under the same label,
                # so we need to delete the old one first
                delete_deploy_key(module, bitbucket, existing_deploy_key['id'])
                create_deploy_key(module, bitbucket)
            changed = True

    # Delete deploy key
    elif existing_deploy_key and (state == 'absent'):
        if not module.check_mode:
            delete_deploy_key(module, bitbucket, existing_deploy_key['id'])
        changed = True

    module.exit_json(changed=changed)


if __name__ == '__main__':
    main()
@@ -0,0 +1,201 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = r'''
---
module: bitbucket_pipeline_key_pair
short_description: Manages Bitbucket pipeline SSH key pair
description:
  - Manages Bitbucket pipeline SSH key pair.
author:
  - Evgeniy Krysanov (@catcombo)
extends_documentation_fragment:
  - community.general.bitbucket
options:
  repository:
    description:
      - The repository name.
    type: str
    required: true
  workspace:
    description:
      - The repository owner.
      - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user).
    type: str
    required: true
  public_key:
    description:
      - The public key.
    type: str
  private_key:
    description:
      - The private key.
    type: str
  state:
    description:
      - Indicates desired state of the key pair.
    type: str
    required: true
    choices: [ absent, present ]
notes:
  - Check mode is supported.
'''

EXAMPLES = r'''
- name: Create or update SSH key pair
  community.general.bitbucket_pipeline_key_pair:
    repository: 'bitbucket-repo'
    workspace: bitbucket_workspace
    public_key: '{{lookup("file", "bitbucket.pub") }}'
    private_key: '{{lookup("file", "bitbucket") }}'
    state: present

- name: Remove SSH key pair
  community.general.bitbucket_pipeline_key_pair:
    repository: bitbucket-repo
    workspace: bitbucket_workspace
    state: absent
'''

RETURN = r''' # '''

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper

error_messages = {
    'invalid_params': 'Account, repository or SSH key pair was not found',
    'required_keys': '`public_key` and `private_key` are required when the `state` is `present`',
}

BITBUCKET_API_ENDPOINTS = {
    'ssh-key-pair': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/key_pair' % BitbucketHelper.BITBUCKET_API_URL,
}


def get_existing_ssh_key_pair(module, bitbucket):
    """
    Retrieves an existing SSH key pair from the repository
    specified in the module param `repository`

    :param module: instance of the :class:`AnsibleModule`
    :param bitbucket: instance of the :class:`BitbucketHelper`
    :return: existing key pair or None if not found
    :rtype: dict or None

    Return example::

        {
            "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ...2E8HAeT",
            "type": "pipeline_ssh_key_pair"
        }
    """
    api_url = BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
        workspace=module.params['workspace'],
        repo_slug=module.params['repository'],
    )

    info, content = bitbucket.request(
        api_url=api_url,
        method='GET',
    )

    if info['status'] == 404:
        # Account, repository or SSH key pair was not found.
        return None

    return content
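
# API note (as implemented above): unlike the list endpoints of the other
# bitbucket_* modules, the pipeline key-pair resource is a single object, so
# there is no pagination here; a 404 simply means no key pair is configured
# for the repository and is mapped to None instead of failing the module.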


def update_ssh_key_pair(module, bitbucket):
    info, content = bitbucket.request(
        api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
            workspace=module.params['workspace'],
            repo_slug=module.params['repository'],
        ),
        method='PUT',
        data={
            'private_key': module.params['private_key'],
            'public_key': module.params['public_key'],
        },
    )

    if info['status'] == 404:
        module.fail_json(msg=error_messages['invalid_params'])

    if info['status'] != 200:
        module.fail_json(msg='Failed to create or update pipeline SSH key pair: {0}'.format(info))


def delete_ssh_key_pair(module, bitbucket):
    info, content = bitbucket.request(
        api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
            workspace=module.params['workspace'],
            repo_slug=module.params['repository'],
        ),
        method='DELETE',
    )

    if info['status'] == 404:
        module.fail_json(msg=error_messages['invalid_params'])

    if info['status'] != 204:
        module.fail_json(msg='Failed to delete pipeline SSH key pair: {0}'.format(info))


def main():
    argument_spec = BitbucketHelper.bitbucket_argument_spec()
    argument_spec.update(
        repository=dict(type='str', required=True),
        workspace=dict(type='str', required=True),
        public_key=dict(type='str'),
        private_key=dict(type='str', no_log=True),
        state=dict(type='str', choices=['present', 'absent'], required=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=BitbucketHelper.bitbucket_required_one_of(),
        required_together=BitbucketHelper.bitbucket_required_together(),
    )

    bitbucket = BitbucketHelper(module)

    state = module.params['state']
    public_key = module.params['public_key']
    private_key = module.params['private_key']

    # Check parameters
    if ((public_key is None) or (private_key is None)) and (state == 'present'):
        module.fail_json(msg=error_messages['required_keys'])

    # Retrieve access token for authorized API requests
    bitbucket.fetch_access_token()

    # Retrieve existing ssh key
    key_pair = get_existing_ssh_key_pair(module, bitbucket)
    changed = False

    # Create or update key pair
    if (not key_pair or (key_pair.get('public_key') != public_key)) and (state == 'present'):
        if not module.check_mode:
            update_ssh_key_pair(module, bitbucket)
        changed = True

    # Delete key pair
    elif key_pair and (state == 'absent'):
        if not module.check_mode:
            delete_ssh_key_pair(module, bitbucket)
        changed = True

    module.exit_json(changed=changed)


if __name__ == '__main__':
    main()
@@ -0,0 +1,298 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = r'''
---
module: bitbucket_pipeline_known_host
short_description: Manages Bitbucket pipeline known hosts
description:
  - Manages Bitbucket pipeline known hosts under the "SSH Keys" menu.
  - The host fingerprint will be retrieved automatically, but in case of an error, one can use the I(key) field to specify it manually.
author:
  - Evgeniy Krysanov (@catcombo)
extends_documentation_fragment:
  - community.general.bitbucket
requirements:
  - paramiko
options:
  repository:
    description:
      - The repository name.
    type: str
    required: true
  workspace:
    description:
      - The repository owner.
      - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user).
    type: str
    required: true
  name:
    description:
      - The FQDN of the known host.
    type: str
    required: true
  key:
    description:
      - The public key.
    type: str
  state:
    description:
      - Indicates desired state of the record.
    type: str
    required: true
    choices: [ absent, present ]
notes:
  - Check mode is supported.
'''

EXAMPLES = r'''
- name: Create known hosts from the list
  community.general.bitbucket_pipeline_known_host:
    repository: 'bitbucket-repo'
    workspace: bitbucket_workspace
    name: '{{ item }}'
    state: present
  with_items:
    - bitbucket.org
    - example.com

- name: Remove known host
  community.general.bitbucket_pipeline_known_host:
    repository: bitbucket-repo
    workspace: bitbucket_workspace
    name: bitbucket.org
    state: absent

- name: Specify public key file
  community.general.bitbucket_pipeline_known_host:
    repository: bitbucket-repo
    workspace: bitbucket_workspace
    name: bitbucket.org
    key: '{{lookup("file", "bitbucket.pub") }}'
    state: absent
'''

RETURN = r''' # '''

import socket

try:
    import paramiko
    HAS_PARAMIKO = True
except ImportError:
    HAS_PARAMIKO = False

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper

error_messages = {
    'invalid_params': 'Account or repository was not found',
    'unknown_key_type': 'Public key type is unknown',
}

BITBUCKET_API_ENDPOINTS = {
    'known-host-list': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/known_hosts/' % BitbucketHelper.BITBUCKET_API_URL,
    'known-host-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/known_hosts/{known_host_uuid}' % BitbucketHelper.BITBUCKET_API_URL,
}


def get_existing_known_host(module, bitbucket):
    """
    Search for a host in Bitbucket pipelines known hosts
    with the name specified in module param `name`

    :param module: instance of the :class:`AnsibleModule`
    :param bitbucket: instance of the :class:`BitbucketHelper`
    :return: existing host or None if not found
    :rtype: dict or None

    Return example::

        {
            'type': 'pipeline_known_host',
            'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}',
            'hostname': 'bitbucket.org',
            'public_key': {
                'type': 'pipeline_ssh_public_key',
                'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40',
                'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A',
                'key_type': 'ssh-rsa',
                'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw=='
            },
        }
    """
    content = {
        'next': BITBUCKET_API_ENDPOINTS['known-host-list'].format(
            workspace=module.params['workspace'],
            repo_slug=module.params['repository'],
        )
    }

    # Look through all response pages in search of the hostname we need
    while 'next' in content:
        info, content = bitbucket.request(
            api_url=content['next'],
            method='GET',
        )

        if info['status'] == 404:
            module.fail_json(msg='Invalid `repository` or `workspace`.')

        if info['status'] != 200:
            module.fail_json(msg='Failed to retrieve the list of known hosts: {0}'.format(info))

        host = next(iter(filter(lambda v: v['hostname'] == module.params['name'], content['values'])), None)

        if host is not None:
            return host

    return None


def get_host_key(module, hostname):
    """
    Fetches public key for specified host

    :param module: instance of the :class:`AnsibleModule`
    :param hostname: host name
    :return: key type and key content
    :rtype: tuple

    Return example::

        (
            'ssh-rsa',
            'AAAAB3NzaC1yc2EAAAABIwAAA...SBne8+seeFVBoGqzHM9yXw==',
        )
    """
    try:
        sock = socket.socket()
        sock.connect((hostname, 22))
    except socket.error:
        module.fail_json(msg='Error opening socket to {0}'.format(hostname))

    try:
        trans = paramiko.transport.Transport(sock)
        trans.start_client()
        host_key = trans.get_remote_server_key()
    except paramiko.SSHException:
        module.fail_json(msg='SSH error on retrieving {0} server key'.format(hostname))

    trans.close()
    sock.close()

    key_type = host_key.get_name()
    key = host_key.get_base64()

    return key_type, key
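
# Illustration (hypothetical host): get_host_key(module, 'bitbucket.org')
# performs a live SSH handshake on port 22 via paramiko and returns a tuple
# like ('ssh-rsa', 'AAAAB3NzaC1yc2EAAAABIwAAA...'), i.e. the same two fields
# a caller could pass manually through the `key` option as "ssh-rsa AAAA...".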


def create_known_host(module, bitbucket):
    hostname = module.params['name']
    key_param = module.params['key']

    if key_param is None:
        key_type, key = get_host_key(module, hostname)
    elif ' ' in key_param:
        key_type, key = key_param.split(' ', 1)
    else:
        module.fail_json(msg=error_messages['unknown_key_type'])

    info, content = bitbucket.request(
        api_url=BITBUCKET_API_ENDPOINTS['known-host-list'].format(
            workspace=module.params['workspace'],
            repo_slug=module.params['repository'],
        ),
        method='POST',
        data={
            'hostname': hostname,
            'public_key': {
                'key_type': key_type,
                'key': key,
            }
        },
    )

    if info['status'] == 404:
        module.fail_json(msg=error_messages['invalid_params'])

    if info['status'] != 201:
        module.fail_json(msg='Failed to create known host `{hostname}`: {info}'.format(
            hostname=hostname,
            info=info,
        ))


def delete_known_host(module, bitbucket, known_host_uuid):
    info, content = bitbucket.request(
        api_url=BITBUCKET_API_ENDPOINTS['known-host-detail'].format(
            workspace=module.params['workspace'],
            repo_slug=module.params['repository'],
            known_host_uuid=known_host_uuid,
        ),
        method='DELETE',
    )

    if info['status'] == 404:
        module.fail_json(msg=error_messages['invalid_params'])

    if info['status'] != 204:
        module.fail_json(msg='Failed to delete known host `{hostname}`: {info}'.format(
            hostname=module.params['name'],
            info=info,
        ))


def main():
    argument_spec = BitbucketHelper.bitbucket_argument_spec()
    argument_spec.update(
        repository=dict(type='str', required=True),
        workspace=dict(type='str', required=True),
        name=dict(type='str', required=True),
        key=dict(type='str', no_log=False),
        state=dict(type='str', choices=['present', 'absent'], required=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=BitbucketHelper.bitbucket_required_one_of(),
        required_together=BitbucketHelper.bitbucket_required_together(),
    )

    if (module.params['key'] is None) and (not HAS_PARAMIKO):
        module.fail_json(msg='`paramiko` package not found, please install it.')

    bitbucket = BitbucketHelper(module)

    # Retrieve access token for authorized API requests
    bitbucket.fetch_access_token()

    # Retrieve existing known host
    existing_host = get_existing_known_host(module, bitbucket)
    state = module.params['state']
    changed = False

    # Create new host in case it doesn't exist yet
    if not existing_host and (state == 'present'):
        if not module.check_mode:
            create_known_host(module, bitbucket)
        changed = True

    # Delete host
    elif existing_host and (state == 'absent'):
        if not module.check_mode:
            delete_known_host(module, bitbucket, existing_host['uuid'])
        changed = True

    module.exit_json(changed=changed)


if __name__ == '__main__':
    main()
@@ -0,0 +1,270 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = r'''
---
module: bitbucket_pipeline_variable
short_description: Manages Bitbucket pipeline variables
description:
  - Manages Bitbucket pipeline variables.
author:
  - Evgeniy Krysanov (@catcombo)
extends_documentation_fragment:
  - community.general.bitbucket
options:
  repository:
    description:
      - The repository name.
    type: str
    required: true
  workspace:
    description:
      - The repository owner.
      - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user).
    type: str
    required: true
  name:
    description:
      - The pipeline variable name.
    type: str
    required: true
  value:
    description:
      - The pipeline variable value.
    type: str
  secured:
    description:
      - Whether to encrypt the variable value.
    type: bool
    default: false
  state:
    description:
      - Indicates desired state of the variable.
    type: str
    required: true
    choices: [ absent, present ]
notes:
  - Check mode is supported.
  - For secured values, the return parameter C(changed) is always C(True).
'''

EXAMPLES = r'''
- name: Create or update pipeline variables from the list
  community.general.bitbucket_pipeline_variable:
    repository: 'bitbucket-repo'
    workspace: bitbucket_workspace
    name: '{{ item.name }}'
    value: '{{ item.value }}'
    secured: '{{ item.secured }}'
    state: present
  with_items:
    - { name: AWS_ACCESS_KEY, value: ABCD1234, secured: false }
    - { name: AWS_SECRET, value: qwe789poi123vbn0, secured: true }

- name: Remove pipeline variable
  community.general.bitbucket_pipeline_variable:
    repository: bitbucket-repo
    workspace: bitbucket_workspace
    name: AWS_ACCESS_KEY
    state: absent
'''

RETURN = r''' # '''

from ansible.module_utils.basic import AnsibleModule, _load_params
from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper

error_messages = {
    'required_value': '`value` is required when the `state` is `present`',
}

BITBUCKET_API_ENDPOINTS = {
    'pipeline-variable-list': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/' % BitbucketHelper.BITBUCKET_API_URL,
    'pipeline-variable-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/{variable_uuid}' % BitbucketHelper.BITBUCKET_API_URL,
}


def get_existing_pipeline_variable(module, bitbucket):
    """
    Search for a pipeline variable

    :param module: instance of the :class:`AnsibleModule`
    :param bitbucket: instance of the :class:`BitbucketHelper`
    :return: existing variable or None if not found
    :rtype: dict or None

    Return example::

        {
            'name': 'AWS_ACCESS_OBKEY_ID',
            'value': 'x7HU80-a2',
            'type': 'pipeline_variable',
            'secured': False,
            'uuid': '{9ddb0507-439a-495a-99f3-5464f15128127}'
        }

    The `value` key in the dict is absent in case of a secured variable.
    """
    variables_base_url = BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format(
        workspace=module.params['workspace'],
        repo_slug=module.params['repository'],
    )
    # Look through all response pages in search of the variable we need
    page = 1
    while True:
        next_url = "%s?page=%s" % (variables_base_url, page)
        info, content = bitbucket.request(
            api_url=next_url,
            method='GET',
        )

        if info['status'] == 404:
            module.fail_json(msg='Invalid `repository` or `workspace`.')

        if info['status'] != 200:
            module.fail_json(msg='Failed to retrieve the list of pipeline variables: {0}'.format(info))

        # We are at the end of the list
        if 'pagelen' in content and content['pagelen'] == 0:
            return None

        page += 1
        var = next(iter(filter(lambda v: v['key'] == module.params['name'], content['values'])), None)

        if var is not None:
            var['name'] = var.pop('key')
            return var
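
# Pagination note (as implemented above): this endpoint is walked with an
# explicit ?page=N query parameter instead of following a 'next' link, and the
# search ends when a response reports 'pagelen' of 0, i.e. an empty page.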


def create_pipeline_variable(module, bitbucket):
    info, content = bitbucket.request(
        api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format(
            workspace=module.params['workspace'],
            repo_slug=module.params['repository'],
        ),
        method='POST',
        data={
            'key': module.params['name'],
            'value': module.params['value'],
            'secured': module.params['secured'],
        },
    )

    if info['status'] != 201:
        module.fail_json(msg='Failed to create pipeline variable `{name}`: {info}'.format(
            name=module.params['name'],
            info=info,
        ))


def update_pipeline_variable(module, bitbucket, variable_uuid):
    info, content = bitbucket.request(
        api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format(
            workspace=module.params['workspace'],
            repo_slug=module.params['repository'],
            variable_uuid=variable_uuid,
        ),
        method='PUT',
        data={
            'value': module.params['value'],
            'secured': module.params['secured'],
        },
    )

    if info['status'] != 200:
        module.fail_json(msg='Failed to update pipeline variable `{name}`: {info}'.format(
            name=module.params['name'],
            info=info,
        ))


def delete_pipeline_variable(module, bitbucket, variable_uuid):
    info, content = bitbucket.request(
        api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format(
            workspace=module.params['workspace'],
            repo_slug=module.params['repository'],
            variable_uuid=variable_uuid,
        ),
        method='DELETE',
    )

    if info['status'] != 204:
        module.fail_json(msg='Failed to delete pipeline variable `{name}`: {info}'.format(
            name=module.params['name'],
            info=info,
        ))


class BitBucketPipelineVariable(AnsibleModule):
    def __init__(self, *args, **kwargs):
        params = _load_params() or {}
        if params.get('secured'):
            kwargs['argument_spec']['value'].update({'no_log': True})
        super(BitBucketPipelineVariable, self).__init__(*args, **kwargs)
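
# Design note (as implemented above): no_log normally has to be declared
# statically in the argument spec. The subclass peeks at the raw parameters
# via _load_params() before the spec is processed, so `value` is masked in
# logs only when `secured` is actually set to true by the caller.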


def main():
    argument_spec = BitbucketHelper.bitbucket_argument_spec()
    argument_spec.update(
        repository=dict(type='str', required=True),
        workspace=dict(type='str', required=True),
        name=dict(type='str', required=True),
        value=dict(type='str'),
        secured=dict(type='bool', default=False),
        state=dict(type='str', choices=['present', 'absent'], required=True),
    )
    module = BitBucketPipelineVariable(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=BitbucketHelper.bitbucket_required_one_of(),
        required_together=BitbucketHelper.bitbucket_required_together(),
    )

    bitbucket = BitbucketHelper(module)

    value = module.params['value']
    state = module.params['state']
    secured = module.params['secured']

    # Check parameters
    if (value is None) and (state == 'present'):
        module.fail_json(msg=error_messages['required_value'])

    # Retrieve access token for authorized API requests
    bitbucket.fetch_access_token()

    # Retrieve existing pipeline variable (if any)
    existing_variable = get_existing_pipeline_variable(module, bitbucket)
    changed = False

    # Create new variable in case it doesn't exist yet
    if not existing_variable and (state == 'present'):
        if not module.check_mode:
            create_pipeline_variable(module, bitbucket)
        changed = True

    # Update variable if it is secured or the old value does not match the new one
    elif existing_variable and (state == 'present'):
        if (existing_variable['secured'] != secured) or (existing_variable.get('value') != value):
            if not module.check_mode:
                update_pipeline_variable(module, bitbucket, existing_variable['uuid'])
            changed = True

    # Delete variable
    elif existing_variable and (state == 'absent'):
        if not module.check_mode:
            delete_pipeline_variable(module, bitbucket, existing_variable['uuid'])
        changed = True

    module.exit_json(changed=changed)


if __name__ == '__main__':
    main()
@@ -0,0 +1,229 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2014, Michael Warkentin <mwarkentin@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: bower
short_description: Manage bower packages with bower
description:
  - Manage bower packages with bower
author: "Michael Warkentin (@mwarkentin)"
options:
  name:
    type: str
    description:
      - The name of a bower package to install
  offline:
    description:
      - Install packages from local cache, if the packages were installed before
    type: bool
    default: false
  production:
    description:
      - Install with --production flag
    type: bool
    default: false
  path:
    type: path
    description:
      - The base path where to install the bower packages
    required: true
  relative_execpath:
    type: path
    description:
      - Relative path to bower executable from install path
  state:
    type: str
    description:
      - The state of the bower package
    default: present
    choices: [ "present", "absent", "latest" ]
  version:
    type: str
    description:
      - The version to be installed
'''

EXAMPLES = '''
- name: Install "bootstrap" bower package.
  community.general.bower:
    name: bootstrap

- name: Install "bootstrap" bower package on version 3.1.1.
  community.general.bower:
    name: bootstrap
    version: '3.1.1'

- name: Remove the "bootstrap" bower package.
  community.general.bower:
    name: bootstrap
    state: absent

- name: Install packages based on bower.json.
  community.general.bower:
    path: /app/location

- name: Update packages based on bower.json to their latest version.
  community.general.bower:
    path: /app/location
    state: latest

# install bower locally and run from there
- npm:
    path: /app/location
    name: bower
    global: false
- community.general.bower:
    path: /app/location
    relative_execpath: node_modules/.bin
'''
import json
import os

from ansible.module_utils.basic import AnsibleModule


class Bower(object):
    def __init__(self, module, **kwargs):
        self.module = module
        self.name = kwargs['name']
        self.offline = kwargs['offline']
        self.production = kwargs['production']
        self.path = kwargs['path']
        self.relative_execpath = kwargs['relative_execpath']
        self.version = kwargs['version']

        if kwargs['version']:
            self.name_version = self.name + '#' + self.version
        else:
            self.name_version = self.name

    def _exec(self, args, run_in_check_mode=False, check_rc=True):
        if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
            cmd = []

            if self.relative_execpath:
                cmd.append(os.path.join(self.path, self.relative_execpath, "bower"))
                if not os.path.isfile(cmd[-1]):
                    self.module.fail_json(msg="bower not found at relative path %s" % self.relative_execpath)
            else:
                cmd.append("bower")

            cmd.extend(args)
            cmd.extend(['--config.interactive=false', '--allow-root'])

            if self.name:
                cmd.append(self.name_version)

            if self.offline:
                cmd.append('--offline')

            if self.production:
                cmd.append('--production')

            # If path is specified, cd into that path and run the command.
            cwd = None
            if self.path:
                if not os.path.exists(self.path):
                    os.makedirs(self.path)
                if not os.path.isdir(self.path):
                    self.module.fail_json(msg="path %s is not a directory" % self.path)
                cwd = self.path

            rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
            return out
        return ''

    def list(self):
        cmd = ['list', '--json']

        installed = list()
        missing = list()
        outdated = list()
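        # Query current state even in check mode (run_in_check_mode=True) and
        # tolerate a non-zero exit (check_rc=False): `bower list` still emits
        # parseable JSON when packages are missing or outdated.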
        data = json.loads(self._exec(cmd, True, False))
        if 'dependencies' in data:
            for dep in data['dependencies']:
                dep_data = data['dependencies'][dep]
                if dep_data.get('missing', False):
                    missing.append(dep)
                elif ('version' in dep_data['pkgMeta'] and
                        'update' in dep_data and
                        dep_data['pkgMeta']['version'] != dep_data['update']['latest']):
                    outdated.append(dep)
                elif dep_data.get('incompatible', False):
                    outdated.append(dep)
                else:
                    installed.append(dep)
        # Named dependency not installed
        else:
            missing.append(self.name)

        return installed, missing, outdated

    def install(self):
        return self._exec(['install'])

    def update(self):
        return self._exec(['update'])

    def uninstall(self):
        return self._exec(['uninstall'])


def main():
    arg_spec = dict(
        name=dict(default=None),
        offline=dict(default=False, type='bool'),
        production=dict(default=False, type='bool'),
        path=dict(required=True, type='path'),
        relative_execpath=dict(default=None, required=False, type='path'),
        state=dict(default='present', choices=['present', 'absent', 'latest', ]),
        version=dict(default=None),
    )
    module = AnsibleModule(
        argument_spec=arg_spec
    )

    name = module.params['name']
    offline = module.params['offline']
    production = module.params['production']
    path = module.params['path']
    relative_execpath = module.params['relative_execpath']
    state = module.params['state']
    version = module.params['version']

    if state == 'absent' and not name:
        module.fail_json(msg='uninstalling a package is only available for named packages')

    bower = Bower(module, name=name, offline=offline, production=production, path=path, relative_execpath=relative_execpath, version=version)

    changed = False
    if state == 'present':
        installed, missing, outdated = bower.list()
        if missing:
            changed = True
            bower.install()
    elif state == 'latest':
        installed, missing, outdated = bower.list()
        if missing or outdated:
            changed = True
            bower.update()
    else:  # Absent
        installed, missing, outdated = bower.list()
        if name in installed:
            changed = True
            bower.uninstall()

    module.exit_json(changed=changed)


if __name__ == '__main__':
    main()
@@ -0,0 +1,204 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2015, Tim Hoiberg <tim.hoiberg@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: bundler
short_description: Manage Ruby Gem dependencies with Bundler
description:
  - Manage installation and Gem version dependencies for Ruby using the Bundler gem
options:
  executable:
    type: str
    description:
      - The path to the bundler executable
  state:
    type: str
    description:
      - The desired state of the Gem bundle. C(latest) updates gems to the most recent, acceptable version
    choices: [present, latest]
    default: present
  chdir:
    type: path
    description:
      - The directory to execute the bundler commands from. This directory
        needs to contain a valid Gemfile or .bundle/ directory
      - If not specified, it will default to the temporary working directory
  exclude_groups:
    type: list
    elements: str
    description:
      - A list of Gemfile groups to exclude during operations. This only
        applies when state is C(present). Bundler considers this
        a 'remembered' property for the Gemfile and will automatically exclude
        groups in future operations even if C(exclude_groups) is not set
  clean:
    description:
      - Only applies if state is C(present). If set removes any gems on the
        target host that are not in the gemfile
    type: bool
    default: false
  gemfile:
    type: path
    description:
      - Only applies if state is C(present). The path to the gemfile to use to install gems.
      - If not specified it will default to the Gemfile in current directory
  local:
    description:
      - If set only installs gems from the cache on the target host
    type: bool
    default: false
  deployment_mode:
    description:
      - Only applies if state is C(present). If set it will install gems in
        ./vendor/bundle instead of the default location. Requires a Gemfile.lock
        file to have been created prior
    type: bool
    default: false
  user_install:
    description:
      - Only applies if state is C(present). Installs gems in the local user's cache or for all users
    type: bool
    default: true
  gem_path:
    type: path
    description:
      - Only applies if state is C(present). Specifies the directory to
        install the gems into. If C(chdir) is set then this path is relative to
        C(chdir)
      - If not specified the default RubyGems gem paths will be used.
  binstub_directory:
    type: path
    description:
      - Only applies if state is C(present). Specifies the directory to
        install any gem bins files to. When executed the bin files will run
        within the context of the Gemfile and fail if any required gem
        dependencies are not installed. If C(chdir) is set then this path is
        relative to C(chdir)
  extra_args:
    type: str
    description:
      - A space separated string of additional arguments that can be passed to
        the Bundler command. Refer to the Bundler documentation for more
        information
author: "Tim Hoiberg (@thoiberg)"
'''

EXAMPLES = '''
- name: Install gems from a Gemfile in the current directory
  community.general.bundler:
    state: present
    executable: ~/.rvm/gems/2.1.5/bin/bundle

- name: Exclude the production group from installing
  community.general.bundler:
    state: present
    exclude_groups: production

- name: Install gems into ./vendor/bundle
  community.general.bundler:
    state: present
    deployment_mode: true

- name: Install gems using a Gemfile in another directory
  community.general.bundler:
    state: present
    gemfile: ../rails_project/Gemfile

- name: Update Gemfile in another directory
  community.general.bundler:
    state: latest
    chdir: ~/rails_project
'''

from ansible.module_utils.basic import AnsibleModule


def get_bundler_executable(module):
    if module.params.get('executable'):
        result = module.params.get('executable').split(' ')
    else:
        result = [module.get_bin_path('bundle', True)]
    return result


def main():
    module = AnsibleModule(
        argument_spec=dict(
            executable=dict(default=None, required=False),
            state=dict(default='present', required=False, choices=['present', 'latest']),
            chdir=dict(default=None, required=False, type='path'),
            exclude_groups=dict(default=None, required=False, type='list', elements='str'),
            clean=dict(default=False, required=False, type='bool'),
            gemfile=dict(default=None, required=False, type='path'),
            local=dict(default=False, required=False, type='bool'),
            deployment_mode=dict(default=False, required=False, type='bool'),
            user_install=dict(default=True, required=False, type='bool'),
            gem_path=dict(default=None, required=False, type='path'),
            binstub_directory=dict(default=None, required=False, type='path'),
            extra_args=dict(default=None, required=False),
        ),
        supports_check_mode=True
    )

    state = module.params.get('state')
    chdir = module.params.get('chdir')
    exclude_groups = module.params.get('exclude_groups')
    clean = module.params.get('clean')
    gemfile = module.params.get('gemfile')
    local = module.params.get('local')
    deployment_mode = module.params.get('deployment_mode')
    user_install = module.params.get('user_install')
    gem_path = module.params.get('gem_path')
    binstub_directory = module.params.get('binstub_directory')
    extra_args = module.params.get('extra_args')

    cmd = get_bundler_executable(module)

    if module.check_mode:
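        # `bundle check` exits non-zero when the Gemfile's dependencies are
        # not satisfied, so a non-zero rc means a real run would change state.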
        cmd.append('check')
        rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False)

        module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err)

    if state == 'present':
        cmd.append('install')
        if exclude_groups:
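            # Bundler accepts a colon-separated group list for --without,
            # matching the format it stores in its own configuration.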
            cmd.extend(['--without', ':'.join(exclude_groups)])
        if clean:
            cmd.append('--clean')
        if gemfile:
            cmd.extend(['--gemfile', gemfile])
        if local:
            cmd.append('--local')
        if deployment_mode:
            cmd.append('--deployment')
        if not user_install:
            cmd.append('--system')
        if gem_path:
            cmd.extend(['--path', gem_path])
        if binstub_directory:
            cmd.extend(['--binstubs', binstub_directory])
    else:
        cmd.append('update')
        if local:
            cmd.append('--local')

    if extra_args:
        cmd.extend(extra_args.split(' '))

    rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True)

    module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err)


if __name__ == '__main__':
    main()
@@ -0,0 +1,195 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2013, André Paramés <git@andreparames.com>
# Based on the Git module by Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: bzr
author:
  - André Paramés (@andreparames)
short_description: Deploy software (or files) from bzr branches
description:
  - Manage I(bzr) branches to deploy files or software.
options:
  name:
    description:
      - SSH or HTTP protocol address of the parent branch.
    aliases: [ parent ]
    required: true
    type: str
  dest:
    description:
      - Absolute path of where the branch should be cloned to.
    required: true
    type: path
  version:
    description:
      - What version of the branch to clone. This can be the
        bzr revno or revid.
    default: head
    type: str
  force:
    description:
      - If C(true), any modified files in the working
        tree will be discarded. Before 1.9 the default
        value was C(true).
    type: bool
    default: false
  executable:
    description:
      - Path to bzr executable to use. If not supplied,
        the normal mechanism for resolving binary paths will be used.
    type: str
'''

EXAMPLES = '''
- name: Checkout
  community.general.bzr:
    name: bzr+ssh://foosball.example.org/path/to/branch
    dest: /srv/checkout
    version: 22
'''

import os
import re

from ansible.module_utils.basic import AnsibleModule


class Bzr(object):
    def __init__(self, module, parent, dest, version, bzr_path):
        self.module = module
        self.parent = parent
        self.dest = dest
        self.version = version
        self.bzr_path = bzr_path

    def _command(self, args_list, cwd=None, **kwargs):
        (rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs)
        return (rc, out, err)

    def get_version(self):
        '''samples the version of the bzr branch'''

        cmd = "%s revno" % self.bzr_path
        rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
        revno = stdout.strip()
        return revno

    def clone(self):
        '''makes a new bzr branch if it does not already exist'''
        dest_dirname = os.path.dirname(self.dest)
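        # Best-effort creation of the parent directory; any error (for
        # example, the directory already exists) is deliberately ignored.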
        try:
            os.makedirs(dest_dirname)
        except Exception:
            pass
        if self.version.lower() != 'head':
            args_list = ["branch", "-r", self.version, self.parent, self.dest]
        else:
            args_list = ["branch", self.parent, self.dest]
        return self._command(args_list, check_rc=True, cwd=dest_dirname)

    def has_local_mods(self):

        cmd = "%s status -S" % self.bzr_path
        rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
        lines = stdout.splitlines()

        # Filter out unknown files (lines starting with '??'); wrap in list()
        # so len() works on Python 3, where filter() returns an iterator.
        lines = list(filter(lambda c: not re.search('^\\?\\?.*$', c), lines))
        return len(lines) > 0

    def reset(self, force):
        '''
        Resets the index and working tree to head.
        Discards any changes to tracked files in the working
        tree since that commit.
        '''
        if not force and self.has_local_mods():
            self.module.fail_json(msg="Local modifications exist in branch (force=false).")
        return self._command(["revert"], check_rc=True, cwd=self.dest)

    def fetch(self):
        '''updates branch from remote sources'''
        if self.version.lower() != 'head':
            (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest)
        else:
            (rc, out, err) = self._command(["pull"], cwd=self.dest)
        if rc != 0:
            self.module.fail_json(msg="Failed to pull")
        return (rc, out, err)

    def switch_version(self):
        '''once pulled, switch to a particular revno or revid'''
        if self.version.lower() != 'head':
            args_list = ["revert", "-r", self.version]
        else:
            args_list = ["revert"]
        return self._command(args_list, check_rc=True, cwd=self.dest)


# ===========================================

def main():
    module = AnsibleModule(
        argument_spec=dict(
            dest=dict(type='path', required=True),
            name=dict(type='str', required=True, aliases=['parent']),
            version=dict(type='str', default='head'),
            force=dict(type='bool', default=False),
            executable=dict(type='str'),
        )
    )

    dest = module.params['dest']
    parent = module.params['name']
    version = module.params['version']
    force = module.params['force']
    bzr_path = module.params['executable'] or module.get_bin_path('bzr', True)

    bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf')

    rc, out, err = (0, None, None)

    bzr = Bzr(module, parent, dest, version, bzr_path)

    # if there is no bzr configuration, do a branch operation
    # else pull and switch the version
    before = None
    local_mods = False
    if not os.path.exists(bzrconfig):
        (rc, out, err) = bzr.clone()

    else:
        # else do a pull
        local_mods = bzr.has_local_mods()
        before = bzr.get_version()
        (rc, out, err) = bzr.reset(force)
        if rc != 0:
            module.fail_json(msg=err)
        (rc, out, err) = bzr.fetch()
        if rc != 0:
            module.fail_json(msg=err)

    # switch to version specified regardless of whether
    # we cloned or pulled
    (rc, out, err) = bzr.switch_version()

    # determine if we changed anything
    after = bzr.get_version()
    changed = False

    if before != after or local_mods:
        changed = True

    module.exit_json(changed=changed, before=before, after=after)


if __name__ == '__main__':
    main()
@@ -0,0 +1,155 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: campfire
short_description: Send a message to Campfire
description:
  - Send a message to Campfire.
  - Messages with newlines will result in a "Paste" message being sent.
options:
  subscription:
    type: str
    description:
      - The subscription name to use.
    required: true
  token:
    type: str
    description:
      - API token.
    required: true
  room:
    type: str
    description:
      - Room number to which the message should be sent.
    required: true
  msg:
    type: str
    description:
      - The message body.
    required: true
  notify:
    type: str
    description:
      - Send a notification sound before the message.
    required: false
    choices: ["56k", "bell", "bezos", "bueller", "clowntown",
              "cottoneyejoe", "crickets", "dadgummit", "dangerzone",
              "danielsan", "deeper", "drama", "greatjob", "greyjoy",
              "guarantee", "heygirl", "horn", "horror",
              "inconceivable", "live", "loggins", "makeitso", "noooo",
              "nyan", "ohmy", "ohyeah", "pushit", "rimshot",
              "rollout", "rumble", "sax", "secret", "sexyback",
              "story", "tada", "tmyk", "trololo", "trombone", "unix",
              "vuvuzela", "what", "whoomp", "yeah", "yodel"]

# informational: requirements for nodes
requirements: [ ]
author: "Adam Garside (@fabulops)"
'''

EXAMPLES = '''
- name: Send a message to Campfire
  community.general.campfire:
    subscription: foo
    token: 12345
    room: 123
    msg: Task completed.

- name: Send a message to Campfire
  community.general.campfire:
    subscription: foo
    token: 12345
    room: 123
    notify: loggins
    msg: Task completed ... with feeling.
'''

try:
    from html import escape as html_escape
except ImportError:
    # html.escape is only available on Python 3.2+; fall back to cgi.escape
    import cgi

    def html_escape(text, quote=True):
        return cgi.escape(text, quote)

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url


def main():

    module = AnsibleModule(
        argument_spec=dict(
            subscription=dict(required=True),
            token=dict(required=True, no_log=True),
            room=dict(required=True),
            msg=dict(required=True),
            notify=dict(required=False,
                        choices=["56k", "bell", "bezos", "bueller",
                                 "clowntown", "cottoneyejoe",
                                 "crickets", "dadgummit", "dangerzone",
                                 "danielsan", "deeper", "drama",
                                 "greatjob", "greyjoy", "guarantee",
                                 "heygirl", "horn", "horror",
                                 "inconceivable", "live", "loggins",
                                 "makeitso", "noooo", "nyan", "ohmy",
                                 "ohyeah", "pushit", "rimshot",
                                 "rollout", "rumble", "sax", "secret",
                                 "sexyback", "story", "tada", "tmyk",
                                 "trololo", "trombone", "unix",
                                 "vuvuzela", "what", "whoomp", "yeah",
                                 "yodel"]),
        ),
        supports_check_mode=False
    )

    subscription = module.params["subscription"]
    token = module.params["token"]
    room = module.params["room"]
    msg = module.params["msg"]
    notify = module.params["notify"]

    URI = "https://%s.campfirenow.com" % subscription
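    # XML payload templates: NSTR posts a SoundMessage (the audible
    # notification), MSTR posts a plain text message body.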
    NSTR = "<message><type>SoundMessage</type><body>%s</body></message>"
    MSTR = "<message><body>%s</body></message>"
    AGENT = "Ansible/1.2"

    # Hack to add basic auth username and password the way fetch_url expects
    module.params['url_username'] = token
    module.params['url_password'] = 'X'

    target_url = '%s/room/%s/speak.xml' % (URI, room)
    headers = {'Content-Type': 'application/xml',
               'User-agent': AGENT}

    # Send some audible notification if requested
    if notify:
        response, info = fetch_url(module, target_url, data=NSTR % html_escape(notify), headers=headers)
        if info['status'] not in [200, 201]:
            module.fail_json(msg="unable to send msg: '%s', campfire api"
                                 " returned error code: '%s'" %
                                 (notify, info['status']))

    # Send the message
    response, info = fetch_url(module, target_url, data=MSTR % html_escape(msg), headers=headers)
    if info['status'] not in [200, 201]:
        module.fail_json(msg="unable to send msg: '%s', campfire api"
                             " returned error code: '%s'" %
                             (msg, info['status']))

    module.exit_json(changed=True, room=room, msg=msg, notify=notify)


if __name__ == '__main__':
    main()
@@ -0,0 +1,181 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2014, Nate Coraor <nate@bx.psu.edu>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = r'''
---
module: capabilities
short_description: Manage Linux capabilities
description:
  - This module manipulates files privileges using the Linux capabilities(7) system.
options:
  path:
    description:
      - Specifies the path to the file to be managed.
    type: str
    required: true
    aliases: [ key ]
  capability:
    description:
      - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent))
    type: str
    required: true
    aliases: [ cap ]
  state:
    description:
      - Whether the entry should be present or absent in the file's capabilities.
    type: str
    choices: [ absent, present ]
    default: present
notes:
  - The capabilities system will automatically transform operators and flags into the effective set,
    so for example, C(cap_foo=ep) will probably become C(cap_foo+ep).
  - This module does not attempt to determine the final operator and flags to compare,
    so you will want to ensure that your capabilities argument matches the final capabilities.
author:
  - Nate Coraor (@natefoo)
'''

EXAMPLES = r'''
- name: Set cap_sys_chroot+ep on /foo
  community.general.capabilities:
    path: /foo
    capability: cap_sys_chroot+ep
    state: present

- name: Remove cap_net_bind_service from /bar
  community.general.capabilities:
    path: /bar
    capability: cap_net_bind_service
    state: absent
'''

from ansible.module_utils.basic import AnsibleModule

OPS = ('=', '-', '+')


class CapabilitiesModule(object):
    platform = 'Linux'
    distribution = None

    def __init__(self, module):
        self.module = module
        self.path = module.params['path'].strip()
        self.capability = module.params['capability'].strip().lower()
        self.state = module.params['state']
        self.getcap_cmd = module.get_bin_path('getcap', required=True)
        self.setcap_cmd = module.get_bin_path('setcap', required=True)
        self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present')

        self.run()

    def run(self):

        current = self.getcap(self.path)
        caps = [cap[0] for cap in current]

        if self.state == 'present' and self.capability_tup not in current:
            # need to add capability
            if self.module.check_mode:
                self.module.exit_json(changed=True, msg='capabilities changed')
            else:
                # remove from current cap list if it's already set (but op/flags differ)
                current = list(filter(lambda x: x[0] != self.capability_tup[0], current))
                # add new cap with correct op/flags
                current.append(self.capability_tup)
                self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
        elif self.state == 'absent' and self.capability_tup[0] in caps:
            # need to remove capability
            if self.module.check_mode:
                self.module.exit_json(changed=True, msg='capabilities changed')
            else:
                # remove from current cap list and then set current list
                current = filter(lambda x: x[0] != self.capability_tup[0], current)
                self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
        self.module.exit_json(changed=False, state=self.state)

    def getcap(self, path):
        rval = []
        cmd = "%s -v %s" % (self.getcap_cmd, path)
        rc, stdout, stderr = self.module.run_command(cmd)
        # If file xattrs are set but no caps are set the output will be:
        # '/foo ='
        # If file xattrs are unset the output will be:
        # '/foo'
        # If the file does not exist, the stderr will be (with rc == 0...):
        # '/foo (No such file or directory)'
        if rc != 0 or stderr != "":
            self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
        if stdout.strip() != path:
            if ' =' in stdout:
                # process output of an older version of libcap
                caps = stdout.split(' =')[1].strip().split()
            else:
                # otherwise, we have a newer version here
                # see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git
                caps = stdout.split()[1].strip().split()
            for cap in caps:
                cap = cap.lower()
                # getcap condenses capabilities with the same op/flags into a
                # comma-separated list, so we have to parse that
                if ',' in cap:
                    cap_group = cap.split(',')
                    cap_group[-1], op, flags = self._parse_cap(cap_group[-1])
                    for subcap in cap_group:
                        rval.append((subcap, op, flags))
                else:
                    rval.append(self._parse_cap(cap))
        return rval

    def setcap(self, path, caps):
        caps = ' '.join([''.join(cap) for cap in caps])
        cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path)
        rc, stdout, stderr = self.module.run_command(cmd)
        if rc != 0:
            self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr)
        else:
            return stdout

    def _parse_cap(self, cap, op_required=True):
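        # Splits a capability string on its operator, for example
        # 'cap_sys_chroot+ep' -> ('cap_sys_chroot', '+', 'ep').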
        opind = -1
        try:
            i = 0
            while opind == -1:
                opind = cap.find(OPS[i])
                i += 1
        except Exception:
            if op_required:
                self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS))
            else:
                return (cap, None, None)
        op = cap[opind]
        cap, flags = cap.split(op)
        return (cap, op, flags)


# ==============================================================
# main

def main():
    # defining module
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='str', required=True, aliases=['key']),
            capability=dict(type='str', required=True, aliases=['cap']),
            state=dict(type='str', default='present', choices=['absent', 'present']),
        ),
        supports_check_mode=True,
    )

    CapabilitiesModule(module)


if __name__ == '__main__':
    main()
@@ -0,0 +1,206 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Radek Sprta <mail@radeksprta.eu>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function

__metaclass__ = type


DOCUMENTATION = r"""
---
module: cargo
short_description: Manage Rust packages with cargo
version_added: 4.3.0
description:
  - Manage Rust packages with cargo.
author: "Radek Sprta (@radek-sprta)"
options:
  name:
    description:
      - The name of a Rust package to install.
    type: list
    elements: str
    required: true
  path:
    description: >-
      The base path where to install the Rust packages. Cargo automatically appends
      C(/bin). In other words, C(/usr/local) will become C(/usr/local/bin).
    type: path
  version:
    description: >-
      The version to install. If I(name) contains multiple values, the module will
      try to install all of them in this version.
    type: str
    required: false
  state:
    description:
      - The state of the Rust package.
    required: false
    type: str
    default: present
    choices: [ "present", "absent", "latest" ]
requirements:
  - cargo installed in bin path (recommended /usr/local/bin)
"""

EXAMPLES = r"""
- name: Install "ludusavi" Rust package
  community.general.cargo:
    name: ludusavi

- name: Install "ludusavi" Rust package in version 0.10.0
  community.general.cargo:
    name: ludusavi
    version: '0.10.0'

- name: Install "ludusavi" Rust package to global location
  community.general.cargo:
    name: ludusavi
    path: /usr/local

- name: Remove "ludusavi" Rust package
  community.general.cargo:
    name: ludusavi
    state: absent

- name: Update "ludusavi" Rust package to its latest version
  community.general.cargo:
    name: ludusavi
    state: latest
"""

import os
import re

from ansible.module_utils.basic import AnsibleModule


class Cargo(object):
    def __init__(self, module, **kwargs):
        self.module = module
        self.name = kwargs["name"]
        self.path = kwargs["path"]
        self.state = kwargs["state"]
        self.version = kwargs["version"]

        self.executable = [module.get_bin_path("cargo", True)]

    @property
    def path(self):
        return self._path

    @path.setter
    def path(self, path):
        if path is not None and not os.path.isdir(path):
            self.module.fail_json(msg="Path %s is not a directory" % path)
        self._path = path

    def _exec(
        self, args, run_in_check_mode=False, check_rc=True, add_package_name=True
    ):
        if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
            cmd = self.executable + args
            rc, out, err = self.module.run_command(cmd, check_rc=check_rc)
            return out, err
        return "", ""

    def get_installed(self):
        cmd = ["install", "--list"]
        data, dummy = self._exec(cmd, True, False, False)
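        # `cargo install --list` prints one header line per installed crate,
        # e.g. 'ludusavi v0.10.0:', followed by indented binary names; only
        # the '<name> v<version>:' header lines are matched below.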
        package_regex = re.compile(r"^([\w\-]+) v(.+):$")
        installed = {}
        for line in data.splitlines():
            package_info = package_regex.match(line)
            if package_info:
                installed[package_info.group(1)] = package_info.group(2)

        return installed

    def install(self, packages=None):
        cmd = ["install"]
        cmd.extend(packages or self.name)
        if self.path:
            cmd.append("--root")
            cmd.append(self.path)
        if self.version:
            cmd.append("--version")
            cmd.append(self.version)
        return self._exec(cmd)

    def is_outdated(self, name):
        installed_version = self.get_installed().get(name)

        cmd = ["search", name, "--limit", "1"]
        data, dummy = self._exec(cmd, True, False, False)

        # Default to None so a crate missing from the search results is
        # reported as outdated instead of raising NameError below.
        latest_version = None
        match = re.search(r'"(.+)"', data)
        if match:
            latest_version = match.group(1)

        return installed_version != latest_version

    def uninstall(self, packages=None):
        cmd = ["uninstall"]
        cmd.extend(packages or self.name)
        return self._exec(cmd)


def main():
    arg_spec = dict(
        name=dict(required=True, type="list", elements="str"),
        path=dict(default=None, type="path"),
        state=dict(default="present", choices=["present", "absent", "latest"]),
        version=dict(default=None, type="str"),
    )
    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)

    name = module.params["name"]
    path = module.params["path"]
    state = module.params["state"]
    version = module.params["version"]

    if not name:
        module.fail_json(msg="Package name must be specified")

    # Set LANG env since we parse stdout
    module.run_command_environ_update = dict(
        LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C"
    )

    cargo = Cargo(module, name=name, path=path, state=state, version=version)
    changed, out, err = False, None, None
    installed_packages = cargo.get_installed()
    if state == "present":
        to_install = [
            n
            for n in name
            if (n not in installed_packages)
            or (version and version != installed_packages[n])
        ]
        if to_install:
            changed = True
            out, err = cargo.install(to_install)
    elif state == "latest":
        to_update = [
            n for n in name if n not in installed_packages or cargo.is_outdated(n)
        ]
        if to_update:
            changed = True
            out, err = cargo.install(to_update)
    else:  # absent
        to_uninstall = [n for n in name if n in installed_packages]
        if to_uninstall:
            changed = True
            out, err = cargo.uninstall(to_uninstall)

    module.exit_json(changed=changed, stdout=out, stderr=err)


if __name__ == "__main__":
    main()
@@ -0,0 +1,155 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2016, Jonathan Mainguy <jon@soh.re>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
#
# basis of code taken from the ansible twilio and nexmo modules

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: catapult
short_description: Send a sms / mms using the catapult bandwidth api
description:
  - Allows notifications to be sent using sms / mms via the catapult bandwidth api.
options:
  src:
    type: str
    description:
      - One of your catapult telephone numbers the message should come from (must be in E.164 format, like C(+19195551212)).
    required: true
  dest:
    type: list
    elements: str
    description:
      - The phone number or numbers the message should be sent to (must be in E.164 format, like C(+19195551212)).
    required: true
  msg:
    type: str
    description:
      - The contents of the text message (must be 2048 characters or less).
    required: true
  media:
    type: str
    description:
      - For MMS messages, a media url to the location of the media to be sent with the message.
  user_id:
    type: str
    description:
      - User Id from Api account page.
    required: true
  api_token:
    type: str
    description:
      - Api Token from Api account page.
    required: true
  api_secret:
    type: str
    description:
      - Api Secret from Api account page.
    required: true

author: "Jonathan Mainguy (@Jmainguy)"
notes:
  - Will return changed even if the media url is wrong.
  - Will return changed if the destination number is invalid.

'''

EXAMPLES = '''
- name: Send a mms to multiple users
  community.general.catapult:
    src: "+15035555555"
    dest:
      - "+12525089000"
      - "+12018994225"
    media: "http://example.com/foobar.jpg"
    msg: "Task is complete"
    user_id: "{{ user_id }}"
    api_token: "{{ api_token }}"
    api_secret: "{{ api_secret }}"

- name: Send a sms to a single user
  community.general.catapult:
    src: "+15035555555"
    dest: "+12018994225"
    msg: "Consider yourself notified"
    user_id: "{{ user_id }}"
    api_token: "{{ api_token }}"
    api_secret: "{{ api_secret }}"

'''

RETURN = '''
changed:
    description: Whether the api accepted the message.
    returned: always
    type: bool
    sample: true
'''


import json

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url


def send(module, src, dest, msg, media, user_id, api_token, api_secret):
    """
    Send the message
    """
    AGENT = "Ansible"
    URI = "https://api.catapult.inetwork.com/v1/users/%s/messages" % user_id
    data = {'from': src, 'to': dest, 'text': msg}
    if media:
        data['media'] = media

    headers = {'User-Agent': AGENT, 'Content-type': 'application/json'}

    # Hack module params to have the Basic auth params that fetch_url expects
    module.params['url_username'] = api_token.replace('\n', '')
    module.params['url_password'] = api_secret.replace('\n', '')

    return fetch_url(module, URI, data=json.dumps(data), headers=headers, method="post")


def main():
    module = AnsibleModule(
        argument_spec=dict(
            src=dict(required=True),
            dest=dict(required=True, type='list', elements='str'),
            msg=dict(required=True),
            user_id=dict(required=True),
            api_token=dict(required=True, no_log=True),
            api_secret=dict(required=True, no_log=True),
            media=dict(default=None, required=False),
        ),
    )

    src = module.params['src']
    dest = module.params['dest']
    msg = module.params['msg']
    media = module.params['media']
    user_id = module.params['user_id']
    api_token = module.params['api_token']
    api_secret = module.params['api_secret']

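    # Send one request per destination number; fail on the first non-201
    # response from the API.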
    for number in dest:
        response, info = send(module, src, number, msg, media, user_id, api_token, api_secret)
        if info["status"] != 201:
            body = json.loads(info["body"])
            fail_msg = body["message"]
            module.fail_json(msg=fail_msg)

    changed = True
    module.exit_json(changed=changed)


if __name__ == '__main__':
    main()
@@ -0,0 +1,236 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014-2015, Epic Games, Inc.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: circonus_annotation
short_description: Create an annotation in circonus
description:
    - Create an annotation event with a given category, title and description. Optionally start, end or durations can be provided
author: "Nick Harring (@NickatEpic)"
requirements:
    - requests (either >= 2.0.0 for Python 3, or >= 1.0.0 for Python 2)
notes:
    - Check mode isn't supported.
options:
    api_key:
        type: str
        description:
            - Circonus API key
        required: true
    category:
        type: str
        description:
            - Annotation Category
        required: true
    description:
        type: str
        description:
            - Description of annotation
        required: true
    title:
        type: str
        description:
            - Title of annotation
        required: true
    start:
        type: int
        description:
            - Unix timestamp of event start
            - If not specified, it defaults to I(now).
    stop:
        type: int
        description:
            - Unix timestamp of event end
            - If not specified, it defaults to I(now) + I(duration).
    duration:
        type: int
        description:
            - Duration in seconds of annotation
        default: 0
'''
EXAMPLES = '''
- name: Create a simple annotation event with a source, defaults to start and end time of now
  community.general.circonus_annotation:
    api_key: XXXXXXXXXXXXXXXXX
    title: App Config Change
    description: This is a detailed description of the config change
    category: This category groups like annotations

- name: Create an annotation with a duration of 5 minutes and a default start time of now
  community.general.circonus_annotation:
    api_key: XXXXXXXXXXXXXXXXX
    title: App Config Change
    description: This is a detailed description of the config change
    category: This category groups like annotations
    duration: 300

- name: Create an annotation with an explicit start and stop time
  community.general.circonus_annotation:
    api_key: XXXXXXXXXXXXXXXXX
    title: App Config Change
    description: This is a detailed description of the config change
    category: This category groups like annotations
    start: 1395940006
    stop: 1395954407
'''

RETURN = '''
annotation:
    description: details about the created annotation
    returned: success
    type: complex
    contains:
        _cid:
            description: annotation identifier
            returned: success
            type: str
            sample: /annotation/100000
        _created:
            description: creation timestamp
            returned: success
            type: int
            sample: 1502236928
        _last_modified:
            description: last modification timestamp
            returned: success
            type: int
            sample: 1502236928
        _last_modified_by:
            description: last modified by
            returned: success
            type: str
            sample: /user/1000
        category:
            description: category of the created annotation
            returned: success
            type: str
            sample: alerts
        title:
            description: title of the created annotation
            returned: success
            type: str
            sample: WARNING
        description:
            description: description of the created annotation
            returned: success
            type: str
            sample: Host is down.
        start:
            description: timestamp the annotation applies from
            returned: success
            type: int
            sample: 1502236928
        stop:
            description: timestamp the annotation ends at
            returned: success
            type: int
            sample: 1502236928
        rel_metrics:
            description: Array of metrics related to this annotation, each metrics is a string.
            returned: success
            type: list
            sample:
                - 54321_kbps
'''
import json
import time
import traceback

from ansible_collections.community.general.plugins.module_utils.version import LooseVersion

REQUESTS_IMP_ERR = None
try:
    import requests
    HAS_REQUESTS = True
except ImportError:
    REQUESTS_IMP_ERR = traceback.format_exc()
    HAS_REQUESTS = False

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.six import PY3
from ansible.module_utils.common.text.converters import to_native


def check_requests_dep(module):
    """Check if an adequate requests version is available"""
    if not HAS_REQUESTS:
        module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
    else:
        required_version = '2.0.0' if PY3 else '1.0.0'
        if LooseVersion(requests.__version__) < LooseVersion(required_version):
            module.fail_json(msg="'requests' library version should be >= %s, found: %s." % (required_version, requests.__version__))


def post_annotation(annotation, api_key):
    ''' Takes annotation dict and api_key string'''
    base_url = 'https://api.circonus.com/v2'
    annotate_post_endpoint = '/annotation'
    resp = requests.post(base_url + annotate_post_endpoint,
                         headers=build_headers(api_key), data=json.dumps(annotation))
    resp.raise_for_status()
    return resp


def create_annotation(module):
    ''' Takes ansible module object '''
    annotation = {}
    duration = module.params['duration']
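    # Default the annotation window to [now, now + duration] when explicit
    # start/stop timestamps are not supplied.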
    if module.params['start'] is not None:
        start = module.params['start']
    else:
        start = int(time.time())
    if module.params['stop'] is not None:
        stop = module.params['stop']
    else:
        stop = int(time.time()) + duration
    annotation['start'] = start
    annotation['stop'] = stop
    annotation['category'] = module.params['category']
    annotation['description'] = module.params['description']
    annotation['title'] = module.params['title']
    return annotation


def build_headers(api_token):
    '''Takes api token, returns headers with it included.'''
    headers = {'X-Circonus-App-Name': 'ansible',
               'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token,
               'Accept': 'application/json'}
    return headers


def main():
    '''Main function, dispatches logic'''
    module = AnsibleModule(
        argument_spec=dict(
            start=dict(type='int'),
            stop=dict(type='int'),
            category=dict(required=True),
            title=dict(required=True),
            description=dict(required=True),
            duration=dict(default=0, type='int'),
            api_key=dict(required=True, no_log=True)
        )
    )

    check_requests_dep(module)

    annotation = create_annotation(module)
    try:
        resp = post_annotation(annotation, module.params['api_key'])
    except requests.exceptions.RequestException as e:
        module.fail_json(msg='Request Failed', reason=to_native(e), exception=traceback.format_exc())
    module.exit_json(changed=True, annotation=resp.json())


if __name__ == '__main__':
    main()
@@ -0,0 +1,189 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: cisco_webex
|
||||
short_description: Send a message to a Cisco Webex Teams Room or Individual
|
||||
description:
|
||||
- Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting.
|
||||
author: Drew Rusell (@drew-russell)
|
||||
notes:
|
||||
- The C(recipient_id) type must be valid for the supplied C(recipient_id).
|
||||
- Full API documentation can be found at U(https://developer.webex.com/docs/api/basics).
|
||||
|
||||
options:
|
||||
|
||||
recipient_type:
|
||||
description:
|
||||
- The request parameter you would like to send the message to.
|
||||
- Messages can be sent to either a room or individual (by ID or E-Mail).
|
||||
required: true
|
||||
choices: ['roomId', 'toPersonEmail', 'toPersonId']
|
||||
type: str
|
||||
|
||||
recipient_id:
|
||||
description:
|
||||
- The unique identifier associated with the supplied C(recipient_type).
|
||||
required: true
|
||||
type: str
|
||||
|
||||
msg_type:
|
||||
description:
|
||||
- Specifies how you would like the message formatted.
|
||||
default: text
|
||||
choices: ['text', 'markdown']
|
||||
type: str
|
||||
aliases: ['message_type']
|
||||
|
||||
personal_token:
|
||||
description:
|
||||
- Your personal access token required to validate the Webex Teams API.
|
||||
required: true
|
||||
aliases: ['token']
|
||||
type: str
|
||||
|
||||
msg:
|
||||
description:
|
||||
- The message you would like to send.
|
||||
required: true
|
||||
type: str
|
||||
'''
|
||||
|
||||
EXAMPLES = """
|
||||
# Note: The following examples assume a variable file has been imported
|
||||
# that contains the appropriate information.
|
||||
|
||||
- name: Cisco Webex Teams - Markdown Message to a Room
|
||||
community.general.cisco_webex:
|
||||
recipient_type: roomId
|
||||
recipient_id: "{{ room_id }}"
|
||||
msg_type: markdown
|
||||
personal_token: "{{ token }}"
|
||||
msg: "**Cisco Webex Teams Ansible Module - Room Message in Markdown**"
|
||||
|
||||
- name: Cisco Webex Teams - Text Message to a Room
|
||||
community.general.cisco_webex:
|
||||
recipient_type: roomId
|
||||
recipient_id: "{{ room_id }}"
|
||||
msg_type: text
|
||||
personal_token: "{{ token }}"
|
||||
msg: "Cisco Webex Teams Ansible Module - Room Message in Text"
|
||||
|
||||
- name: Cisco Webex Teams - Text Message by an Individuals ID
|
||||
community.general.cisco_webex:
|
||||
recipient_type: toPersonId
|
||||
recipient_id: "{{ person_id}}"
|
||||
msg_type: text
|
||||
personal_token: "{{ token }}"
|
||||
msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by ID"
|
||||
|
||||
- name: Cisco Webex Teams - Text Message by an Individuals E-Mail Address
|
||||
community.general.cisco_webex:
|
||||
recipient_type: toPersonEmail
|
||||
recipient_id: "{{ person_email }}"
|
||||
msg_type: text
|
||||
personal_token: "{{ token }}"
|
||||
msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by E-Mail"
|
||||
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
status_code:
|
||||
description:
|
||||
- The Response Code returned by the Webex Teams API.
|
||||
- Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
|
||||
returned: always
|
||||
type: int
|
||||
sample: 200
|
||||
|
||||
message:
|
||||
description:
|
||||
- The Response Message returned by the Webex Teams API.
|
||||
- Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
|
||||
returned: always
|
||||
type: str
|
||||
sample: OK (585 bytes)
|
||||
"""
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.urls import fetch_url
|
||||
|
||||
|
||||
def webex_msg(module):
|
||||
"""When check mode is specified, establish a read only connection, that does not return any user specific
|
||||
data, to validate connectivity. In regular mode, send a message to a Cisco Webex Teams Room or Individual"""
|
||||
|
||||
# Ansible Specific Variables
|
||||
results = {}
|
||||
ansible = module.params
|
||||
|
||||
headers = {
|
||||
'Authorization': 'Bearer {0}'.format(ansible['personal_token']),
|
||||
'content-type': 'application/json'
|
||||
}
|
||||
|
||||
if module.check_mode:
|
||||
url = "https://webexapis.com/v1/people/me"
|
||||
payload = None
|
||||
|
||||
else:
|
||||
url = "https://webexapis.com/v1/messages"
|
||||
|
||||
payload = {
|
||||
ansible['recipient_type']: ansible['recipient_id'],
|
||||
ansible['msg_type']: ansible['msg']
|
||||
}
|
||||
|
||||
payload = module.jsonify(payload)
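# Illustrative shape of the serialized payload (hypothetical values), e.g. for
# recipient_type=roomId and msg_type=markdown:
#   {"roomId": "Y2lzY29zcGFyazovL3VzL1JPT00v...", "markdown": "**hello**"}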
|
||||
|
||||
response, info = fetch_url(module, url, data=payload, headers=headers)
|
||||
|
||||
status_code = info['status']
|
||||
msg = info['msg']
|
||||
|
||||
# Module will fail if the response is not 200
|
||||
if status_code != 200:
|
||||
results['failed'] = True
|
||||
results['status_code'] = status_code
|
||||
results['message'] = msg
|
||||
else:
|
||||
results['failed'] = False
|
||||
results['status_code'] = status_code
|
||||
|
||||
if module.check_mode:
|
||||
results['message'] = 'Authentication Successful.'
|
||||
else:
|
||||
results['message'] = msg
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def main():
|
||||
'''Ansible main.'''
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']),
|
||||
recipient_id=dict(required=True, no_log=True),
|
||||
msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']),
|
||||
personal_token=dict(required=True, no_log=True, aliases=['token']),
|
||||
msg=dict(required=True),
|
||||
),
|
||||
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
results = webex_msg(module)
|
||||
|
||||
module.exit_json(**results)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -0,0 +1,346 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2015 CenturyLink
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: clc_aa_policy
|
||||
short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud
|
||||
description:
|
||||
- An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud.
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- The name of the Anti Affinity Policy.
|
||||
type: str
|
||||
required: true
|
||||
location:
|
||||
description:
|
||||
- Datacenter in which the policy lives/should live.
|
||||
type: str
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Whether to create or delete the policy.
|
||||
type: str
|
||||
required: false
|
||||
default: present
|
||||
choices: ['present','absent']
|
||||
requirements:
|
||||
- python = 2.7
|
||||
- requests >= 2.5.0
|
||||
- clc-sdk
|
||||
author: "CLC Runner (@clc-runner)"
|
||||
notes:
|
||||
- To use this module, it is required to set the below environment variables, which enable access to the
CenturyLink Cloud
- CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
- CLC_V2_API_PASSWORD, the account password for the CenturyLink Cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples.
|
||||
|
||||
---
|
||||
- name: Create AA Policy
|
||||
hosts: localhost
|
||||
gather_facts: false
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Create an Anti Affinity Policy
|
||||
community.general.clc_aa_policy:
|
||||
name: Hammer Time
|
||||
location: UK3
|
||||
state: present
|
||||
register: policy
|
||||
|
||||
- name: Debug
|
||||
ansible.builtin.debug:
|
||||
var: policy
|
||||
|
||||
- name: Delete AA Policy
|
||||
hosts: localhost
|
||||
gather_facts: false
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Delete an Anti Affinity Policy
|
||||
community.general.clc_aa_policy:
|
||||
name: Hammer Time
|
||||
location: UK3
|
||||
state: absent
|
||||
register: policy
|
||||
|
||||
- name: Debug
|
||||
ansible.builtin.debug:
|
||||
var: policy
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
policy:
|
||||
description: The anti affinity policy information
|
||||
returned: success
|
||||
type: dict
|
||||
sample:
|
||||
{
|
||||
"id":"1a28dd0988984d87b9cd61fa8da15424",
|
||||
"name":"test_aa_policy",
|
||||
"location":"UC1",
|
||||
"links":[
|
||||
{
|
||||
"rel":"self",
|
||||
"href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424",
|
||||
"verbs":[
|
||||
"GET",
|
||||
"DELETE",
|
||||
"PUT"
|
||||
]
|
||||
},
|
||||
{
|
||||
"rel":"location",
|
||||
"href":"/v2/datacenters/wfad/UC1",
|
||||
"id":"uc1",
|
||||
"name":"UC1 - US West (Santa Clara)"
|
||||
}
|
||||
]
|
||||
}
|
||||
'''
|
||||
|
||||
__version__ = '${version}'
|
||||
|
||||
import os
|
||||
import traceback
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
import requests
|
||||
except ImportError:
|
||||
REQUESTS_IMP_ERR = traceback.format_exc()
|
||||
REQUESTS_FOUND = False
|
||||
else:
|
||||
REQUESTS_FOUND = True
|
||||
|
||||
#
|
||||
# Requires the clc-python-sdk:
|
||||
# sudo pip install clc-sdk
|
||||
#
|
||||
CLC_IMP_ERR = None
|
||||
try:
|
||||
import clc as clc_sdk
|
||||
from clc import CLCException
|
||||
except ImportError:
|
||||
CLC_IMP_ERR = traceback.format_exc()
|
||||
CLC_FOUND = False
|
||||
clc_sdk = None
|
||||
else:
|
||||
CLC_FOUND = True
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
|
||||
|
||||
class ClcAntiAffinityPolicy:
|
||||
|
||||
clc = clc_sdk
|
||||
module = None
|
||||
|
||||
def __init__(self, module):
|
||||
"""
|
||||
Construct module
|
||||
"""
|
||||
self.module = module
|
||||
self.policy_dict = {}
|
||||
|
||||
if not CLC_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'),
|
||||
exception=CLC_IMP_ERR)
|
||||
if not REQUESTS_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('requests'),
|
||||
exception=REQUESTS_IMP_ERR)
|
||||
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||
self.module.fail_json(
|
||||
msg='requests library version should be >= 2.5.0')
|
||||
|
||||
self._set_user_agent(self.clc)
|
||||
|
||||
@staticmethod
|
||||
def _define_module_argument_spec():
|
||||
"""
|
||||
Define the argument spec for the ansible module
|
||||
:return: argument spec dictionary
|
||||
"""
|
||||
argument_spec = dict(
|
||||
name=dict(required=True),
|
||||
location=dict(required=True),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
)
|
||||
return argument_spec
|
||||
|
||||
# Module Behavior Goodness
|
||||
def process_request(self):
|
||||
"""
|
||||
Process the request - Main Code Path
|
||||
:return: exits the module via exit_json or fail_json
|
||||
"""
|
||||
p = self.module.params
|
||||
|
||||
self._set_clc_credentials_from_env()
|
||||
self.policy_dict = self._get_policies_for_datacenter(p)
|
||||
|
||||
if p['state'] == "absent":
|
||||
changed, policy = self._ensure_policy_is_absent(p)
|
||||
else:
|
||||
changed, policy = self._ensure_policy_is_present(p)
|
||||
|
||||
if hasattr(policy, 'data'):
|
||||
policy = policy.data
|
||||
elif hasattr(policy, '__dict__'):
|
||||
policy = policy.__dict__
|
||||
|
||||
self.module.exit_json(changed=changed, policy=policy)
|
||||
|
||||
def _set_clc_credentials_from_env(self):
|
||||
"""
|
||||
Set the CLC Credentials on the sdk by reading environment variables
|
||||
:return: none
|
||||
"""
|
||||
env = os.environ
|
||||
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
|
||||
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
|
||||
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
|
||||
clc_alias = env.get('CLC_ACCT_ALIAS', False)
|
||||
api_url = env.get('CLC_V2_API_URL', False)
|
||||
|
||||
if api_url:
|
||||
self.clc.defaults.ENDPOINT_URL_V2 = api_url
|
||||
|
||||
if v2_api_token and clc_alias:
|
||||
self.clc._LOGIN_TOKEN_V2 = v2_api_token
|
||||
self.clc._V2_ENABLED = True
|
||||
self.clc.ALIAS = clc_alias
|
||||
elif v2_api_username and v2_api_passwd:
|
||||
self.clc.v2.SetCredentials(
|
||||
api_username=v2_api_username,
|
||||
api_passwd=v2_api_passwd)
|
||||
else:
|
||||
return self.module.fail_json(
|
||||
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
|
||||
"environment variables")
|
||||
|
||||
def _get_policies_for_datacenter(self, p):
|
||||
"""
|
||||
Get the Policies for a datacenter by calling the CLC API.
|
||||
:param p: the module parameters dictionary (uses the location key)
|
||||
:return: policies in the datacenter
|
||||
"""
|
||||
response = {}
|
||||
|
||||
policies = self.clc.v2.AntiAffinity.GetAll(location=p['location'])
|
||||
|
||||
for policy in policies:
|
||||
response[policy.name] = policy
|
||||
return response
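# The returned mapping is keyed by policy name, e.g. (illustrative):
#   {'Hammer Time': <clc.v2.AntiAffinity policy object>}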
|
||||
|
||||
def _create_policy(self, p):
|
||||
"""
|
||||
Create an Anti Affinity Policy using the CLC API.
|
||||
:param p: the module parameters dictionary (uses the name and location keys)
|
||||
:return: response dictionary from the CLC API.
|
||||
"""
|
||||
try:
|
||||
return self.clc.v2.AntiAffinity.Create(
|
||||
name=p['name'],
|
||||
location=p['location'])
|
||||
except CLCException as ex:
|
||||
self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format(
|
||||
p['name'], ex.response_text
|
||||
))
|
||||
|
||||
def _delete_policy(self, p):
|
||||
"""
|
||||
Delete an Anti Affinity Policy using the CLC API.
|
||||
:param p: the module parameters dictionary (uses the name key)
|
||||
:return: none
|
||||
"""
|
||||
try:
|
||||
policy = self.policy_dict[p['name']]
|
||||
policy.Delete()
|
||||
except CLCException as ex:
|
||||
self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format(
|
||||
p['name'], ex.response_text
|
||||
))
|
||||
|
||||
def _policy_exists(self, policy_name):
|
||||
"""
|
||||
Check to see if an Anti Affinity Policy exists
|
||||
:param policy_name: name of the policy
|
||||
:return: the policy object if it exists, otherwise False
|
||||
"""
|
||||
if policy_name in self.policy_dict:
|
||||
return self.policy_dict.get(policy_name)
|
||||
|
||||
return False
|
||||
|
||||
def _ensure_policy_is_absent(self, p):
|
||||
"""
|
||||
Makes sure that a policy is absent
|
||||
:param p: the module parameters dictionary
:return: tuple of whether a deletion occurred and None
|
||||
"""
|
||||
changed = False
|
||||
if self._policy_exists(policy_name=p['name']):
|
||||
changed = True
|
||||
if not self.module.check_mode:
|
||||
self._delete_policy(p)
|
||||
return changed, None
|
||||
|
||||
def _ensure_policy_is_present(self, p):
|
||||
"""
|
||||
Ensures that a policy is present
|
||||
:param p: the module parameters dictionary
:return: tuple of whether an addition occurred and the policy that was added
|
||||
"""
|
||||
changed = False
|
||||
policy = self._policy_exists(policy_name=p['name'])
|
||||
if not policy:
|
||||
changed = True
|
||||
policy = None
|
||||
if not self.module.check_mode:
|
||||
policy = self._create_policy(p)
|
||||
return changed, policy
|
||||
|
||||
@staticmethod
|
||||
def _set_user_agent(clc):
|
||||
if hasattr(clc, 'SetRequestsSession'):
|
||||
agent_string = "ClcAnsibleModule/" + __version__
|
||||
ses = requests.Session()
|
||||
ses.headers.update({"Api-Client": agent_string})
|
||||
ses.headers['User-Agent'] += " " + agent_string
|
||||
clc.SetRequestsSession(ses)
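# The session then carries both headers on every SDK call (illustrative):
#   Api-Client: ClcAnsibleModule/${version}
#   User-Agent: python-requests/2.x ClcAnsibleModule/${version}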
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
The main function. Instantiates the module and calls process_request.
|
||||
:return: none
|
||||
"""
|
||||
module = AnsibleModule(
|
||||
argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(),
|
||||
supports_check_mode=True)
|
||||
clc_aa_policy = ClcAntiAffinityPolicy(module)
|
||||
clc_aa_policy.process_request()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,529 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
#
|
||||
# Copyright (c) 2015 CenturyLink
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: clc_alert_policy
|
||||
short_description: Create or Delete Alert Policies at CenturyLink Cloud
|
||||
description:
|
||||
- An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
|
||||
options:
|
||||
alias:
|
||||
description:
|
||||
- The alias of your CLC Account
|
||||
type: str
|
||||
required: true
|
||||
name:
|
||||
description:
|
||||
- The name of the alert policy. This is mutually exclusive with C(id).
|
||||
type: str
|
||||
id:
|
||||
description:
|
||||
- The alert policy id. This is mutually exclusive with C(name).
|
||||
type: str
|
||||
alert_recipients:
|
||||
description:
|
||||
- A list of recipient email addresses to notify for the alert.
|
||||
This is required for state 'present'
|
||||
type: list
|
||||
elements: str
|
||||
metric:
|
||||
description:
|
||||
- The metric on which to measure the condition that will trigger the alert.
|
||||
This is required for state 'present'
|
||||
type: str
|
||||
choices: ['cpu','memory','disk']
|
||||
duration:
|
||||
description:
|
||||
- The length of time in minutes that the condition must exceed the threshold.
|
||||
This is required for state 'present'
|
||||
type: str
|
||||
threshold:
|
||||
description:
|
||||
- The threshold that will trigger the alert when the metric equals or exceeds it.
|
||||
This is required for state 'present'
|
||||
This number represents a percentage and must be a value between 5.0 and 95.0 that is a multiple of 5.0.
|
||||
type: int
|
||||
state:
|
||||
description:
|
||||
- Whether to create or delete the policy.
|
||||
type: str
|
||||
default: present
|
||||
choices: ['present','absent']
|
||||
requirements:
|
||||
- python = 2.7
|
||||
- requests >= 2.5.0
|
||||
- clc-sdk
|
||||
author: "CLC Runner (@clc-runner)"
|
||||
notes:
|
||||
- To use this module, it is required to set the below environment variables, which enable access to the
CenturyLink Cloud
- CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
- CLC_V2_API_PASSWORD, the account password for the CenturyLink Cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples.
|
||||
|
||||
---
|
||||
- name: Create Alert Policy Example
|
||||
hosts: localhost
|
||||
gather_facts: false
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Create an Alert Policy for disk above 80% for 5 minutes
|
||||
community.general.clc_alert_policy:
|
||||
alias: wfad
|
||||
name: 'alert for disk > 80%'
|
||||
alert_recipients:
|
||||
- test1@centurylink.com
|
||||
- test2@centurylink.com
|
||||
metric: 'disk'
|
||||
duration: '00:05:00'
|
||||
threshold: 80
|
||||
state: present
|
||||
register: policy
|
||||
|
||||
- name: Debug
|
||||
ansible.builtin.debug:
var: policy
|
||||
|
||||
- name: Delete Alert Policy Example
|
||||
hosts: localhost
|
||||
gather_facts: false
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Delete an Alert Policy
|
||||
community.general.clc_alert_policy:
|
||||
alias: wfad
|
||||
name: 'alert for disk > 80%'
|
||||
state: absent
|
||||
register: policy
|
||||
|
||||
- name: Debug
|
||||
ansible.builtin.debug:
var: policy
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
policy:
|
||||
description: The alert policy information
|
||||
returned: success
|
||||
type: dict
|
||||
sample:
|
||||
{
|
||||
"actions": [
|
||||
{
|
||||
"action": "email",
|
||||
"settings": {
|
||||
"recipients": [
|
||||
"user1@domain.com",
|
||||
"user1@domain.com"
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"id": "ba54ac54a60d4a4f1ed6d48c1ce240a7",
|
||||
"links": [
|
||||
{
|
||||
"href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7",
|
||||
"rel": "self",
|
||||
"verbs": [
|
||||
"GET",
|
||||
"DELETE",
|
||||
"PUT"
|
||||
]
|
||||
}
|
||||
],
|
||||
"name": "test_alert",
|
||||
"triggers": [
|
||||
{
|
||||
"duration": "00:05:00",
|
||||
"metric": "disk",
|
||||
"threshold": 80.0
|
||||
}
|
||||
]
|
||||
}
|
||||
'''
|
||||
|
||||
__version__ = '${version}'
|
||||
|
||||
import json
|
||||
import os
|
||||
import traceback
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
import requests
|
||||
except ImportError:
|
||||
REQUESTS_IMP_ERR = traceback.format_exc()
|
||||
REQUESTS_FOUND = False
|
||||
else:
|
||||
REQUESTS_FOUND = True
|
||||
|
||||
#
|
||||
# Requires the clc-python-sdk.
|
||||
# sudo pip install clc-sdk
|
||||
#
|
||||
CLC_IMP_ERR = None
|
||||
try:
|
||||
import clc as clc_sdk
|
||||
from clc import APIFailedResponse
|
||||
except ImportError:
|
||||
CLC_IMP_ERR = traceback.format_exc()
|
||||
CLC_FOUND = False
|
||||
clc_sdk = None
|
||||
else:
|
||||
CLC_FOUND = True
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
|
||||
|
||||
class ClcAlertPolicy:
|
||||
|
||||
clc = clc_sdk
|
||||
module = None
|
||||
|
||||
def __init__(self, module):
|
||||
"""
|
||||
Construct module
|
||||
"""
|
||||
self.module = module
|
||||
self.policy_dict = {}
|
||||
|
||||
if not CLC_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
||||
if not REQUESTS_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
||||
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||
self.module.fail_json(
|
||||
msg='requests library version should be >= 2.5.0')
|
||||
|
||||
self._set_user_agent(self.clc)
|
||||
|
||||
@staticmethod
|
||||
def _define_module_argument_spec():
|
||||
"""
|
||||
Define the argument spec for the ansible module
|
||||
:return: argument spec dictionary
|
||||
"""
|
||||
argument_spec = dict(
|
||||
name=dict(),
|
||||
id=dict(),
|
||||
alias=dict(required=True),
|
||||
alert_recipients=dict(type='list', elements='str'),
|
||||
metric=dict(
|
||||
choices=[
|
||||
'cpu',
|
||||
'memory',
|
||||
'disk']),
|
||||
duration=dict(type='str'),
|
||||
threshold=dict(type='int'),
|
||||
state=dict(default='present', choices=['present', 'absent'])
|
||||
)
|
||||
mutually_exclusive = [
|
||||
['name', 'id']
|
||||
]
|
||||
return {'argument_spec': argument_spec,
|
||||
'mutually_exclusive': mutually_exclusive}
|
||||
|
||||
# Module Behavior Goodness
|
||||
def process_request(self):
|
||||
"""
|
||||
Process the request - Main Code Path
|
||||
:return: exits the module via exit_json or fail_json
|
||||
"""
|
||||
p = self.module.params
|
||||
|
||||
self._set_clc_credentials_from_env()
|
||||
self.policy_dict = self._get_alert_policies(p['alias'])
|
||||
|
||||
if p['state'] == 'present':
|
||||
changed, policy = self._ensure_alert_policy_is_present()
|
||||
else:
|
||||
changed, policy = self._ensure_alert_policy_is_absent()
|
||||
|
||||
self.module.exit_json(changed=changed, policy=policy)
|
||||
|
||||
def _set_clc_credentials_from_env(self):
|
||||
"""
|
||||
Set the CLC Credentials on the sdk by reading environment variables
|
||||
:return: none
|
||||
"""
|
||||
env = os.environ
|
||||
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
|
||||
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
|
||||
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
|
||||
clc_alias = env.get('CLC_ACCT_ALIAS', False)
|
||||
api_url = env.get('CLC_V2_API_URL', False)
|
||||
|
||||
if api_url:
|
||||
self.clc.defaults.ENDPOINT_URL_V2 = api_url
|
||||
|
||||
if v2_api_token and clc_alias:
|
||||
self.clc._LOGIN_TOKEN_V2 = v2_api_token
|
||||
self.clc._V2_ENABLED = True
|
||||
self.clc.ALIAS = clc_alias
|
||||
elif v2_api_username and v2_api_passwd:
|
||||
self.clc.v2.SetCredentials(
|
||||
api_username=v2_api_username,
|
||||
api_passwd=v2_api_passwd)
|
||||
else:
|
||||
return self.module.fail_json(
|
||||
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
|
||||
"environment variables")
|
||||
|
||||
def _ensure_alert_policy_is_present(self):
|
||||
"""
|
||||
Ensures that the alert policy is present
|
||||
:return: (changed, policy)
|
||||
changed: A flag representing if anything is modified
|
||||
policy: the created/updated alert policy
|
||||
"""
|
||||
changed = False
|
||||
p = self.module.params
|
||||
policy_name = p.get('name')
|
||||
|
||||
if not policy_name:
|
||||
self.module.fail_json(msg='Policy name is required')
|
||||
policy = self._alert_policy_exists(policy_name)
|
||||
if not policy:
|
||||
changed = True
|
||||
policy = None
|
||||
if not self.module.check_mode:
|
||||
policy = self._create_alert_policy()
|
||||
else:
|
||||
changed_u, policy = self._ensure_alert_policy_is_updated(policy)
|
||||
if changed_u:
|
||||
changed = True
|
||||
return changed, policy
|
||||
|
||||
def _ensure_alert_policy_is_absent(self):
|
||||
"""
|
||||
Ensures that the alert policy is absent
|
||||
:return: (changed, None)
|
||||
changed: A flag representing if anything is modified
|
||||
"""
|
||||
changed = False
|
||||
p = self.module.params
|
||||
alert_policy_id = p.get('id')
|
||||
alert_policy_name = p.get('name')
|
||||
alias = p.get('alias')
|
||||
if not alert_policy_id and not alert_policy_name:
|
||||
self.module.fail_json(
|
||||
msg='Either alert policy id or policy name is required')
|
||||
if not alert_policy_id and alert_policy_name:
|
||||
alert_policy_id = self._get_alert_policy_id(
|
||||
self.module,
|
||||
alert_policy_name)
|
||||
if alert_policy_id and alert_policy_id in self.policy_dict:
|
||||
changed = True
|
||||
if not self.module.check_mode:
|
||||
self._delete_alert_policy(alias, alert_policy_id)
|
||||
return changed, None
|
||||
|
||||
def _ensure_alert_policy_is_updated(self, alert_policy):
|
||||
"""
|
||||
Ensures the alert policy is updated if anything is changed in the alert policy configuration
|
||||
:param alert_policy: the target alert policy
|
||||
:return: (changed, policy)
|
||||
changed: A flag representing if anything is modified
|
||||
policy: the updated alert policy
|
||||
"""
|
||||
changed = False
|
||||
p = self.module.params
|
||||
alert_policy_id = alert_policy.get('id')
|
||||
email_list = p.get('alert_recipients')
|
||||
metric = p.get('metric')
|
||||
duration = p.get('duration')
|
||||
threshold = p.get('threshold')
|
||||
policy = alert_policy
|
||||
if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \
|
||||
(duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \
|
||||
(threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))):
|
||||
changed = True
|
||||
elif email_list:
|
||||
t_email_list = list(
|
||||
alert_policy.get('actions')[0].get('settings').get('recipients'))
|
||||
if set(email_list) != set(t_email_list):
|
||||
changed = True
|
||||
if changed and not self.module.check_mode:
|
||||
policy = self._update_alert_policy(alert_policy_id)
|
||||
return changed, policy
|
||||
|
||||
def _get_alert_policies(self, alias):
|
||||
"""
|
||||
Get the alert policies for account alias by calling the CLC API.
|
||||
:param alias: the account alias
|
||||
:return: the alert policies for the account alias
|
||||
"""
|
||||
response = {}
|
||||
|
||||
policies = self.clc.v2.API.Call('GET',
|
||||
'/v2/alertPolicies/%s'
|
||||
% alias)
|
||||
|
||||
for policy in policies.get('items'):
|
||||
response[policy.get('id')] = policy
|
||||
return response
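# The returned mapping is keyed by policy id, e.g. (illustrative):
#   {'ba54ac54a60d4a4f1ed6d48c1ce240a7': {'name': 'test_alert', ...}}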
|
||||
|
||||
def _create_alert_policy(self):
|
||||
"""
|
||||
Create an alert Policy using the CLC API.
|
||||
:return: response dictionary from the CLC API.
|
||||
"""
|
||||
p = self.module.params
|
||||
alias = p['alias']
|
||||
email_list = p['alert_recipients']
|
||||
metric = p['metric']
|
||||
duration = p['duration']
|
||||
threshold = p['threshold']
|
||||
policy_name = p['name']
|
||||
arguments = json.dumps(
|
||||
{
|
||||
'name': policy_name,
|
||||
'actions': [{
|
||||
'action': 'email',
|
||||
'settings': {
|
||||
'recipients': email_list
|
||||
}
|
||||
}],
|
||||
'triggers': [{
|
||||
'metric': metric,
|
||||
'duration': duration,
|
||||
'threshold': threshold
|
||||
}]
|
||||
}
|
||||
)
|
||||
try:
|
||||
result = self.clc.v2.API.Call(
|
||||
'POST',
|
||||
'/v2/alertPolicies/%s' % alias,
|
||||
arguments)
|
||||
except APIFailedResponse as e:
|
||||
return self.module.fail_json(
|
||||
msg='Unable to create alert policy "{0}". {1}'.format(
|
||||
policy_name, str(e.response_text)))
|
||||
return result
|
||||
|
||||
def _update_alert_policy(self, alert_policy_id):
|
||||
"""
|
||||
Update alert policy using the CLC API.
|
||||
:param alert_policy_id: The clc alert policy id
|
||||
:return: response dictionary from the CLC API.
|
||||
"""
|
||||
p = self.module.params
|
||||
alias = p['alias']
|
||||
email_list = p['alert_recipients']
|
||||
metric = p['metric']
|
||||
duration = p['duration']
|
||||
threshold = p['threshold']
|
||||
policy_name = p['name']
|
||||
arguments = json.dumps(
|
||||
{
|
||||
'name': policy_name,
|
||||
'actions': [{
|
||||
'action': 'email',
|
||||
'settings': {
|
||||
'recipients': email_list
|
||||
}
|
||||
}],
|
||||
'triggers': [{
|
||||
'metric': metric,
|
||||
'duration': duration,
|
||||
'threshold': threshold
|
||||
}]
|
||||
}
|
||||
)
|
||||
try:
|
||||
result = self.clc.v2.API.Call(
|
||||
'PUT', '/v2/alertPolicies/%s/%s' %
|
||||
(alias, alert_policy_id), arguments)
|
||||
except APIFailedResponse as e:
|
||||
return self.module.fail_json(
|
||||
msg='Unable to update alert policy "{0}". {1}'.format(
|
||||
policy_name, str(e.response_text)))
|
||||
return result
|
||||
|
||||
def _delete_alert_policy(self, alias, policy_id):
|
||||
"""
|
||||
Delete an alert policy using the CLC API.
|
||||
:param alias : the account alias
|
||||
:param policy_id: the alert policy id
|
||||
:return: response dictionary from the CLC API.
|
||||
"""
|
||||
try:
|
||||
result = self.clc.v2.API.Call(
|
||||
'DELETE', '/v2/alertPolicies/%s/%s' %
|
||||
(alias, policy_id), None)
|
||||
except APIFailedResponse as e:
|
||||
return self.module.fail_json(
|
||||
msg='Unable to delete alert policy id "{0}". {1}'.format(
|
||||
policy_id, str(e.response_text)))
|
||||
return result
|
||||
|
||||
def _alert_policy_exists(self, policy_name):
|
||||
"""
|
||||
Check to see if an alert policy exists
|
||||
:param policy_name: name of the alert policy
|
||||
:return: the policy dictionary if it exists, otherwise False
|
||||
"""
|
||||
result = False
|
||||
for policy_id in self.policy_dict:
|
||||
if self.policy_dict.get(policy_id).get('name') == policy_name:
|
||||
result = self.policy_dict.get(policy_id)
|
||||
return result
|
||||
|
||||
def _get_alert_policy_id(self, module, alert_policy_name):
|
||||
"""
|
||||
Retrieves the alert policy id of the account based on the name of the policy
|
||||
:param module: the AnsibleModule object
|
||||
:param alert_policy_name: the alert policy name
|
||||
:return: alert_policy_id: The alert policy id
|
||||
"""
|
||||
alert_policy_id = None
|
||||
for policy_id in self.policy_dict:
|
||||
if self.policy_dict.get(policy_id).get('name') == alert_policy_name:
|
||||
if not alert_policy_id:
|
||||
alert_policy_id = policy_id
|
||||
else:
|
||||
return module.fail_json(
|
||||
msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
|
||||
return alert_policy_id
|
||||
|
||||
@staticmethod
|
||||
def _set_user_agent(clc):
|
||||
if hasattr(clc, 'SetRequestsSession'):
|
||||
agent_string = "ClcAnsibleModule/" + __version__
|
||||
ses = requests.Session()
|
||||
ses.headers.update({"Api-Client": agent_string})
|
||||
ses.headers['User-Agent'] += " " + agent_string
|
||||
clc.SetRequestsSession(ses)
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
The main function. Instantiates the module and calls process_request.
|
||||
:return: none
|
||||
"""
|
||||
argument_dict = ClcAlertPolicy._define_module_argument_spec()
|
||||
module = AnsibleModule(supports_check_mode=True, **argument_dict)
|
||||
clc_alert_policy = ClcAlertPolicy(module)
|
||||
clc_alert_policy.process_request()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,302 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2015 CenturyLink
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: clc_blueprint_package
|
||||
short_description: Deploys a blueprint package on a set of servers in CenturyLink Cloud
|
||||
description:
|
||||
- An Ansible module to deploy a blueprint package on a set of servers in CenturyLink Cloud.
|
||||
options:
|
||||
server_ids:
|
||||
description:
|
||||
- A list of server ids on which to deploy the blueprint package.
|
||||
type: list
|
||||
required: true
|
||||
elements: str
|
||||
package_id:
|
||||
description:
|
||||
- The package id of the blueprint.
|
||||
type: str
|
||||
required: true
|
||||
package_params:
|
||||
description:
|
||||
- The dictionary of arguments required to deploy the blueprint.
|
||||
type: dict
|
||||
default: {}
|
||||
required: false
|
||||
state:
|
||||
description:
|
||||
- Whether to install or uninstall the package. Currently it supports only "present" for install action.
|
||||
type: str
|
||||
required: false
|
||||
default: present
|
||||
choices: ['present']
|
||||
wait:
|
||||
description:
|
||||
- Whether to wait for the tasks to finish before returning.
|
||||
type: str
|
||||
default: 'True'
|
||||
required: false
|
||||
requirements:
|
||||
- python = 2.7
|
||||
- requests >= 2.5.0
|
||||
- clc-sdk
|
||||
author: "CLC Runner (@clc-runner)"
|
||||
notes:
|
||||
- To use this module, it is required to set the below environment variables, which enable access to the
CenturyLink Cloud
- CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
- CLC_V2_API_PASSWORD, the account password for the CenturyLink Cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples.
|
||||
|
||||
- name: Deploy package
|
||||
community.general.clc_blueprint_package:
|
||||
server_ids:
|
||||
- UC1TEST-SERVER1
|
||||
- UC1TEST-SERVER2
|
||||
package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a
|
||||
package_params: {}
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
server_ids:
|
||||
description: The list of server ids that are changed
|
||||
returned: success
|
||||
type: list
|
||||
sample:
|
||||
[
|
||||
"UC1TEST-SERVER1",
|
||||
"UC1TEST-SERVER2"
|
||||
]
|
||||
'''
|
||||
|
||||
__version__ = '${version}'
|
||||
|
||||
import os
|
||||
import traceback
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
import requests
|
||||
except ImportError:
|
||||
REQUESTS_IMP_ERR = traceback.format_exc()
|
||||
REQUESTS_FOUND = False
|
||||
else:
|
||||
REQUESTS_FOUND = True
|
||||
|
||||
#
|
||||
# Requires the clc-python-sdk.
|
||||
# sudo pip install clc-sdk
|
||||
#
|
||||
CLC_IMP_ERR = None
|
||||
try:
|
||||
import clc as clc_sdk
|
||||
from clc import CLCException
|
||||
except ImportError:
|
||||
CLC_IMP_ERR = traceback.format_exc()
|
||||
CLC_FOUND = False
|
||||
clc_sdk = None
|
||||
else:
|
||||
CLC_FOUND = True
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
|
||||
|
||||
class ClcBlueprintPackage:
|
||||
|
||||
clc = clc_sdk
|
||||
module = None
|
||||
|
||||
def __init__(self, module):
|
||||
"""
|
||||
Construct module
|
||||
"""
|
||||
self.module = module
|
||||
if not CLC_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
||||
if not REQUESTS_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
||||
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||
self.module.fail_json(
|
||||
msg='requests library version should be >= 2.5.0')
|
||||
|
||||
self._set_user_agent(self.clc)
|
||||
|
||||
def process_request(self):
|
||||
"""
|
||||
Process the request - Main Code Path
|
||||
:return: exits the module via exit_json or fail_json
|
||||
"""
|
||||
p = self.module.params
|
||||
changed = False
|
||||
changed_server_ids = []
|
||||
self._set_clc_credentials_from_env()
|
||||
server_ids = p['server_ids']
|
||||
package_id = p['package_id']
|
||||
package_params = p['package_params']
|
||||
state = p['state']
|
||||
if state == 'present':
|
||||
changed, changed_server_ids, request_list = self.ensure_package_installed(
|
||||
server_ids, package_id, package_params)
|
||||
self._wait_for_requests_to_complete(request_list)
|
||||
self.module.exit_json(changed=changed, server_ids=changed_server_ids)
|
||||
|
||||
@staticmethod
|
||||
def define_argument_spec():
|
||||
"""
|
||||
This function defines the dictionary object required for
|
||||
package module
|
||||
:return: the package dictionary object
|
||||
"""
|
||||
argument_spec = dict(
|
||||
server_ids=dict(type='list', elements='str', required=True),
|
||||
package_id=dict(required=True),
|
||||
package_params=dict(type='dict', default={}),
|
||||
wait=dict(default=True), # @FIXME should be bool?
|
||||
state=dict(default='present', choices=['present'])
|
||||
)
|
||||
return argument_spec
|
||||
|
||||
def ensure_package_installed(self, server_ids, package_id, package_params):
|
||||
"""
|
||||
Ensure the package is installed in the given list of servers
|
||||
:param server_ids: the server list where the package needs to be installed
|
||||
:param package_id: the blueprint package id
|
||||
:param package_params: the package arguments
|
||||
:return: (changed, server_ids, request_list)
|
||||
changed: A flag indicating if a change was made
|
||||
server_ids: The list of servers modified
|
||||
request_list: The list of request objects from clc-sdk
|
||||
"""
|
||||
changed = False
|
||||
request_list = []
|
||||
servers = self._get_servers_from_clc(
|
||||
server_ids,
|
||||
'Failed to get servers from CLC')
|
||||
for server in servers:
|
||||
if not self.module.check_mode:
|
||||
request = self.clc_install_package(
|
||||
server,
|
||||
package_id,
|
||||
package_params)
|
||||
request_list.append(request)
|
||||
changed = True
|
||||
return changed, server_ids, request_list
|
||||
|
||||
def clc_install_package(self, server, package_id, package_params):
|
||||
"""
|
||||
Install the package to a given clc server
|
||||
:param server: The server object where the package needs to be installed
|
||||
:param package_id: The blueprint package id
|
||||
:param package_params: the required argument dict for the package installation
|
||||
:return: The result object from the CLC API call
|
||||
"""
|
||||
result = None
|
||||
try:
|
||||
result = server.ExecutePackage(
|
||||
package_id=package_id,
|
||||
parameters=package_params)
|
||||
except CLCException as ex:
|
||||
self.module.fail_json(msg='Failed to install package : {0} to server {1}. {2}'.format(
|
||||
package_id, server.id, ex.message
|
||||
))
|
||||
return result
|
||||
|
||||
def _wait_for_requests_to_complete(self, request_lst):
|
||||
"""
|
||||
Waits until the CLC requests are complete if the wait argument is True
|
||||
:param request_lst: The list of CLC request objects
|
||||
:return: none
|
||||
"""
|
||||
if not self.module.params['wait']:
|
||||
return
|
||||
for request in request_lst:
|
||||
request.WaitUntilComplete()
|
||||
for request_details in request.requests:
|
||||
if request_details.Status() != 'succeeded':
|
||||
self.module.fail_json(
|
||||
msg='Unable to process package install request')
|
||||
|
||||
def _get_servers_from_clc(self, server_list, message):
|
||||
"""
|
||||
Internal function to fetch list of CLC server objects from a list of server ids
|
||||
:param server_list: the list of server ids
|
||||
:param message: the error message to raise if there is any error
|
||||
:return: the list of CLC server objects
|
||||
"""
|
||||
try:
|
||||
return self.clc.v2.Servers(server_list).servers
|
||||
except CLCException as ex:
|
||||
self.module.fail_json(msg=message + ': %s' % ex)
|
||||
|
||||
def _set_clc_credentials_from_env(self):
|
||||
"""
|
||||
Set the CLC Credentials on the sdk by reading environment variables
|
||||
:return: none
|
||||
"""
|
||||
env = os.environ
|
||||
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
|
||||
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
|
||||
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
|
||||
clc_alias = env.get('CLC_ACCT_ALIAS', False)
|
||||
api_url = env.get('CLC_V2_API_URL', False)
|
||||
|
||||
if api_url:
|
||||
self.clc.defaults.ENDPOINT_URL_V2 = api_url
|
||||
|
||||
if v2_api_token and clc_alias:
|
||||
self.clc._LOGIN_TOKEN_V2 = v2_api_token
|
||||
self.clc._V2_ENABLED = True
|
||||
self.clc.ALIAS = clc_alias
|
||||
elif v2_api_username and v2_api_passwd:
|
||||
self.clc.v2.SetCredentials(
|
||||
api_username=v2_api_username,
|
||||
api_passwd=v2_api_passwd)
|
||||
else:
|
||||
return self.module.fail_json(
|
||||
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
|
||||
"environment variables")
|
||||
|
||||
@staticmethod
|
||||
def _set_user_agent(clc):
|
||||
if hasattr(clc, 'SetRequestsSession'):
|
||||
agent_string = "ClcAnsibleModule/" + __version__
|
||||
ses = requests.Session()
|
||||
ses.headers.update({"Api-Client": agent_string})
|
||||
ses.headers['User-Agent'] += " " + agent_string
|
||||
clc.SetRequestsSession(ses)
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
Main function
|
||||
:return: None
|
||||
"""
|
||||
module = AnsibleModule(
|
||||
argument_spec=ClcBlueprintPackage.define_argument_spec(),
|
||||
supports_check_mode=True
|
||||
)
|
||||
clc_blueprint_package = ClcBlueprintPackage(module)
|
||||
clc_blueprint_package.process_request()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,589 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2015 CenturyLink
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: clc_firewall_policy
|
||||
short_description: Create/delete/update firewall policies
|
||||
description:
|
||||
- Create, delete, or update firewall policies on CenturyLink Cloud.
|
||||
options:
|
||||
location:
|
||||
description:
|
||||
- Target datacenter for the firewall policy
|
||||
type: str
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Whether to create or delete the firewall policy
|
||||
type: str
|
||||
default: present
|
||||
choices: ['present', 'absent']
|
||||
source:
|
||||
description:
|
||||
- The list of source addresses for traffic on the originating firewall.
|
||||
This is required when state is 'present'
|
||||
type: list
|
||||
elements: str
|
||||
destination:
|
||||
description:
|
||||
- The list of destination addresses for traffic on the terminating firewall.
|
||||
This is required when state is 'present'
|
||||
type: list
|
||||
elements: str
|
||||
ports:
|
||||
description:
|
||||
- The list of ports associated with the policy.
|
||||
TCP and UDP can take in single ports or port ranges.
|
||||
- "Example: C(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])."
|
||||
type: list
|
||||
elements: str
|
||||
firewall_policy_id:
|
||||
description:
|
||||
- Id of the firewall policy. This is required to update or delete an existing firewall policy
|
||||
type: str
|
||||
source_account_alias:
|
||||
description:
|
||||
- CLC alias for the source account
|
||||
type: str
|
||||
required: true
|
||||
destination_account_alias:
|
||||
description:
|
||||
- CLC alias for the destination account
|
||||
type: str
|
||||
wait:
|
||||
description:
|
||||
- Whether to wait for the provisioning tasks to finish before returning.
|
||||
type: str
|
||||
default: 'True'
|
||||
enabled:
|
||||
description:
|
||||
- Whether the firewall policy is enabled or disabled
|
||||
type: str
|
||||
choices: ['True', 'False']
|
||||
default: 'True'
|
||||
requirements:
|
||||
- python = 2.7
|
||||
- requests >= 2.5.0
|
||||
- clc-sdk
|
||||
author: "CLC Runner (@clc-runner)"
|
||||
notes:
|
||||
- To use this module, it is required to set the below environment variables, which enable access to the
CenturyLink Cloud
- CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
- CLC_V2_API_PASSWORD, the account password for the CenturyLink Cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
---
|
||||
- name: Create Firewall Policy
|
||||
hosts: localhost
|
||||
gather_facts: false
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Create / Verify a Firewall Policy at CenturyLink Cloud
|
||||
community.general.clc_firewall_policy:
|
||||
source_account_alias: WFAD
|
||||
location: VA1
|
||||
state: present
|
||||
source: 10.128.216.0/24
|
||||
destination: 10.128.216.0/24
|
||||
ports: Any
|
||||
destination_account_alias: WFAD
|
||||
|
||||
- name: Delete Firewall Policy
|
||||
hosts: localhost
|
||||
gather_facts: false
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Delete a Firewall Policy at CenturyLink Cloud
|
||||
community.general.clc_firewall_policy:
|
||||
source_account_alias: WFAD
|
||||
location: VA1
|
||||
state: absent
|
||||
firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
firewall_policy_id:
|
||||
description: The firewall policy id
|
||||
returned: success
|
||||
type: str
|
||||
sample: fc36f1bfd47242e488a9c44346438c05
|
||||
firewall_policy:
|
||||
description: The firewall policy information
|
||||
returned: success
|
||||
type: dict
|
||||
sample:
|
||||
{
|
||||
"destination":[
|
||||
"10.1.1.0/24",
|
||||
"10.2.2.0/24"
|
||||
],
|
||||
"destinationAccount":"wfad",
|
||||
"enabled":true,
|
||||
"id":"fc36f1bfd47242e488a9c44346438c05",
|
||||
"links":[
|
||||
{
|
||||
"href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05",
|
||||
"rel":"self",
|
||||
"verbs":[
|
||||
"GET",
|
||||
"PUT",
|
||||
"DELETE"
|
||||
]
|
||||
}
|
||||
],
|
||||
"ports":[
|
||||
"any"
|
||||
],
|
||||
"source":[
|
||||
"10.1.1.0/24",
|
||||
"10.2.2.0/24"
|
||||
],
|
||||
"status":"active"
|
||||
}
|
||||
'''
|
||||
|
||||
__version__ = '${version}'
|
||||
|
||||
import os
|
||||
import traceback
|
||||
from ansible.module_utils.six.moves.urllib.parse import urlparse
|
||||
from time import sleep
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
import requests
|
||||
except ImportError:
|
||||
REQUESTS_IMP_ERR = traceback.format_exc()
|
||||
REQUESTS_FOUND = False
|
||||
else:
|
||||
REQUESTS_FOUND = True
|
||||
|
||||
CLC_IMP_ERR = None
|
||||
try:
|
||||
import clc as clc_sdk
|
||||
from clc import APIFailedResponse
|
||||
except ImportError:
|
||||
CLC_IMP_ERR = traceback.format_exc()
|
||||
CLC_FOUND = False
|
||||
clc_sdk = None
|
||||
else:
|
||||
CLC_FOUND = True
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
|
||||
|
||||
class ClcFirewallPolicy:
|
||||
|
||||
clc = None
|
||||
|
||||
def __init__(self, module):
|
||||
"""
|
||||
Construct module
|
||||
"""
|
||||
self.clc = clc_sdk
|
||||
self.module = module
|
||||
self.firewall_dict = {}
|
||||
|
||||
if not CLC_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
||||
if not REQUESTS_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
||||
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||
self.module.fail_json(
|
||||
msg='requests library version should be >= 2.5.0')
|
||||
|
||||
self._set_user_agent(self.clc)
|
||||
|
||||
@staticmethod
|
||||
def _define_module_argument_spec():
|
||||
"""
|
||||
Define the argument spec for the ansible module
|
||||
:return: argument spec dictionary
|
||||
"""
|
||||
argument_spec = dict(
|
||||
location=dict(required=True),
|
||||
source_account_alias=dict(required=True),
|
||||
destination_account_alias=dict(),
|
||||
firewall_policy_id=dict(),
|
||||
ports=dict(type='list', elements='str'),
|
||||
source=dict(type='list', elements='str'),
|
||||
destination=dict(type='list', elements='str'),
|
||||
wait=dict(default=True), # @FIXME type=bool
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
enabled=dict(default=True, choices=[True, False])
|
||||
)
|
||||
return argument_spec
|
||||
|
||||
def process_request(self):
|
||||
"""
|
||||
Execute the main code path, and handle the request
|
||||
:return: none
|
||||
"""
|
||||
changed = False
|
||||
firewall_policy = None
|
||||
location = self.module.params.get('location')
|
||||
source_account_alias = self.module.params.get('source_account_alias')
|
||||
destination_account_alias = self.module.params.get(
|
||||
'destination_account_alias')
|
||||
firewall_policy_id = self.module.params.get('firewall_policy_id')
|
||||
ports = self.module.params.get('ports')
|
||||
source = self.module.params.get('source')
|
||||
destination = self.module.params.get('destination')
|
||||
wait = self.module.params.get('wait')
|
||||
state = self.module.params.get('state')
|
||||
enabled = self.module.params.get('enabled')
|
||||
|
||||
self.firewall_dict = {
|
||||
'location': location,
|
||||
'source_account_alias': source_account_alias,
|
||||
'destination_account_alias': destination_account_alias,
|
||||
'firewall_policy_id': firewall_policy_id,
|
||||
'ports': ports,
|
||||
'source': source,
|
||||
'destination': destination,
|
||||
'wait': wait,
|
||||
'state': state,
|
||||
'enabled': enabled}
|
||||
|
||||
self._set_clc_credentials_from_env()
|
||||
|
||||
if state == 'absent':
|
||||
changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent(
|
||||
source_account_alias, location, self.firewall_dict)
|
||||
|
||||
elif state == 'present':
|
||||
changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present(
|
||||
source_account_alias, location, self.firewall_dict)
|
||||
|
||||
return self.module.exit_json(
|
||||
changed=changed,
|
||||
firewall_policy_id=firewall_policy_id,
|
||||
firewall_policy=firewall_policy)
|
||||
|
||||
@staticmethod
|
||||
def _get_policy_id_from_response(response):
|
||||
"""
|
||||
Method to parse out the policy id from creation response
|
||||
:param response: response from firewall creation API call
|
||||
:return: policy_id: firewall policy id from creation call
|
||||
"""
|
||||
url = response.get('links')[0]['href']
|
||||
path = urlparse(url).path
|
||||
path_list = os.path.split(path)
|
||||
policy_id = path_list[-1]
|
||||
return policy_id
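# Illustrative walk-through with a hypothetical creation response:
#   url  = 'http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05'
#   path = urlparse(url).path   # '/v2-experimental/firewallPolicies/wfad/uc1/fc36...c05'
#   os.path.split(path)[-1]     # 'fc36f1bfd47242e488a9c44346438c05'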
|
||||
|
||||
def _set_clc_credentials_from_env(self):
|
||||
"""
|
||||
Set the CLC Credentials on the sdk by reading environment variables
|
||||
:return: none
|
||||
"""
|
||||
env = os.environ
|
||||
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
|
||||
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
|
||||
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
|
||||
clc_alias = env.get('CLC_ACCT_ALIAS', False)
|
||||
api_url = env.get('CLC_V2_API_URL', False)
|
||||
|
||||
if api_url:
|
||||
self.clc.defaults.ENDPOINT_URL_V2 = api_url
|
||||
|
||||
if v2_api_token and clc_alias:
|
||||
self.clc._LOGIN_TOKEN_V2 = v2_api_token
|
||||
self.clc._V2_ENABLED = True
|
||||
self.clc.ALIAS = clc_alias
|
||||
elif v2_api_username and v2_api_passwd:
|
||||
self.clc.v2.SetCredentials(
|
||||
api_username=v2_api_username,
|
||||
api_passwd=v2_api_passwd)
|
||||
else:
|
||||
return self.module.fail_json(
|
||||
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
|
||||
"environment variables")
|
||||
|
||||
def _ensure_firewall_policy_is_present(
|
||||
self,
|
||||
source_account_alias,
|
||||
location,
|
||||
firewall_dict):
|
||||
"""
|
||||
Ensures that a given firewall policy is present
|
||||
:param source_account_alias: the source account alias for the firewall policy
|
||||
:param location: datacenter of the firewall policy
|
||||
:param firewall_dict: dictionary of request parameters for firewall policy
|
||||
:return: (changed, firewall_policy_id, firewall_policy)
|
||||
changed: flag for if a change occurred
|
||||
firewall_policy_id: the firewall policy id that was created/updated
|
||||
firewall_policy: The firewall_policy object
|
||||
"""
|
||||
firewall_policy = None
|
||||
firewall_policy_id = firewall_dict.get('firewall_policy_id')
|
||||
|
||||
if firewall_policy_id is None:
|
||||
if not self.module.check_mode:
|
||||
response = self._create_firewall_policy(
|
||||
source_account_alias,
|
||||
location,
|
||||
firewall_dict)
|
||||
firewall_policy_id = self._get_policy_id_from_response(
|
||||
response)
|
||||
changed = True
|
||||
else:
|
||||
firewall_policy = self._get_firewall_policy(
|
||||
source_account_alias, location, firewall_policy_id)
|
||||
if not firewall_policy:
|
||||
return self.module.fail_json(
|
||||
msg='Unable to find the firewall policy id : {0}'.format(
|
||||
firewall_policy_id))
|
||||
changed = self._compare_get_request_with_dict(
|
||||
firewall_policy,
|
||||
firewall_dict)
|
||||
if not self.module.check_mode and changed:
|
||||
self._update_firewall_policy(
|
||||
source_account_alias,
|
||||
location,
|
||||
firewall_policy_id,
|
||||
firewall_dict)
|
||||
if changed and firewall_policy_id:
|
||||
firewall_policy = self._wait_for_requests_to_complete(
|
||||
source_account_alias,
|
||||
location,
|
||||
firewall_policy_id)
|
||||
return changed, firewall_policy_id, firewall_policy
|
||||
|
    def _ensure_firewall_policy_is_absent(
            self,
            source_account_alias,
            location,
            firewall_dict):
        """
        Ensures that a given firewall policy is removed if present
        :param source_account_alias: the source account alias for the firewall policy
        :param location: datacenter of the firewall policy
        :param firewall_dict: firewall policy to delete
        :return: (changed, firewall_policy_id, response)
            changed: flag for if a change occurred
            firewall_policy_id: the firewall policy id that was deleted
            response: response from CLC API call
        """
        changed = False
        response = []
        firewall_policy_id = firewall_dict.get('firewall_policy_id')
        result = self._get_firewall_policy(
            source_account_alias, location, firewall_policy_id)
        if result:
            if not self.module.check_mode:
                response = self._delete_firewall_policy(
                    source_account_alias,
                    location,
                    firewall_policy_id)
            changed = True
        return changed, firewall_policy_id, response

    def _create_firewall_policy(
            self,
            source_account_alias,
            location,
            firewall_dict):
        """
        Creates the firewall policy for the given account alias
        :param source_account_alias: the source account alias for the firewall policy
        :param location: datacenter of the firewall policy
        :param firewall_dict: dictionary of request parameters for firewall policy
        :return: response from CLC API call
        """
        payload = {
            'destinationAccount': firewall_dict.get('destination_account_alias'),
            'source': firewall_dict.get('source'),
            'destination': firewall_dict.get('destination'),
            'ports': firewall_dict.get('ports')}
        try:
            response = self.clc.v2.API.Call(
                'POST', '/v2-experimental/firewallPolicies/%s/%s' %
                (source_account_alias, location), payload)
        except APIFailedResponse as e:
            return self.module.fail_json(
                msg="Unable to create firewall policy. %s" %
                str(e.response_text))
        return response

    def _delete_firewall_policy(
            self,
            source_account_alias,
            location,
            firewall_policy_id):
        """
        Deletes a given firewall policy for an account alias in a datacenter
        :param source_account_alias: the source account alias for the firewall policy
        :param location: datacenter of the firewall policy
        :param firewall_policy_id: firewall policy id to delete
        :return: response: response from CLC API call
        """
        try:
            response = self.clc.v2.API.Call(
                'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' %
                (source_account_alias, location, firewall_policy_id))
        except APIFailedResponse as e:
            return self.module.fail_json(
                msg="Unable to delete the firewall policy id : {0}. {1}".format(
                    firewall_policy_id, str(e.response_text)))
        return response

    def _update_firewall_policy(
            self,
            source_account_alias,
            location,
            firewall_policy_id,
            firewall_dict):
        """
        Updates a firewall policy for a given datacenter and account alias
        :param source_account_alias: the source account alias for the firewall policy
        :param location: datacenter of the firewall policy
        :param firewall_policy_id: firewall policy id to update
        :param firewall_dict: dictionary of request parameters for firewall policy
        :return: response: response from CLC API call
        """
        try:
            response = self.clc.v2.API.Call(
                'PUT',
                '/v2-experimental/firewallPolicies/%s/%s/%s' %
                (source_account_alias,
                 location,
                 firewall_policy_id),
                firewall_dict)
        except APIFailedResponse as e:
            return self.module.fail_json(
                msg="Unable to update the firewall policy id : {0}. {1}".format(
                    firewall_policy_id, str(e.response_text)))
        return response

    @staticmethod
    def _compare_get_request_with_dict(response, firewall_dict):
        """
        Helper method to compare the json response for getting the firewall policy with the request parameters
        :param response: response from the get method
        :param firewall_dict: dictionary of request parameters for firewall policy
        :return: changed: Boolean that returns true if there are differences between
                 the response parameters and the playbook parameters
        """

        changed = False

        response_dest_account_alias = response.get('destinationAccount')
        response_enabled = response.get('enabled')
        response_source = response.get('source')
        response_dest = response.get('destination')
        response_ports = response.get('ports')
        request_dest_account_alias = firewall_dict.get(
            'destination_account_alias')
        request_enabled = firewall_dict.get('enabled')
        if request_enabled is None:
            request_enabled = True
        request_source = firewall_dict.get('source')
        request_dest = firewall_dict.get('destination')
        request_ports = firewall_dict.get('ports')

        if (
                response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or (
                response_enabled != request_enabled) or (
                response_source and response_source != request_source) or (
                response_dest and response_dest != request_dest) or (
                response_ports and response_ports != request_ports):
            changed = True
        return changed
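    # Illustrative sketch (hypothetical values, not from the CLC API docs):
    # given a GET response of {'destinationAccount': 'WFAD', 'enabled': True, ...}
    # and a playbook firewall_dict of {'destination_account_alias': 'WFTT', ...},
    # the comparison above reports changed=True because the aliases differ;
    # 'enabled' defaults to True on the request side when the playbook omits it.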

    def _get_firewall_policy(
            self,
            source_account_alias,
            location,
            firewall_policy_id):
        """
        Get back details for a particular firewall policy
        :param source_account_alias: the source account alias for the firewall policy
        :param location: datacenter of the firewall policy
        :param firewall_policy_id: id of the firewall policy to get
        :return: response - The response from CLC API call
        """
        response = None
        try:
            response = self.clc.v2.API.Call(
                'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' %
                (source_account_alias, location, firewall_policy_id))
        except APIFailedResponse as e:
            if e.response_status_code != 404:
                self.module.fail_json(
                    msg="Unable to fetch the firewall policy with id : {0}. {1}".format(
                        firewall_policy_id, str(e.response_text)))
        return response

    def _wait_for_requests_to_complete(
            self,
            source_account_alias,
            location,
            firewall_policy_id,
            wait_limit=50):
        """
        Waits until the CLC requests are complete if the wait argument is True
        :param source_account_alias: The source account alias for the firewall policy
        :param location: datacenter of the firewall policy
        :param firewall_policy_id: The firewall policy id
        :param wait_limit: The number of times to check the status for completion
        :return: the firewall_policy object
        """
        wait = self.module.params.get('wait')
        count = 0
        firewall_policy = None
        while wait:
            count += 1
            firewall_policy = self._get_firewall_policy(
                source_account_alias, location, firewall_policy_id)
            status = firewall_policy.get('status')
            if status == 'active' or count > wait_limit:
                wait = False
            else:
                # wait for 2 seconds
                sleep(2)
        return firewall_policy
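    # Timing sketch: with the default wait_limit=50 and the 2-second sleep,
    # the polling loop above gives the policy roughly 100 seconds to become
    # 'active' before returning it in whatever state it is in at that point.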

    @staticmethod
    def _set_user_agent(clc):
        if hasattr(clc, 'SetRequestsSession'):
            agent_string = "ClcAnsibleModule/" + __version__
            ses = requests.Session()
            ses.headers.update({"Api-Client": agent_string})
            ses.headers['User-Agent'] += " " + agent_string
            clc.SetRequestsSession(ses)


def main():
    """
    The main function. Instantiates the module and calls process_request.
    :return: none
    """
    module = AnsibleModule(
        argument_spec=ClcFirewallPolicy._define_module_argument_spec(),
        supports_check_mode=True)

    clc_firewall = ClcFirewallPolicy(module)
    clc_firewall.process_request()


if __name__ == '__main__':
    main()
@@ -0,0 +1,515 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
module: clc_group
short_description: Create/delete Server Groups at Centurylink Cloud
description:
  - Create or delete Server Groups at Centurylink Cloud
options:
  name:
    description:
      - The name of the Server Group
    type: str
    required: true
  description:
    description:
      - A description of the Server Group
    type: str
    required: false
  parent:
    description:
      - The parent group of the server group. If parent is not provided, it creates the group at top level.
    type: str
    required: false
  location:
    description:
      - Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter
        associated with the account
    type: str
    required: false
  state:
    description:
      - Whether to create or delete the group
    type: str
    default: present
    choices: ['present', 'absent']
  wait:
    description:
      - Whether to wait for the tasks to finish before returning.
    type: bool
    default: true
    required: false
requirements:
    - python = 2.7
    - requests >= 2.5.0
    - clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
    - To use this module, it is required to set the below environment variables which enable access to the
      Centurylink Cloud
          - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
          - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
    - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
      CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
          - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
          - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
    - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''

EXAMPLES = '''

# Create a Server Group

---
- name: Create Server Group
  hosts: localhost
  gather_facts: false
  connection: local
  tasks:
    - name: Create / Verify a Server Group at CenturyLink Cloud
      community.general.clc_group:
        name: My Cool Server Group
        parent: Default Group
        state: present
      register: clc

    - name: Debug
      ansible.builtin.debug:
        var: clc

# Delete a Server Group
- name: Delete Server Group
  hosts: localhost
  gather_facts: false
  connection: local
  tasks:
    - name: Delete / Verify Absent a Server Group at CenturyLink Cloud
      community.general.clc_group:
        name: My Cool Server Group
        parent: Default Group
        state: absent
      register: clc

    - name: Debug
      ansible.builtin.debug:
        var: clc
'''

RETURN = '''
group:
    description: The group information
    returned: success
    type: dict
    sample:
        {
           "changeInfo":{
              "createdBy":"service.wfad",
              "createdDate":"2015-07-29T18:52:47Z",
              "modifiedBy":"service.wfad",
              "modifiedDate":"2015-07-29T18:52:47Z"
           },
           "customFields":[

           ],
           "description":"test group",
           "groups":[

           ],
           "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1",
           "links":[
              {
                 "href":"/v2/groups/wfad",
                 "rel":"createGroup",
                 "verbs":[
                    "POST"
                 ]
              },
              {
                 "href":"/v2/servers/wfad",
                 "rel":"createServer",
                 "verbs":[
                    "POST"
                 ]
              },
              {
                 "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1",
                 "rel":"self",
                 "verbs":[
                    "GET",
                    "PATCH",
                    "DELETE"
                 ]
              },
              {
                 "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
                 "id":"086ac1dfe0b6411989e8d1b77c4065f0",
                 "rel":"parentGroup"
              },
              {
                 "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults",
                 "rel":"defaults",
                 "verbs":[
                    "GET",
                    "POST"
                 ]
              },
              {
                 "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing",
                 "rel":"billing"
              },
              {
                 "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive",
                 "rel":"archiveGroupAction"
              },
              {
                 "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics",
                 "rel":"statistics"
              },
              {
                 "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities",
                 "rel":"upcomingScheduledActivities"
              },
              {
                 "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy",
                 "rel":"horizontalAutoscalePolicyMapping",
                 "verbs":[
                    "GET",
                    "PUT",
                    "DELETE"
                 ]
              },
              {
                 "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities",
                 "rel":"scheduledActivities",
                 "verbs":[
                    "GET",
                    "POST"
                 ]
              }
           ],
           "locationId":"UC1",
           "name":"test group",
           "status":"active",
           "type":"default"
        }
'''

__version__ = '${version}'

import os
import traceback

from ansible_collections.community.general.plugins.module_utils.version import LooseVersion

REQUESTS_IMP_ERR = None
try:
    import requests
except ImportError:
    REQUESTS_IMP_ERR = traceback.format_exc()
    REQUESTS_FOUND = False
else:
    REQUESTS_FOUND = True

#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
CLC_IMP_ERR = None
try:
    import clc as clc_sdk
    from clc import CLCException
except ImportError:
    CLC_IMP_ERR = traceback.format_exc()
    CLC_FOUND = False
    clc_sdk = None
else:
    CLC_FOUND = True

from ansible.module_utils.basic import AnsibleModule, missing_required_lib


class ClcGroup(object):

    clc = None
    root_group = None

    def __init__(self, module):
        """
        Construct module
        """
        self.clc = clc_sdk
        self.module = module
        self.group_dict = {}

        if not CLC_FOUND:
            self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
        if not REQUESTS_FOUND:
            self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
        if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
            self.module.fail_json(
                msg='requests library version should be >= 2.5.0')

        self._set_user_agent(self.clc)

    def process_request(self):
        """
        Execute the main code path, and handle the request
        :return: none
        """
        location = self.module.params.get('location')
        group_name = self.module.params.get('name')
        parent_name = self.module.params.get('parent')
        group_description = self.module.params.get('description')
        state = self.module.params.get('state')

        self._set_clc_credentials_from_env()
        self.group_dict = self._get_group_tree_for_datacenter(
            datacenter=location)

        if state == "absent":
            changed, group, requests = self._ensure_group_is_absent(
                group_name=group_name, parent_name=parent_name)
            if requests:
                self._wait_for_requests_to_complete(requests)
        else:
            changed, group = self._ensure_group_is_present(
                group_name=group_name, parent_name=parent_name, group_description=group_description)
        try:
            group = group.data
        except AttributeError:
            group = group_name
        self.module.exit_json(changed=changed, group=group)
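    # Readability note for process_request above: the local name 'requests'
    # (the CLC request objects returned by _ensure_group_is_absent) shadows
    # the imported requests HTTP library within that method's scope.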

    @staticmethod
    def _define_module_argument_spec():
        """
        Define the argument spec for the ansible module
        :return: argument spec dictionary
        """
        argument_spec = dict(
            name=dict(required=True),
            description=dict(),
            parent=dict(),
            location=dict(),
            state=dict(default='present', choices=['present', 'absent']),
            wait=dict(type='bool', default=True))

        return argument_spec

    def _set_clc_credentials_from_env(self):
        """
        Set the CLC Credentials on the sdk by reading environment variables
        :return: none
        """
        env = os.environ
        v2_api_token = env.get('CLC_V2_API_TOKEN', False)
        v2_api_username = env.get('CLC_V2_API_USERNAME', False)
        v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
        clc_alias = env.get('CLC_ACCT_ALIAS', False)
        api_url = env.get('CLC_V2_API_URL', False)

        if api_url:
            self.clc.defaults.ENDPOINT_URL_V2 = api_url

        if v2_api_token and clc_alias:
            self.clc._LOGIN_TOKEN_V2 = v2_api_token
            self.clc._V2_ENABLED = True
            self.clc.ALIAS = clc_alias
        elif v2_api_username and v2_api_passwd:
            self.clc.v2.SetCredentials(
                api_username=v2_api_username,
                api_passwd=v2_api_passwd)
        else:
            return self.module.fail_json(
                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                    "environment variables")
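    # Example environment setup (hypothetical values). Token-based auth takes
    # precedence when both credential sets are present, per the branch order
    # above:
    #   export CLC_V2_API_TOKEN='<token from https://api.ctl.io/v2/authentication/login>'
    #   export CLC_ACCT_ALIAS='WFAD'
    # or:
    #   export CLC_V2_API_USERNAME='clc_user'
    #   export CLC_V2_API_PASSWD='clc_password'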

    def _ensure_group_is_absent(self, group_name, parent_name):
        """
        Ensure that group_name is absent by deleting it if necessary
        :param group_name: string - the name of the clc server group to delete
        :param parent_name: string - the name of the parent group for group_name
        :return: changed, group
        """
        changed = False
        group = []
        results = []

        if self._group_exists(group_name=group_name, parent_name=parent_name):
            if not self.module.check_mode:
                group.append(group_name)
                result = self._delete_group(group_name)
                results.append(result)
            changed = True
        return changed, group, results

    def _delete_group(self, group_name):
        """
        Delete the provided server group
        :param group_name: string - the server group to delete
        :return: none
        """
        response = None
        group, parent = self.group_dict.get(group_name)
        try:
            response = group.Delete()
        except CLCException as ex:
            self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format(
                group_name, ex.response_text
            ))
        return response

    def _ensure_group_is_present(
            self,
            group_name,
            parent_name,
            group_description):
        """
        Checks to see if a server group exists, creates it if it doesn't.
        :param group_name: the name of the group to validate/create
        :param parent_name: the name of the parent group for group_name
        :param group_description: a short description of the server group (used when creating)
        :return: (changed, group) -
            changed: Boolean - whether a change was made,
            group: A clc group object for the group
        """
        if not self.root_group:
            raise AssertionError("Implementation Error: Root Group not set")
        parent = parent_name if parent_name is not None else self.root_group.name
        description = group_description
        changed = False
        group = group_name

        parent_exists = self._group_exists(group_name=parent, parent_name=None)
        child_exists = self._group_exists(
            group_name=group_name,
            parent_name=parent)

        if parent_exists and child_exists:
            group, parent = self.group_dict[group_name]
            changed = False
        elif parent_exists and not child_exists:
            if not self.module.check_mode:
                group = self._create_group(
                    group=group,
                    parent=parent,
                    description=description)
            changed = True
        else:
            self.module.fail_json(
                msg="parent group: " +
                parent +
                " does not exist")

        return changed, group

    def _create_group(self, group, parent, description):
        """
        Create the provided server group
        :param group: clc_sdk.Group - the group to create
        :param parent: clc_sdk.Parent - the parent group for {group}
        :param description: string - a text description of the group
        :return: clc_sdk.Group - the created group
        """
        response = None
        (parent, grandparent) = self.group_dict[parent]
        try:
            response = parent.Create(name=group, description=description)
        except CLCException as ex:
            self.module.fail_json(msg='Failed to create group :{0}. {1}'.format(
                group, ex.response_text))
        return response

    def _group_exists(self, group_name, parent_name):
        """
        Check to see if a group exists
        :param group_name: string - the group to check
        :param parent_name: string - the parent of group_name
        :return: boolean - whether the group exists
        """
        result = False
        if group_name in self.group_dict:
            (group, parent) = self.group_dict[group_name]
            if parent_name is None or parent_name == parent.name:
                result = True
        return result

    def _get_group_tree_for_datacenter(self, datacenter=None):
        """
        Walk the tree of groups for a datacenter
        :param datacenter: string - the datacenter to walk (ex: 'UC1')
        :return: a dictionary of groups and parents
        """
        self.root_group = self.clc.v2.Datacenter(
            location=datacenter).RootGroup()
        return self._walk_groups_recursive(
            parent_group=None,
            child_group=self.root_group)

    def _walk_groups_recursive(self, parent_group, child_group):
        """
        Walk a parent-child tree of groups, starting with the provided child group
        :param parent_group: clc_sdk.Group - the parent group to start the walk
        :param child_group: clc_sdk.Group - the child group to start the walk
        :return: a dictionary of groups and parents
        """
        result = {str(child_group): (child_group, parent_group)}
        groups = child_group.Subgroups().groups
        if len(groups) > 0:
            for group in groups:
                if group.type != 'default':
                    continue

                result.update(self._walk_groups_recursive(child_group, group))
        return result
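    # Sketch of the mapping built above, with hypothetical group names:
    # walking a root group 'WFAD Root' that has one child 'Default Group'
    # yields
    #   {'WFAD Root': (<Group>, None),
    #    'Default Group': (<Group>, <Group 'WFAD Root'>)}
    # which is exactly the (group, parent) tuple that _group_exists() unpacks.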

    def _wait_for_requests_to_complete(self, requests_lst):
        """
        Waits until the CLC requests are complete if the wait argument is True
        :param requests_lst: The list of CLC request objects
        :return: none
        """
        if not self.module.params['wait']:
            return
        for request in requests_lst:
            request.WaitUntilComplete()
            for request_details in request.requests:
                if request_details.Status() != 'succeeded':
                    self.module.fail_json(
                        msg='Unable to process group request')

    @staticmethod
    def _set_user_agent(clc):
        if hasattr(clc, 'SetRequestsSession'):
            agent_string = "ClcAnsibleModule/" + __version__
            ses = requests.Session()
            ses.headers.update({"Api-Client": agent_string})
            ses.headers['User-Agent'] += " " + agent_string
            clc.SetRequestsSession(ses)


def main():
    """
    The main function. Instantiates the module and calls process_request.
    :return: none
    """
    module = AnsibleModule(
        argument_spec=ClcGroup._define_module_argument_spec(),
        supports_check_mode=True)

    clc_group = ClcGroup(module)
    clc_group.process_request()


if __name__ == '__main__':
    main()
@@ -0,0 +1,938 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 CenturyLink
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
module: clc_loadbalancer
short_description: Create, Delete shared loadbalancers in CenturyLink Cloud
description:
  - An Ansible module to Create, Delete shared loadbalancers in CenturyLink Cloud.
options:
  name:
    description:
      - The name of the loadbalancer
    type: str
    required: true
  description:
    description:
      - A description for the loadbalancer
    type: str
  alias:
    description:
      - The alias of your CLC Account
    type: str
    required: true
  location:
    description:
      - The location of the datacenter where the load balancer resides
    type: str
    required: true
  method:
    description:
      - The balancing method for the load balancer pool
    type: str
    choices: ['leastConnection', 'roundRobin']
  persistence:
    description:
      - The persistence method for the load balancer
    type: str
    choices: ['standard', 'sticky']
  port:
    description:
      - Port to configure on the public-facing side of the load balancer pool
    type: str
    choices: ['80', '443']
  nodes:
    description:
      - A list of nodes that need to be added to the load balancer pool
    type: list
    default: []
    elements: dict
  status:
    description:
      - The status of the loadbalancer
    type: str
    default: enabled
    choices: ['enabled', 'disabled']
  state:
    description:
      - Whether to create or delete the load balancer pool
    type: str
    default: present
    choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
requirements:
    - python = 2.7
    - requests >= 2.5.0
    - clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
    - To use this module, it is required to set the below environment variables which enable access to the
      Centurylink Cloud
          - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
          - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
    - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
      CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
          - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
          - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
    - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''

EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
- name: Create Loadbalancer
  hosts: localhost
  connection: local
  tasks:
    - name: Actually Create things
      community.general.clc_loadbalancer:
        name: test
        description: test
        alias: TEST
        location: WA1
        port: 443
        nodes:
          - ipAddress: 10.11.22.123
            privatePort: 80
        state: present

- name: Add node to an existing loadbalancer pool
  hosts: localhost
  connection: local
  tasks:
    - name: Actually Create things
      community.general.clc_loadbalancer:
        name: test
        description: test
        alias: TEST
        location: WA1
        port: 443
        nodes:
          - ipAddress: 10.11.22.234
            privatePort: 80
        state: nodes_present

- name: Remove node from an existing loadbalancer pool
  hosts: localhost
  connection: local
  tasks:
    - name: Actually Create things
      community.general.clc_loadbalancer:
        name: test
        description: test
        alias: TEST
        location: WA1
        port: 443
        nodes:
          - ipAddress: 10.11.22.234
            privatePort: 80
        state: nodes_absent

- name: Delete LoadbalancerPool
  hosts: localhost
  connection: local
  tasks:
    - name: Actually Delete things
      community.general.clc_loadbalancer:
        name: test
        description: test
        alias: TEST
        location: WA1
        port: 443
        nodes:
          - ipAddress: 10.11.22.123
            privatePort: 80
        state: port_absent

- name: Delete Loadbalancer
  hosts: localhost
  connection: local
  tasks:
    - name: Actually Delete things
      community.general.clc_loadbalancer:
        name: test
        description: test
        alias: TEST
        location: WA1
        port: 443
        nodes:
          - ipAddress: 10.11.22.123
            privatePort: 80
        state: absent
'''

RETURN = '''
loadbalancer:
    description: The load balancer result object from CLC
    returned: success
    type: dict
    sample:
        {
           "description":"test-lb",
           "id":"ab5b18cb81e94ab9925b61d1ca043fb5",
           "ipAddress":"66.150.174.197",
           "links":[
              {
                 "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5",
                 "rel":"self",
                 "verbs":[
                    "GET",
                    "PUT",
                    "DELETE"
                 ]
              },
              {
                 "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools",
                 "rel":"pools",
                 "verbs":[
                    "GET",
                    "POST"
                 ]
              }
           ],
           "name":"test-lb",
           "pools":[

           ],
           "status":"enabled"
        }
'''

__version__ = '${version}'

import json
import os
import traceback
from time import sleep

from ansible_collections.community.general.plugins.module_utils.version import LooseVersion

REQUESTS_IMP_ERR = None
try:
    import requests
except ImportError:
    REQUESTS_IMP_ERR = traceback.format_exc()
    REQUESTS_FOUND = False
else:
    REQUESTS_FOUND = True

#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
CLC_IMP_ERR = None
try:
    import clc as clc_sdk
    from clc import APIFailedResponse
except ImportError:
    CLC_IMP_ERR = traceback.format_exc()
    CLC_FOUND = False
    clc_sdk = None
else:
    CLC_FOUND = True

from ansible.module_utils.basic import AnsibleModule, missing_required_lib


class ClcLoadBalancer:

    clc = None

    def __init__(self, module):
        """
        Construct module
        """
        self.clc = clc_sdk
        self.module = module
        self.lb_dict = {}

        if not CLC_FOUND:
            self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
        if not REQUESTS_FOUND:
            self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
        if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
            self.module.fail_json(
                msg='requests library version should be >= 2.5.0')

        self._set_user_agent(self.clc)

    def process_request(self):
        """
        Execute the main code path, and handle the request
        :return: none
        """
        changed = False
        result_lb = None
        loadbalancer_name = self.module.params.get('name')
        loadbalancer_alias = self.module.params.get('alias')
        loadbalancer_location = self.module.params.get('location')
        loadbalancer_description = self.module.params.get('description')
        loadbalancer_port = self.module.params.get('port')
        loadbalancer_method = self.module.params.get('method')
        loadbalancer_persistence = self.module.params.get('persistence')
        loadbalancer_nodes = self.module.params.get('nodes')
        loadbalancer_status = self.module.params.get('status')
        state = self.module.params.get('state')

        if loadbalancer_description is None:
            loadbalancer_description = loadbalancer_name

        self._set_clc_credentials_from_env()

        self.lb_dict = self._get_loadbalancer_list(
            alias=loadbalancer_alias,
            location=loadbalancer_location)

        if state == 'present':
            changed, result_lb, lb_id = self.ensure_loadbalancer_present(
                name=loadbalancer_name,
                alias=loadbalancer_alias,
                location=loadbalancer_location,
                description=loadbalancer_description,
                status=loadbalancer_status)
            if loadbalancer_port:
                changed, result_pool, pool_id = self.ensure_loadbalancerpool_present(
                    lb_id=lb_id,
                    alias=loadbalancer_alias,
                    location=loadbalancer_location,
                    method=loadbalancer_method,
                    persistence=loadbalancer_persistence,
                    port=loadbalancer_port)

                if loadbalancer_nodes:
                    changed, result_nodes = self.ensure_lbpool_nodes_set(
                        alias=loadbalancer_alias,
                        location=loadbalancer_location,
                        name=loadbalancer_name,
                        port=loadbalancer_port,
                        nodes=loadbalancer_nodes)
        elif state == 'absent':
            changed, result_lb = self.ensure_loadbalancer_absent(
                name=loadbalancer_name,
                alias=loadbalancer_alias,
                location=loadbalancer_location)

        elif state == 'port_absent':
            changed, result_lb = self.ensure_loadbalancerpool_absent(
                alias=loadbalancer_alias,
                location=loadbalancer_location,
                name=loadbalancer_name,
                port=loadbalancer_port)

        elif state == 'nodes_present':
            changed, result_lb = self.ensure_lbpool_nodes_present(
                alias=loadbalancer_alias,
                location=loadbalancer_location,
                name=loadbalancer_name,
                port=loadbalancer_port,
                nodes=loadbalancer_nodes)

        elif state == 'nodes_absent':
            changed, result_lb = self.ensure_lbpool_nodes_absent(
                alias=loadbalancer_alias,
                location=loadbalancer_location,
                name=loadbalancer_name,
                port=loadbalancer_port,
                nodes=loadbalancer_nodes)

        self.module.exit_json(changed=changed, loadbalancer=result_lb)
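    # Behavior note for process_request above: when state == 'present', the
    # 'changed' flag is reassigned by each follow-up step (pool creation, then
    # node updates), so the reported value reflects the last step that ran
    # rather than an OR across all three steps.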

    def ensure_loadbalancer_present(
            self, name, alias, location, description, status):
        """
        Checks to see if a load balancer exists and creates one if it does not.
        :param name: Name of loadbalancer
        :param alias: Alias of account
        :param location: Datacenter
        :param description: Description of loadbalancer
        :param status: Enabled / Disabled
        :return: (changed, result, lb_id)
            changed: Boolean whether a change was made
            result: The result object from the CLC load balancer request
            lb_id: The load balancer id
        """
        changed = False
        result = name
        lb_id = self._loadbalancer_exists(name=name)
        if not lb_id:
            if not self.module.check_mode:
                result = self.create_loadbalancer(name=name,
                                                  alias=alias,
                                                  location=location,
                                                  description=description,
                                                  status=status)
                lb_id = result.get('id')
            changed = True

        return changed, result, lb_id

    def ensure_loadbalancerpool_present(
            self, lb_id, alias, location, method, persistence, port):
        """
        Checks to see if a load balancer pool exists and creates one if it does not.
        :param lb_id: The loadbalancer id
        :param alias: The account alias
        :param location: the datacenter the load balancer resides in
        :param method: the load balancing method
        :param persistence: the load balancing persistence type
        :param port: the port that the load balancer will listen on
        :return: (changed, result, pool_id) -
            changed: Boolean whether a change was made
            result: The result from the CLC API call
            pool_id: The string id of the load balancer pool
        """
        changed = False
        result = port
        if not lb_id:
            return changed, None, None
        pool_id = self._loadbalancerpool_exists(
            alias=alias,
            location=location,
            port=port,
            lb_id=lb_id)
        if not pool_id:
            if not self.module.check_mode:
                result = self.create_loadbalancerpool(
                    alias=alias,
                    location=location,
                    lb_id=lb_id,
                    method=method,
                    persistence=persistence,
                    port=port)
                pool_id = result.get('id')
            changed = True

        return changed, result, pool_id

    def ensure_loadbalancer_absent(self, name, alias, location):
        """
        Checks to see if a load balancer exists and deletes it if it does
        :param name: Name of the load balancer
        :param alias: Alias of account
        :param location: Datacenter
        :return: (changed, result)
            changed: Boolean whether a change was made
            result: The result from the CLC API Call
        """
        changed = False
        result = name
        lb_exists = self._loadbalancer_exists(name=name)
        if lb_exists:
            if not self.module.check_mode:
                result = self.delete_loadbalancer(alias=alias,
                                                  location=location,
                                                  name=name)
            changed = True
        return changed, result

    def ensure_loadbalancerpool_absent(self, alias, location, name, port):
        """
        Checks to see if a load balancer pool exists and deletes it if it does
        :param alias: The account alias
        :param location: the datacenter the load balancer resides in
        :param name: the name of the load balancer
        :param port: the port that the load balancer listens on
        :return: (changed, result) -
            changed: Boolean whether a change was made
            result: The result from the CLC API call
        """
        changed = False
        result = None
        lb_exists = self._loadbalancer_exists(name=name)
        if lb_exists:
            lb_id = self._get_loadbalancer_id(name=name)
            pool_id = self._loadbalancerpool_exists(
                alias=alias,
                location=location,
                port=port,
                lb_id=lb_id)
            if pool_id:
                changed = True
                if not self.module.check_mode:
                    result = self.delete_loadbalancerpool(
                        alias=alias,
                        location=location,
                        lb_id=lb_id,
                        pool_id=pool_id)
            else:
                result = "Pool doesn't exist"
        else:
            result = "LB Doesn't Exist"
        return changed, result

    def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes):
        """
        Checks to see if the provided list of nodes exist for the pool
        and sets the nodes if any in the list do not exist
        :param alias: The account alias
        :param location: the datacenter the load balancer resides in
        :param name: the name of the load balancer
        :param port: the port that the load balancer will listen on
        :param nodes: The list of nodes to be updated on the pool
        :return: (changed, result) -
            changed: Boolean whether a change was made
            result: The result from the CLC API call
        """
        result = {}
        changed = False
        lb_exists = self._loadbalancer_exists(name=name)
        if lb_exists:
            lb_id = self._get_loadbalancer_id(name=name)
            pool_id = self._loadbalancerpool_exists(
                alias=alias,
                location=location,
                port=port,
                lb_id=lb_id)
            if pool_id:
                nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias,
                                                                  location=location,
                                                                  lb_id=lb_id,
                                                                  pool_id=pool_id,
                                                                  nodes_to_check=nodes)
                if not nodes_exist:
                    changed = True
                    result = self.set_loadbalancernodes(alias=alias,
                                                        location=location,
                                                        lb_id=lb_id,
                                                        pool_id=pool_id,
                                                        nodes=nodes)
            else:
                result = "Pool doesn't exist"
        else:
            result = "Load balancer doesn't Exist"
        return changed, result

    def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes):
        """
        Checks to see if the provided list of nodes exist for the pool and adds the missing nodes to the pool
        :param alias: The account alias
        :param location: the datacenter the load balancer resides in
        :param name: the name of the load balancer
        :param port: the port that the load balancer will listen on
        :param nodes: the list of nodes to be added
        :return: (changed, result) -
            changed: Boolean whether a change was made
            result: The result from the CLC API call
        """
        changed = False
        lb_exists = self._loadbalancer_exists(name=name)
        if lb_exists:
            lb_id = self._get_loadbalancer_id(name=name)
            pool_id = self._loadbalancerpool_exists(
                alias=alias,
                location=location,
                port=port,
                lb_id=lb_id)
            if pool_id:
                changed, result = self.add_lbpool_nodes(alias=alias,
                                                        location=location,
                                                        lb_id=lb_id,
                                                        pool_id=pool_id,
                                                        nodes_to_add=nodes)
            else:
                result = "Pool doesn't exist"
        else:
            result = "Load balancer doesn't Exist"
        return changed, result

    def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes):
        """
        Checks to see if the provided list of nodes exist for the pool and removes any that are found
        :param alias: The account alias
        :param location: the datacenter the load balancer resides in
        :param name: the name of the load balancer
        :param port: the port that the load balancer will listen on
        :param nodes: the list of nodes to be removed
        :return: (changed, result) -
            changed: Boolean whether a change was made
            result: The result from the CLC API call
        """
        changed = False
        lb_exists = self._loadbalancer_exists(name=name)
        if lb_exists:
            lb_id = self._get_loadbalancer_id(name=name)
            pool_id = self._loadbalancerpool_exists(
                alias=alias,
                location=location,
                port=port,
                lb_id=lb_id)
            if pool_id:
                changed, result = self.remove_lbpool_nodes(alias=alias,
                                                           location=location,
                                                           lb_id=lb_id,
                                                           pool_id=pool_id,
                                                           nodes_to_remove=nodes)
            else:
                result = "Pool doesn't exist"
        else:
            result = "Load balancer doesn't Exist"
        return changed, result

    def create_loadbalancer(self, name, alias, location, description, status):
        """
        Create a loadbalancer w/ params
        :param name: Name of loadbalancer
        :param alias: Alias of account
        :param location: Datacenter
        :param description: Description for loadbalancer to be created
        :param status: Enabled / Disabled
        :return: result: The result from the CLC API call
        """
        result = None
        try:
            result = self.clc.v2.API.Call('POST',
                                          '/v2/sharedLoadBalancers/%s/%s' % (alias,
                                                                             location),
                                          json.dumps({"name": name,
                                                      "description": description,
                                                      "status": status}))
            sleep(1)
        except APIFailedResponse as e:
            self.module.fail_json(
                msg='Unable to create load balancer "{0}". {1}'.format(
                    name, str(e.response_text)))
        return result
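    # The sleep(1) above appears to give the API a moment to register the new
    # load balancer before any follow-up pool calls; the fixed one-second
    # delay is a heuristic in this module, not a documented CLC API contract.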

    def create_loadbalancerpool(
            self, alias, location, lb_id, method, persistence, port):
        """
        Creates a pool on the provided load balancer
        :param alias: the account alias
        :param location: the datacenter the load balancer resides in
        :param lb_id: the id string of the load balancer
        :param method: the load balancing method
        :param persistence: the load balancing persistence type
        :param port: the port that the load balancer will listen on
        :return: result: The result from the create API call
        """
        result = None
        try:
            result = self.clc.v2.API.Call(
                'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
                (alias, location, lb_id), json.dumps(
                    {
                        "port": port, "method": method, "persistence": persistence
                    }))
        except APIFailedResponse as e:
            self.module.fail_json(
                msg='Unable to create pool for load balancer id "{0}". {1}'.format(
                    lb_id, str(e.response_text)))
        return result

    def delete_loadbalancer(self, alias, location, name):
        """
        Delete CLC loadbalancer
        :param alias: Alias for account
        :param location: Datacenter
        :param name: Name of the loadbalancer to delete
        :return: result: The result from the CLC API call
        """
        result = None
        lb_id = self._get_loadbalancer_id(name=name)
        try:
            result = self.clc.v2.API.Call(
                'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' %
                (alias, location, lb_id))
        except APIFailedResponse as e:
            self.module.fail_json(
                msg='Unable to delete load balancer "{0}". {1}'.format(
                    name, str(e.response_text)))
        return result

    def delete_loadbalancerpool(self, alias, location, lb_id, pool_id):
        """
        Delete the pool on the provided load balancer
        :param alias: The account alias
        :param location: the datacenter the load balancer resides in
        :param lb_id: the id string of the load balancer
        :param pool_id: the id string of the load balancer pool
        :return: result: The result from the delete API call
        """
        result = None
        try:
            result = self.clc.v2.API.Call(
                'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' %
                (alias, location, lb_id, pool_id))
        except APIFailedResponse as e:
            self.module.fail_json(
                msg='Unable to delete pool for load balancer id "{0}". {1}'.format(
                    lb_id, str(e.response_text)))
        return result

    def _get_loadbalancer_id(self, name):
        """
        Retrieves unique ID of loadbalancer
        :param name: Name of loadbalancer
        :return: Unique ID of the loadbalancer
        """
        id = None
        for lb in self.lb_dict:
            if lb.get('name') == name:
                id = lb.get('id')
        return id

    def _get_loadbalancer_list(self, alias, location):
        """
        Retrieve a list of loadbalancers
        :param alias: Alias for account
        :param location: Datacenter
        :return: JSON data for all loadbalancers at datacenter
        """
        result = None
        try:
            result = self.clc.v2.API.Call(
                'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
        except APIFailedResponse as e:
            self.module.fail_json(
                msg='Unable to fetch load balancers for account: {0}. {1}'.format(
                    alias, str(e.response_text)))
        return result

    def _loadbalancer_exists(self, name):
        """
        Verify a loadbalancer exists
        :param name: Name of loadbalancer
        :return: False or the ID of the existing loadbalancer
        """
        result = False

        for lb in self.lb_dict:
            if lb.get('name') == name:
                result = lb.get('id')
        return result

    def _loadbalancerpool_exists(self, alias, location, port, lb_id):
        """
        Checks to see if a pool exists on the specified port on the provided load balancer
        :param alias: the account alias
        :param location: the datacenter the load balancer resides in
        :param port: the port to check and see if it exists
        :param lb_id: the id string of the provided load balancer
        :return: result: The id string of the pool or False
        """
        result = False
        try:
            pool_list = self.clc.v2.API.Call(
                'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
                (alias, location, lb_id))
        except APIFailedResponse as e:
            return self.module.fail_json(
                msg='Unable to fetch the load balancer pools for load balancer id: {0}. {1}'.format(
                    lb_id, str(e.response_text)))
        for pool in pool_list:
            if int(pool.get('port')) == int(port):
                result = pool.get('id')
        return result
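    # Both sides are cast with int() above because the module may receive the
    # port as a string (the documented choices are '80' and '443') while the
    # API returns a numeric port in each pool record.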

    def _loadbalancerpool_nodes_exists(
            self, alias, location, lb_id, pool_id, nodes_to_check):
        """
        Checks to see if a set of nodes exists on the specified port on the provided load balancer
        :param alias: the account alias
        :param location: the datacenter the load balancer resides in
        :param lb_id: the id string of the provided load balancer
        :param pool_id: the id string of the load balancer pool
        :param nodes_to_check: the list of nodes to check for
        :return: result: True / False indicating if the given nodes exist
        """
        result = False
        nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
        for node in nodes_to_check:
            if not node.get('status'):
                node['status'] = 'enabled'
            if node in nodes:
                result = True
            else:
                result = False
        return result
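    # Comparison note: nodes are matched as whole dicts, so a playbook node
    # such as {'ipAddress': '10.11.22.123', 'privatePort': 80} only equals an
    # API node after 'status' is defaulted to 'enabled' above; any extra or
    # differing key causes the membership test to fail and marks the pool for
    # an update.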
|
||||
def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes):
|
||||
"""
|
||||
Updates nodes to the provided pool
|
||||
:param alias: the account alias
|
||||
:param location: the datacenter the load balancer resides in
|
||||
:param lb_id: the id string of the load balancer
|
||||
:param pool_id: the id string of the pool
|
||||
:param nodes: a list of dictionaries containing the nodes to set
|
||||
:return: result: The result from the CLC API call
|
||||
"""
|
||||
result = None
|
||||
if not lb_id:
|
||||
return result
|
||||
if not self.module.check_mode:
|
||||
try:
|
||||
result = self.clc.v2.API.Call('PUT',
|
||||
'/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
|
||||
% (alias, location, lb_id, pool_id), json.dumps(nodes))
|
||||
except APIFailedResponse as e:
|
||||
self.module.fail_json(
|
||||
msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format(
|
||||
pool_id, str(e.response_text)))
|
||||
return result
|
||||
|
||||
def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add):
|
||||
"""
|
||||
Add nodes to the provided pool
|
||||
:param alias: the account alias
|
||||
:param location: the datacenter the load balancer resides in
|
||||
:param lb_id: the id string of the load balancer
|
||||
:param pool_id: the id string of the pool
|
||||
:param nodes_to_add: a list of dictionaries containing the nodes to add
|
||||
:return: (changed, result) -
|
||||
changed: Boolean whether a change was made
|
||||
result: The result from the CLC API call
|
||||
"""
|
||||
changed = False
|
||||
result = {}
|
||||
nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
|
||||
for node in nodes_to_add:
|
||||
if not node.get('status'):
|
||||
node['status'] = 'enabled'
|
||||
if node not in nodes:
|
||||
changed = True
|
||||
nodes.append(node)
|
||||
if changed is True and not self.module.check_mode:
|
||||
result = self.set_loadbalancernodes(
|
||||
alias,
|
||||
location,
|
||||
lb_id,
|
||||
pool_id,
|
||||
nodes)
|
||||
return changed, result
|
||||
|
||||
def remove_lbpool_nodes(
|
||||
self, alias, location, lb_id, pool_id, nodes_to_remove):
|
||||
"""
|
||||
Removes nodes from the provided pool
|
||||
:param alias: the account alias
|
||||
:param location: the datacenter the load balancer resides in
|
||||
:param lb_id: the id string of the load balancer
|
||||
:param pool_id: the id string of the pool
|
||||
:param nodes_to_remove: a list of dictionaries containing the nodes to remove
|
||||
:return: (changed, result) -
|
||||
changed: Boolean whether a change was made
|
||||
result: The result from the CLC API call
|
||||
"""
|
||||
changed = False
|
||||
result = {}
|
||||
nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
|
||||
for node in nodes_to_remove:
|
||||
if not node.get('status'):
|
||||
node['status'] = 'enabled'
|
||||
if node in nodes:
|
||||
changed = True
|
||||
nodes.remove(node)
|
||||
if changed is True and not self.module.check_mode:
|
||||
result = self.set_loadbalancernodes(
|
||||
alias,
|
||||
location,
|
||||
lb_id,
|
||||
pool_id,
|
||||
nodes)
|
||||
return changed, result
|
||||
|
||||
def _get_lbpool_nodes(self, alias, location, lb_id, pool_id):
|
||||
"""
|
||||
Return the list of nodes available to the provided load balancer pool
|
||||
:param alias: the account alias
|
||||
:param location: the datacenter the load balancer resides in
|
||||
:param lb_id: the id string of the load balancer
|
||||
:param pool_id: the id string of the pool
|
||||
:return: result: The list of nodes
|
||||
"""
|
||||
result = None
|
||||
try:
|
||||
result = self.clc.v2.API.Call('GET',
|
||||
'/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
|
||||
% (alias, location, lb_id, pool_id))
|
||||
except APIFailedResponse as e:
|
||||
self.module.fail_json(
|
||||
msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format(
|
||||
pool_id, str(e.response_text)))
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def define_argument_spec():
|
||||
"""
|
||||
Define the argument spec for the ansible module
|
||||
:return: argument spec dictionary
|
||||
"""
|
||||
argument_spec = dict(
|
||||
name=dict(required=True),
|
||||
description=dict(),
|
||||
location=dict(required=True),
|
||||
alias=dict(required=True),
|
||||
port=dict(choices=[80, 443]),
|
||||
method=dict(choices=['leastConnection', 'roundRobin']),
|
||||
persistence=dict(choices=['standard', 'sticky']),
|
||||
nodes=dict(type='list', default=[], elements='dict'),
|
||||
status=dict(default='enabled', choices=['enabled', 'disabled']),
|
||||
state=dict(
|
||||
default='present',
|
||||
choices=[
|
||||
'present',
|
||||
'absent',
|
||||
'port_absent',
|
||||
'nodes_present',
|
||||
'nodes_absent'])
|
||||
)
|
||||
return argument_spec
|
||||
|
||||
def _set_clc_credentials_from_env(self):
|
||||
"""
|
||||
Set the CLC Credentials on the sdk by reading environment variables
|
||||
:return: none
|
||||
"""
|
||||
env = os.environ
|
||||
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
|
||||
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
|
||||
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
|
||||
clc_alias = env.get('CLC_ACCT_ALIAS', False)
|
||||
api_url = env.get('CLC_V2_API_URL', False)
|
||||
|
||||
if api_url:
|
||||
self.clc.defaults.ENDPOINT_URL_V2 = api_url
|
||||
|
||||
if v2_api_token and clc_alias:
|
||||
self.clc._LOGIN_TOKEN_V2 = v2_api_token
|
||||
self.clc._V2_ENABLED = True
|
||||
self.clc.ALIAS = clc_alias
|
||||
elif v2_api_username and v2_api_passwd:
|
||||
self.clc.v2.SetCredentials(
|
||||
api_username=v2_api_username,
|
||||
api_passwd=v2_api_passwd)
|
||||
else:
|
||||
return self.module.fail_json(
|
||||
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
|
||||
"environment variables")
|
||||
|
||||
@staticmethod
|
||||
def _set_user_agent(clc):
|
||||
if hasattr(clc, 'SetRequestsSession'):
|
||||
agent_string = "ClcAnsibleModule/" + __version__
|
||||
ses = requests.Session()
|
||||
ses.headers.update({"Api-Client": agent_string})
|
||||
ses.headers['User-Agent'] += " " + agent_string
|
||||
clc.SetRequestsSession(ses)
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
The main function. Instantiates the module and calls process_request.
|
||||
:return: none
|
||||
"""
|
||||
module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(),
|
||||
supports_check_mode=True)
|
||||
clc_loadbalancer = ClcLoadBalancer(module)
|
||||
clc_loadbalancer.process_request()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,968 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2015 CenturyLink
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: clc_modify_server
|
||||
short_description: Modify servers in CenturyLink Cloud
|
||||
description:
|
||||
- An Ansible module to modify servers in CenturyLink Cloud.
|
||||
options:
|
||||
server_ids:
|
||||
description:
|
||||
- A list of server Ids to modify.
|
||||
type: list
|
||||
required: true
|
||||
elements: str
|
||||
cpu:
|
||||
description:
|
||||
- How many CPUs to update on the server
|
||||
type: str
|
||||
memory:
|
||||
description:
|
||||
- Memory (in GB) to set to the server.
|
||||
type: str
|
||||
anti_affinity_policy_id:
|
||||
description:
|
||||
- The anti affinity policy id to be set for a hyper scale server.
|
||||
This is mutually exclusive with 'anti_affinity_policy_name'
|
||||
type: str
|
||||
anti_affinity_policy_name:
|
||||
description:
|
||||
- The anti affinity policy name to be set for a hyper scale server.
|
||||
This is mutually exclusive with 'anti_affinity_policy_id'
|
||||
type: str
|
||||
alert_policy_id:
|
||||
description:
|
||||
- The alert policy id to be associated to the server.
|
||||
This is mutually exclusive with 'alert_policy_name'
|
||||
type: str
|
||||
alert_policy_name:
|
||||
description:
|
||||
- The alert policy name to be associated to the server.
|
||||
This is mutually exclusive with 'alert_policy_id'
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- The state to insure that the provided resources are in.
|
||||
type: str
|
||||
default: 'present'
|
||||
choices: ['present', 'absent']
|
||||
wait:
|
||||
description:
|
||||
- Whether to wait for the provisioning tasks to finish before returning.
|
||||
type: bool
|
||||
default: true
|
||||
requirements:
|
||||
- python = 2.7
|
||||
- requests >= 2.5.0
|
||||
- clc-sdk
|
||||
author: "CLC Runner (@clc-runner)"
|
||||
notes:
|
||||
- To use this module, it is required to set the below environment variables which enables access to the
|
||||
Centurylink Cloud
|
||||
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
|
||||
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
|
||||
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
|
||||
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
|
||||
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
|
||||
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
|
||||
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
|
||||
|
||||
- name: Set the cpu count to 4 on a server
|
||||
community.general.clc_modify_server:
|
||||
server_ids:
|
||||
- UC1TESTSVR01
|
||||
- UC1TESTSVR02
|
||||
cpu: 4
|
||||
state: present
|
||||
|
||||
- name: Set the memory to 8GB on a server
|
||||
community.general.clc_modify_server:
|
||||
server_ids:
|
||||
- UC1TESTSVR01
|
||||
- UC1TESTSVR02
|
||||
memory: 8
|
||||
state: present
|
||||
|
||||
- name: Set the anti affinity policy on a server
|
||||
community.general.clc_modify_server:
|
||||
server_ids:
|
||||
- UC1TESTSVR01
|
||||
- UC1TESTSVR02
|
||||
anti_affinity_policy_name: 'aa_policy'
|
||||
state: present
|
||||
|
||||
- name: Remove the anti affinity policy on a server
|
||||
community.general.clc_modify_server:
|
||||
server_ids:
|
||||
- UC1TESTSVR01
|
||||
- UC1TESTSVR02
|
||||
anti_affinity_policy_name: 'aa_policy'
|
||||
state: absent
|
||||
|
||||
- name: Add the alert policy on a server
|
||||
community.general.clc_modify_server:
|
||||
server_ids:
|
||||
- UC1TESTSVR01
|
||||
- UC1TESTSVR02
|
||||
alert_policy_name: 'alert_policy'
|
||||
state: present
|
||||
|
||||
- name: Remove the alert policy on a server
|
||||
community.general.clc_modify_server:
|
||||
server_ids:
|
||||
- UC1TESTSVR01
|
||||
- UC1TESTSVR02
|
||||
alert_policy_name: 'alert_policy'
|
||||
state: absent
|
||||
|
||||
- name: Set the memory to 16GB and cpu to 8 cores on a list of servers
|
||||
community.general.clc_modify_server:
|
||||
server_ids:
|
||||
- UC1TESTSVR01
|
||||
- UC1TESTSVR02
|
||||
cpu: 8
|
||||
memory: 16
|
||||
state: present
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
server_ids:
|
||||
description: The list of server ids that are changed
|
||||
returned: success
|
||||
type: list
|
||||
sample:
|
||||
[
|
||||
"UC1TEST-SVR01",
|
||||
"UC1TEST-SVR02"
|
||||
]
|
||||
servers:
|
||||
description: The list of server objects that are changed
|
||||
returned: success
|
||||
type: list
|
||||
sample:
|
||||
[
|
||||
{
|
||||
"changeInfo":{
|
||||
"createdBy":"service.wfad",
|
||||
"createdDate":1438196820,
|
||||
"modifiedBy":"service.wfad",
|
||||
"modifiedDate":1438196820
|
||||
},
|
||||
"description":"test-server",
|
||||
"details":{
|
||||
"alertPolicies":[
|
||||
|
||||
],
|
||||
"cpu":1,
|
||||
"customFields":[
|
||||
|
||||
],
|
||||
"diskCount":3,
|
||||
"disks":[
|
||||
{
|
||||
"id":"0:0",
|
||||
"partitionPaths":[
|
||||
|
||||
],
|
||||
"sizeGB":1
|
||||
},
|
||||
{
|
||||
"id":"0:1",
|
||||
"partitionPaths":[
|
||||
|
||||
],
|
||||
"sizeGB":2
|
||||
},
|
||||
{
|
||||
"id":"0:2",
|
||||
"partitionPaths":[
|
||||
|
||||
],
|
||||
"sizeGB":14
|
||||
}
|
||||
],
|
||||
"hostName":"",
|
||||
"inMaintenanceMode":false,
|
||||
"ipAddresses":[
|
||||
{
|
||||
"internal":"10.1.1.1"
|
||||
}
|
||||
],
|
||||
"memoryGB":1,
|
||||
"memoryMB":1024,
|
||||
"partitions":[
|
||||
|
||||
],
|
||||
"powerState":"started",
|
||||
"snapshots":[
|
||||
|
||||
],
|
||||
"storageGB":17
|
||||
},
|
||||
"groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
|
||||
"id":"test-server",
|
||||
"ipaddress":"10.120.45.23",
|
||||
"isTemplate":false,
|
||||
"links":[
|
||||
{
|
||||
"href":"/v2/servers/wfad/test-server",
|
||||
"id":"test-server",
|
||||
"rel":"self",
|
||||
"verbs":[
|
||||
"GET",
|
||||
"PATCH",
|
||||
"DELETE"
|
||||
]
|
||||
},
|
||||
{
|
||||
"href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
|
||||
"id":"086ac1dfe0b6411989e8d1b77c4065f0",
|
||||
"rel":"group"
|
||||
},
|
||||
{
|
||||
"href":"/v2/accounts/wfad",
|
||||
"id":"wfad",
|
||||
"rel":"account"
|
||||
},
|
||||
{
|
||||
"href":"/v2/billing/wfad/serverPricing/test-server",
|
||||
"rel":"billing"
|
||||
},
|
||||
{
|
||||
"href":"/v2/servers/wfad/test-server/publicIPAddresses",
|
||||
"rel":"publicIPAddresses",
|
||||
"verbs":[
|
||||
"POST"
|
||||
]
|
||||
},
|
||||
{
|
||||
"href":"/v2/servers/wfad/test-server/credentials",
|
||||
"rel":"credentials"
|
||||
},
|
||||
{
|
||||
"href":"/v2/servers/wfad/test-server/statistics",
|
||||
"rel":"statistics"
|
||||
},
|
||||
{
|
||||
"href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
|
||||
"rel":"upcomingScheduledActivities"
|
||||
},
|
||||
{
|
||||
"href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
|
||||
"rel":"scheduledActivities",
|
||||
"verbs":[
|
||||
"GET",
|
||||
"POST"
|
||||
]
|
||||
},
|
||||
{
|
||||
"href":"/v2/servers/wfad/test-server/capabilities",
|
||||
"rel":"capabilities"
|
||||
},
|
||||
{
|
||||
"href":"/v2/servers/wfad/test-server/alertPolicies",
|
||||
"rel":"alertPolicyMappings",
|
||||
"verbs":[
|
||||
"POST"
|
||||
]
|
||||
},
|
||||
{
|
||||
"href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
|
||||
"rel":"antiAffinityPolicyMapping",
|
||||
"verbs":[
|
||||
"PUT",
|
||||
"DELETE"
|
||||
]
|
||||
},
|
||||
{
|
||||
"href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
|
||||
"rel":"cpuAutoscalePolicyMapping",
|
||||
"verbs":[
|
||||
"PUT",
|
||||
"DELETE"
|
||||
]
|
||||
}
|
||||
],
|
||||
"locationId":"UC1",
|
||||
"name":"test-server",
|
||||
"os":"ubuntu14_64Bit",
|
||||
"osType":"Ubuntu 14 64-bit",
|
||||
"status":"active",
|
||||
"storageType":"standard",
|
||||
"type":"standard"
|
||||
}
|
||||
]
|
||||
'''
|
||||
|
||||
__version__ = '${version}'
|
||||
|
||||
import json
|
||||
import os
|
||||
import traceback
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
import requests
|
||||
except ImportError:
|
||||
REQUESTS_IMP_ERR = traceback.format_exc()
|
||||
REQUESTS_FOUND = False
|
||||
else:
|
||||
REQUESTS_FOUND = True
|
||||
|
||||
#
|
||||
# Requires the clc-python-sdk.
|
||||
# sudo pip install clc-sdk
|
||||
#
|
||||
CLC_IMP_ERR = None
|
||||
try:
|
||||
import clc as clc_sdk
|
||||
from clc import CLCException
|
||||
from clc import APIFailedResponse
|
||||
except ImportError:
|
||||
CLC_IMP_ERR = traceback.format_exc()
|
||||
CLC_FOUND = False
|
||||
clc_sdk = None
|
||||
else:
|
||||
CLC_FOUND = True
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
|
||||
|
||||
class ClcModifyServer:
|
||||
clc = clc_sdk
|
||||
|
||||
def __init__(self, module):
|
||||
"""
|
||||
Construct module
|
||||
"""
|
||||
self.clc = clc_sdk
|
||||
self.module = module
|
||||
|
||||
if not CLC_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
||||
if not REQUESTS_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
||||
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||
self.module.fail_json(
|
||||
msg='requests library version should be >= 2.5.0')
|
||||
|
||||
self._set_user_agent(self.clc)
|
||||
|
||||
def process_request(self):
|
||||
"""
|
||||
Process the request - Main Code Path
|
||||
:return: Returns with either an exit_json or fail_json
|
||||
"""
|
||||
self._set_clc_credentials_from_env()
|
||||
|
||||
p = self.module.params
|
||||
cpu = p.get('cpu')
|
||||
memory = p.get('memory')
|
||||
state = p.get('state')
|
||||
if state == 'absent' and (cpu or memory):
|
||||
return self.module.fail_json(
|
||||
msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments')
|
||||
|
||||
server_ids = p['server_ids']
|
||||
if not isinstance(server_ids, list):
|
||||
return self.module.fail_json(
|
||||
msg='server_ids needs to be a list of instances to modify: %s' %
|
||||
server_ids)
|
||||
|
||||
(changed, server_dict_array, changed_server_ids) = self._modify_servers(
|
||||
server_ids=server_ids)
|
||||
|
||||
self.module.exit_json(
|
||||
changed=changed,
|
||||
server_ids=changed_server_ids,
|
||||
servers=server_dict_array)
|
||||
|
||||
@staticmethod
|
||||
def _define_module_argument_spec():
|
||||
"""
|
||||
Define the argument spec for the ansible module
|
||||
:return: argument spec dictionary
|
||||
"""
|
||||
argument_spec = dict(
|
||||
server_ids=dict(type='list', required=True, elements='str'),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
cpu=dict(),
|
||||
memory=dict(),
|
||||
anti_affinity_policy_id=dict(),
|
||||
anti_affinity_policy_name=dict(),
|
||||
alert_policy_id=dict(),
|
||||
alert_policy_name=dict(),
|
||||
wait=dict(type='bool', default=True)
|
||||
)
|
||||
mutually_exclusive = [
|
||||
['anti_affinity_policy_id', 'anti_affinity_policy_name'],
|
||||
['alert_policy_id', 'alert_policy_name']
|
||||
]
|
||||
return {"argument_spec": argument_spec,
|
||||
"mutually_exclusive": mutually_exclusive}
|
||||
|
||||
def _set_clc_credentials_from_env(self):
|
||||
"""
|
||||
Set the CLC Credentials on the sdk by reading environment variables
|
||||
:return: none
|
||||
"""
|
||||
env = os.environ
|
||||
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
|
||||
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
|
||||
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
|
||||
clc_alias = env.get('CLC_ACCT_ALIAS', False)
|
||||
api_url = env.get('CLC_V2_API_URL', False)
|
||||
|
||||
if api_url:
|
||||
self.clc.defaults.ENDPOINT_URL_V2 = api_url
|
||||
|
||||
if v2_api_token and clc_alias:
|
||||
self.clc._LOGIN_TOKEN_V2 = v2_api_token
|
||||
self.clc._V2_ENABLED = True
|
||||
self.clc.ALIAS = clc_alias
|
||||
elif v2_api_username and v2_api_passwd:
|
||||
self.clc.v2.SetCredentials(
|
||||
api_username=v2_api_username,
|
||||
api_passwd=v2_api_passwd)
|
||||
else:
|
||||
return self.module.fail_json(
|
||||
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
|
||||
"environment variables")
|
||||
|
||||
def _get_servers_from_clc(self, server_list, message):
|
||||
"""
|
||||
Internal function to fetch list of CLC server objects from a list of server ids
|
||||
:param server_list: The list of server ids
|
||||
:param message: the error message to throw in case of any error
|
||||
:return the list of CLC server objects
|
||||
"""
|
||||
try:
|
||||
return self.clc.v2.Servers(server_list).servers
|
||||
except CLCException as ex:
|
||||
return self.module.fail_json(msg=message + ': %s' % ex.message)
|
||||
|
||||
def _modify_servers(self, server_ids):
|
||||
"""
|
||||
modify the configuration of the servers in the provided list
|
||||
:param server_ids: list of servers to modify
|
||||
:return: a list of dictionaries with server information about the servers that were modified
|
||||
"""
|
||||
p = self.module.params
|
||||
state = p.get('state')
|
||||
server_params = {
|
||||
'cpu': p.get('cpu'),
|
||||
'memory': p.get('memory'),
|
||||
'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
|
||||
'anti_affinity_policy_name': p.get('anti_affinity_policy_name'),
|
||||
'alert_policy_id': p.get('alert_policy_id'),
|
||||
'alert_policy_name': p.get('alert_policy_name'),
|
||||
}
|
||||
changed = False
|
||||
server_changed = False
|
||||
aa_changed = False
|
||||
ap_changed = False
|
||||
server_dict_array = []
|
||||
result_server_ids = []
|
||||
request_list = []
|
||||
changed_servers = []
|
||||
|
||||
if not isinstance(server_ids, list) or len(server_ids) < 1:
|
||||
return self.module.fail_json(
|
||||
msg='server_ids should be a list of servers, aborting')
|
||||
|
||||
servers = self._get_servers_from_clc(
|
||||
server_ids,
|
||||
'Failed to obtain server list from the CLC API')
|
||||
for server in servers:
|
||||
if state == 'present':
|
||||
server_changed, server_result = self._ensure_server_config(
|
||||
server, server_params)
|
||||
if server_result:
|
||||
request_list.append(server_result)
|
||||
aa_changed = self._ensure_aa_policy_present(
|
||||
server,
|
||||
server_params)
|
||||
ap_changed = self._ensure_alert_policy_present(
|
||||
server,
|
||||
server_params)
|
||||
elif state == 'absent':
|
||||
aa_changed = self._ensure_aa_policy_absent(
|
||||
server,
|
||||
server_params)
|
||||
ap_changed = self._ensure_alert_policy_absent(
|
||||
server,
|
||||
server_params)
|
||||
if server_changed or aa_changed or ap_changed:
|
||||
changed_servers.append(server)
|
||||
changed = True
|
||||
|
||||
self._wait_for_requests(self.module, request_list)
|
||||
self._refresh_servers(self.module, changed_servers)
|
||||
|
||||
for server in changed_servers:
|
||||
server_dict_array.append(server.data)
|
||||
result_server_ids.append(server.id)
|
||||
|
||||
return changed, server_dict_array, result_server_ids
|
||||
|
||||
def _ensure_server_config(
|
||||
self, server, server_params):
|
||||
"""
|
||||
ensures the server is updated with the provided cpu and memory
|
||||
:param server: the CLC server object
|
||||
:param server_params: the dictionary of server parameters
|
||||
:return: (changed, result) -
|
||||
changed: Boolean whether a change was made
|
||||
result: The result from the CLC API call
|
||||
"""
|
||||
cpu = server_params.get('cpu')
|
||||
memory = server_params.get('memory')
|
||||
changed = False
|
||||
result = None
|
||||
|
||||
if not cpu:
|
||||
cpu = server.cpu
|
||||
if not memory:
|
||||
memory = server.memory
|
||||
if memory != server.memory or cpu != server.cpu:
|
||||
if not self.module.check_mode:
|
||||
result = self._modify_clc_server(
|
||||
self.clc,
|
||||
self.module,
|
||||
server.id,
|
||||
cpu,
|
||||
memory)
|
||||
changed = True
|
||||
return changed, result
|
||||
|
||||
@staticmethod
|
||||
def _modify_clc_server(clc, module, server_id, cpu, memory):
|
||||
"""
|
||||
Modify the memory or CPU of a clc server.
|
||||
:param clc: the clc-sdk instance to use
|
||||
:param module: the AnsibleModule object
|
||||
:param server_id: id of the server to modify
|
||||
:param cpu: the new cpu value
|
||||
:param memory: the new memory value
|
||||
:return: the result of CLC API call
|
||||
"""
|
||||
result = None
|
||||
acct_alias = clc.v2.Account.GetAlias()
|
||||
try:
|
||||
# Update the server configuration
|
||||
job_obj = clc.v2.API.Call('PATCH',
|
||||
'servers/%s/%s' % (acct_alias,
|
||||
server_id),
|
||||
json.dumps([{"op": "set",
|
||||
"member": "memory",
|
||||
"value": memory},
|
||||
{"op": "set",
|
||||
"member": "cpu",
|
||||
"value": cpu}]))
|
||||
result = clc.v2.Requests(job_obj)
|
||||
except APIFailedResponse as ex:
|
||||
module.fail_json(
|
||||
msg='Unable to update the server configuration for server : "{0}". {1}'.format(
|
||||
server_id, str(ex.response_text)))
|
||||
return result
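
# For reference, the PATCH body built above serializes to a list of "set"
# operations; with cpu=4 and memory=8 it would look like (illustrative values):
#   [{"op": "set", "member": "memory", "value": 8},
#    {"op": "set", "member": "cpu", "value": 4}]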
|
||||
|
||||
@staticmethod
|
||||
def _wait_for_requests(module, request_list):
|
||||
"""
|
||||
Block until server provisioning requests are completed.
|
||||
:param module: the AnsibleModule object
|
||||
:param request_list: a list of clc-sdk.Request instances
|
||||
:return: none
|
||||
"""
|
||||
wait = module.params.get('wait')
|
||||
if wait:
|
||||
# Requests.WaitUntilComplete() returns the count of failed requests
|
||||
failed_requests_count = sum(
|
||||
[request.WaitUntilComplete() for request in request_list])
|
||||
|
||||
if failed_requests_count > 0:
|
||||
module.fail_json(
|
||||
msg='Unable to process modify server request')
|
||||
|
||||
@staticmethod
|
||||
def _refresh_servers(module, servers):
|
||||
"""
|
||||
Loop through a list of servers and refresh them.
|
||||
:param module: the AnsibleModule object
|
||||
:param servers: list of clc-sdk.Server instances to refresh
|
||||
:return: none
|
||||
"""
|
||||
for server in servers:
|
||||
try:
|
||||
server.Refresh()
|
||||
except CLCException as ex:
|
||||
module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
|
||||
server.id, ex.message
|
||||
))
|
||||
|
||||
def _ensure_aa_policy_present(
|
||||
self, server, server_params):
|
||||
"""
|
||||
ensures the server is updated with the provided anti affinity policy
|
||||
:param server: the CLC server object
|
||||
:param server_params: the dictionary of server parameters
|
||||
:return: changed - Boolean whether a change was made
|
||||
"""
|
||||
changed = False
|
||||
acct_alias = self.clc.v2.Account.GetAlias()
|
||||
|
||||
aa_policy_id = server_params.get('anti_affinity_policy_id')
|
||||
aa_policy_name = server_params.get('anti_affinity_policy_name')
|
||||
if not aa_policy_id and aa_policy_name:
|
||||
aa_policy_id = self._get_aa_policy_id_by_name(
|
||||
self.clc,
|
||||
self.module,
|
||||
acct_alias,
|
||||
aa_policy_name)
|
||||
current_aa_policy_id = self._get_aa_policy_id_of_server(
|
||||
self.clc,
|
||||
self.module,
|
||||
acct_alias,
|
||||
server.id)
|
||||
|
||||
if aa_policy_id and aa_policy_id != current_aa_policy_id:
|
||||
self._modify_aa_policy(
|
||||
self.clc,
|
||||
self.module,
|
||||
acct_alias,
|
||||
server.id,
|
||||
aa_policy_id)
|
||||
changed = True
|
||||
return changed
|
||||
|
||||
def _ensure_aa_policy_absent(
|
||||
self, server, server_params):
|
||||
"""
|
||||
ensures the provided anti affinity policy is removed from the server
|
||||
:param server: the CLC server object
|
||||
:param server_params: the dictionary of server parameters
|
||||
:return: changed - Boolean whether a change was made
|
||||
"""
|
||||
changed = False
|
||||
acct_alias = self.clc.v2.Account.GetAlias()
|
||||
aa_policy_id = server_params.get('anti_affinity_policy_id')
|
||||
aa_policy_name = server_params.get('anti_affinity_policy_name')
|
||||
if not aa_policy_id and aa_policy_name:
|
||||
aa_policy_id = self._get_aa_policy_id_by_name(
|
||||
self.clc,
|
||||
self.module,
|
||||
acct_alias,
|
||||
aa_policy_name)
|
||||
current_aa_policy_id = self._get_aa_policy_id_of_server(
|
||||
self.clc,
|
||||
self.module,
|
||||
acct_alias,
|
||||
server.id)
|
||||
|
||||
if aa_policy_id and aa_policy_id == current_aa_policy_id:
|
||||
self._delete_aa_policy(
|
||||
self.clc,
|
||||
self.module,
|
||||
acct_alias,
|
||||
server.id)
|
||||
changed = True
|
||||
return changed
|
||||
|
||||
@staticmethod
|
||||
def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id):
|
||||
"""
|
||||
modifies the anti affinity policy of the CLC server
|
||||
:param clc: the clc-sdk instance to use
|
||||
:param module: the AnsibleModule object
|
||||
:param acct_alias: the CLC account alias
|
||||
:param server_id: the CLC server id
|
||||
:param aa_policy_id: the anti affinity policy id
|
||||
:return: result: The result from the CLC API call
|
||||
"""
|
||||
result = None
|
||||
if not module.check_mode:
|
||||
try:
|
||||
result = clc.v2.API.Call('PUT',
|
||||
'servers/%s/%s/antiAffinityPolicy' % (
|
||||
acct_alias,
|
||||
server_id),
|
||||
json.dumps({"id": aa_policy_id}))
|
||||
except APIFailedResponse as ex:
|
||||
module.fail_json(
|
||||
msg='Unable to modify anti affinity policy for server : "{0}". {1}'.format(
|
||||
server_id, str(ex.response_text)))
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def _delete_aa_policy(clc, module, acct_alias, server_id):
|
||||
"""
|
||||
Delete the anti affinity policy of the CLC server
|
||||
:param clc: the clc-sdk instance to use
|
||||
:param module: the AnsibleModule object
|
||||
:param acct_alias: the CLC account alias
|
||||
:param server_id: the CLC server id
|
||||
:return: result: The result from the CLC API call
|
||||
"""
|
||||
result = None
|
||||
if not module.check_mode:
|
||||
try:
|
||||
result = clc.v2.API.Call('DELETE',
|
||||
'servers/%s/%s/antiAffinityPolicy' % (
|
||||
acct_alias,
|
||||
server_id),
|
||||
json.dumps({}))
|
||||
except APIFailedResponse as ex:
|
||||
module.fail_json(
|
||||
msg='Unable to delete anti affinity policy from server : "{0}". {1}'.format(
|
||||
server_id, str(ex.response_text)))
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name):
|
||||
"""
|
||||
retrieves the anti affinity policy id of the server based on the name of the policy
|
||||
:param clc: the clc-sdk instance to use
|
||||
:param module: the AnsibleModule object
|
||||
:param alias: the CLC account alias
|
||||
:param aa_policy_name: the anti affinity policy name
|
||||
:return: aa_policy_id: The anti affinity policy id
|
||||
"""
|
||||
aa_policy_id = None
|
||||
try:
|
||||
aa_policies = clc.v2.API.Call(method='GET',
|
||||
url='antiAffinityPolicies/%s' % alias)
|
||||
except APIFailedResponse as ex:
|
||||
return module.fail_json(
|
||||
msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format(
|
||||
alias, str(ex.response_text)))
|
||||
for aa_policy in aa_policies.get('items'):
|
||||
if aa_policy.get('name') == aa_policy_name:
|
||||
if not aa_policy_id:
|
||||
aa_policy_id = aa_policy.get('id')
|
||||
else:
|
||||
return module.fail_json(
|
||||
msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
|
||||
if not aa_policy_id:
|
||||
module.fail_json(
|
||||
msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
|
||||
return aa_policy_id
|
||||
|
||||
@staticmethod
|
||||
def _get_aa_policy_id_of_server(clc, module, alias, server_id):
|
||||
"""
|
||||
retrieves the anti affinity policy id of the server based on the CLC server id
|
||||
:param clc: the clc-sdk instance to use
|
||||
:param module: the AnsibleModule object
|
||||
:param alias: the CLC account alias
|
||||
:param server_id: the CLC server id
|
||||
:return: aa_policy_id: The anti affinity policy id
|
||||
"""
|
||||
aa_policy_id = None
|
||||
try:
|
||||
result = clc.v2.API.Call(
|
||||
method='GET', url='servers/%s/%s/antiAffinityPolicy' %
|
||||
(alias, server_id))
|
||||
aa_policy_id = result.get('id')
|
||||
except APIFailedResponse as ex:
|
||||
if ex.response_status_code != 404:
|
||||
module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". {1}'.format(
|
||||
server_id, str(ex.response_text)))
|
||||
return aa_policy_id
|
||||
|
||||
def _ensure_alert_policy_present(
|
||||
self, server, server_params):
|
||||
"""
|
||||
ensures the server is updated with the provided alert policy
|
||||
:param server: the CLC server object
|
||||
:param server_params: the dictionary of server parameters
|
||||
:return: changed - Boolean whether a change was made
|
||||
"""
|
||||
changed = False
|
||||
acct_alias = self.clc.v2.Account.GetAlias()
|
||||
alert_policy_id = server_params.get('alert_policy_id')
|
||||
alert_policy_name = server_params.get('alert_policy_name')
|
||||
if not alert_policy_id and alert_policy_name:
|
||||
alert_policy_id = self._get_alert_policy_id_by_name(
|
||||
self.clc,
|
||||
self.module,
|
||||
acct_alias,
|
||||
alert_policy_name)
|
||||
if alert_policy_id and not self._alert_policy_exists(
|
||||
server, alert_policy_id):
|
||||
self._add_alert_policy_to_server(
|
||||
self.clc,
|
||||
self.module,
|
||||
acct_alias,
|
||||
server.id,
|
||||
alert_policy_id)
|
||||
changed = True
|
||||
return changed
|
||||
|
||||
def _ensure_alert_policy_absent(
|
||||
self, server, server_params):
|
||||
"""
|
||||
ensures the alert policy is removed from the server
|
||||
:param server: the CLC server object
|
||||
:param server_params: the dictionary of server parameters
|
||||
:return: changed - Boolean whether a change was made
|
||||
"""
|
||||
changed = False
|
||||
|
||||
acct_alias = self.clc.v2.Account.GetAlias()
|
||||
alert_policy_id = server_params.get('alert_policy_id')
|
||||
alert_policy_name = server_params.get('alert_policy_name')
|
||||
if not alert_policy_id and alert_policy_name:
|
||||
alert_policy_id = self._get_alert_policy_id_by_name(
|
||||
self.clc,
|
||||
self.module,
|
||||
acct_alias,
|
||||
alert_policy_name)
|
||||
|
||||
if alert_policy_id and self._alert_policy_exists(
|
||||
server, alert_policy_id):
|
||||
self._remove_alert_policy_to_server(
|
||||
self.clc,
|
||||
self.module,
|
||||
acct_alias,
|
||||
server.id,
|
||||
alert_policy_id)
|
||||
changed = True
|
||||
return changed
|
||||
|
||||
@staticmethod
|
||||
def _add_alert_policy_to_server(
|
||||
clc, module, acct_alias, server_id, alert_policy_id):
|
||||
"""
|
||||
add the alert policy to the CLC server
|
||||
:param clc: the clc-sdk instance to use
|
||||
:param module: the AnsibleModule object
|
||||
:param acct_alias: the CLC account alias
|
||||
:param server_id: the CLC server id
|
||||
:param alert_policy_id: the alert policy id
|
||||
:return: result: The result from the CLC API call
|
||||
"""
|
||||
result = None
|
||||
if not module.check_mode:
|
||||
try:
|
||||
result = clc.v2.API.Call('POST',
|
||||
'servers/%s/%s/alertPolicies' % (
|
||||
acct_alias,
|
||||
server_id),
|
||||
json.dumps({"id": alert_policy_id}))
|
||||
except APIFailedResponse as ex:
|
||||
module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format(
|
||||
server_id, str(ex.response_text)))
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def _remove_alert_policy_to_server(
|
||||
clc, module, acct_alias, server_id, alert_policy_id):
|
||||
"""
|
||||
remove the alert policy from the CLC server
|
||||
:param clc: the clc-sdk instance to use
|
||||
:param module: the AnsibleModule object
|
||||
:param acct_alias: the CLC account alias
|
||||
:param server_id: the CLC server id
|
||||
:param alert_policy_id: the alert policy id
|
||||
:return: result: The result from the CLC API call
|
||||
"""
|
||||
result = None
|
||||
if not module.check_mode:
|
||||
try:
|
||||
result = clc.v2.API.Call('DELETE',
|
||||
'servers/%s/%s/alertPolicies/%s'
|
||||
% (acct_alias, server_id, alert_policy_id))
|
||||
except APIFailedResponse as ex:
|
||||
module.fail_json(msg='Unable to remove alert policy from the server : "{0}". {1}'.format(
|
||||
server_id, str(ex.response_text)))
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
|
||||
"""
|
||||
retrieves the alert policy id of the server based on the name of the policy
|
||||
:param clc: the clc-sdk instance to use
|
||||
:param module: the AnsibleModule object
|
||||
:param alias: the CLC account alias
|
||||
:param alert_policy_name: the alert policy name
|
||||
:return: alert_policy_id: The alert policy id
|
||||
"""
|
||||
alert_policy_id = None
|
||||
try:
|
||||
alert_policies = clc.v2.API.Call(method='GET',
|
||||
url='alertPolicies/%s' % alias)
|
||||
except APIFailedResponse as ex:
|
||||
return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format(
|
||||
alias, str(ex.response_text)))
|
||||
for alert_policy in alert_policies.get('items'):
|
||||
if alert_policy.get('name') == alert_policy_name:
|
||||
if not alert_policy_id:
|
||||
alert_policy_id = alert_policy.get('id')
|
||||
else:
|
||||
return module.fail_json(
|
||||
msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
|
||||
return alert_policy_id
|
||||
|
||||
@staticmethod
|
||||
def _alert_policy_exists(server, alert_policy_id):
|
||||
"""
|
||||
Checks if the alert policy exists for the server
|
||||
:param server: the clc server object
|
||||
:param alert_policy_id: the alert policy
|
||||
:return: True if the given alert policy id is associated with the server, False otherwise
|
||||
"""
|
||||
result = False
|
||||
alert_policies = server.alertPolicies
|
||||
if alert_policies:
|
||||
for alert_policy in alert_policies:
|
||||
if alert_policy.get('id') == alert_policy_id:
|
||||
result = True
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def _set_user_agent(clc):
|
||||
if hasattr(clc, 'SetRequestsSession'):
|
||||
agent_string = "ClcAnsibleModule/" + __version__
|
||||
ses = requests.Session()
|
||||
ses.headers.update({"Api-Client": agent_string})
|
||||
ses.headers['User-Agent'] += " " + agent_string
|
||||
clc.SetRequestsSession(ses)
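
# With the build-time version placeholder substituted, the session configured
# above tags every SDK request with headers roughly like (illustrative):
#   Api-Client: ClcAnsibleModule/1.3
#   User-Agent: python-requests/2.x ClcAnsibleModule/1.3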
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
The main function. Instantiates the module and calls process_request.
|
||||
:return: none
|
||||
"""
|
||||
|
||||
argument_dict = ClcModifyServer._define_module_argument_spec()
|
||||
module = AnsibleModule(supports_check_mode=True, **argument_dict)
|
||||
clc_modify_server = ClcModifyServer(module)
|
||||
clc_modify_server.process_request()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,362 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2015 CenturyLink
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: clc_publicip
|
||||
short_description: Add and Delete public ips on servers in CenturyLink Cloud
|
||||
description:
|
||||
- An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud.
|
||||
options:
|
||||
protocol:
|
||||
description:
|
||||
- The protocol that the public IP will listen for.
|
||||
type: str
|
||||
default: TCP
|
||||
choices: ['TCP', 'UDP', 'ICMP']
|
||||
ports:
|
||||
description:
|
||||
- A list of ports to expose. This is required when state is 'present'.
|
||||
type: list
|
||||
elements: int
|
||||
server_ids:
|
||||
description:
|
||||
- A list of servers to create public ips on.
|
||||
type: list
|
||||
required: true
|
||||
elements: str
|
||||
state:
|
||||
description:
|
||||
- Determine whether to create or delete public IPs. If present, the module will not create a second public ip if one
|
||||
already exists.
|
||||
type: str
|
||||
default: present
|
||||
choices: ['present', 'absent']
|
||||
wait:
|
||||
description:
|
||||
- Whether to wait for the tasks to finish before returning.
|
||||
type: bool
|
||||
default: true
|
||||
requirements:
|
||||
- python = 2.7
|
||||
- requests >= 2.5.0
|
||||
- clc-sdk
|
||||
author: "CLC Runner (@clc-runner)"
|
||||
notes:
|
||||
- To use this module, it is required to set the below environment variables, which enable access to the
CenturyLink Cloud
|
||||
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
|
||||
- CLC_V2_API_PASSWD, the account password for the centurylink cloud
|
||||
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
|
||||
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
|
||||
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
|
||||
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
|
||||
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
|
||||
|
||||
- name: Add Public IP to Server
|
||||
hosts: localhost
|
||||
gather_facts: false
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Create Public IP For Servers
|
||||
community.general.clc_publicip:
|
||||
protocol: TCP
|
||||
ports:
|
||||
- 80
|
||||
server_ids:
|
||||
- UC1TEST-SVR01
|
||||
- UC1TEST-SVR02
|
||||
state: present
|
||||
register: clc
|
||||
|
||||
- name: Debug
|
||||
ansible.builtin.debug:
|
||||
var: clc
|
||||
|
||||
- name: Delete Public IP from Server
|
||||
hosts: localhost
|
||||
gather_facts: false
|
||||
connection: local
|
||||
tasks:
|
||||
- name: Delete Public IP From Servers
|
||||
community.general.clc_publicip:
|
||||
server_ids:
|
||||
- UC1TEST-SVR01
|
||||
- UC1TEST-SVR02
|
||||
state: absent
|
||||
register: clc
|
||||
|
||||
- name: Debug
|
||||
ansible.builtin.debug:
|
||||
var: clc
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
server_ids:
|
||||
description: The list of server ids that are changed
|
||||
returned: success
|
||||
type: list
|
||||
sample:
|
||||
[
|
||||
"UC1TEST-SVR01",
|
||||
"UC1TEST-SVR02"
|
||||
]
|
||||
'''
|
||||
|
||||
__version__ = '${version}'
|
||||
|
||||
import os
|
||||
import traceback
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
import requests
|
||||
except ImportError:
|
||||
REQUESTS_IMP_ERR = traceback.format_exc()
|
||||
REQUESTS_FOUND = False
|
||||
else:
|
||||
REQUESTS_FOUND = True
|
||||
|
||||
#
|
||||
# Requires the clc-python-sdk.
|
||||
# sudo pip install clc-sdk
|
||||
#
|
||||
CLC_IMP_ERR = None
|
||||
try:
|
||||
import clc as clc_sdk
|
||||
from clc import CLCException
|
||||
except ImportError:
|
||||
CLC_IMP_ERR = traceback.format_exc()
|
||||
CLC_FOUND = False
|
||||
clc_sdk = None
|
||||
else:
|
||||
CLC_FOUND = True
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
|
||||
|
||||
class ClcPublicIp(object):
|
||||
clc = clc_sdk
|
||||
module = None
|
||||
|
||||
def __init__(self, module):
|
||||
"""
|
||||
Construct module
|
||||
"""
|
||||
self.module = module
|
||||
if not CLC_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
||||
if not REQUESTS_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
||||
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||
self.module.fail_json(
|
||||
msg='requests library version should be >= 2.5.0')
|
||||
|
||||
self._set_user_agent(self.clc)
|
||||
|
||||
def process_request(self):
|
||||
"""
|
||||
Process the request - Main Code Path
|
||||
:return: Returns with either an exit_json or fail_json
|
||||
"""
|
||||
self._set_clc_credentials_from_env()
|
||||
params = self.module.params
|
||||
server_ids = params['server_ids']
|
||||
ports = params['ports']
|
||||
protocol = params['protocol']
|
||||
state = params['state']
|
||||
|
||||
if state == 'present':
|
||||
changed, changed_server_ids, requests = self.ensure_public_ip_present(
|
||||
server_ids=server_ids, protocol=protocol, ports=ports)
|
||||
elif state == 'absent':
|
||||
changed, changed_server_ids, requests = self.ensure_public_ip_absent(
|
||||
server_ids=server_ids)
|
||||
else:
|
||||
return self.module.fail_json(msg="Unknown State: " + state)
|
||||
self._wait_for_requests_to_complete(requests)
|
||||
return self.module.exit_json(changed=changed,
|
||||
server_ids=changed_server_ids)
|
||||
|
||||
@staticmethod
|
||||
def _define_module_argument_spec():
|
||||
"""
|
||||
Define the argument spec for the ansible module
|
||||
:return: argument spec dictionary
|
||||
"""
|
||||
argument_spec = dict(
|
||||
server_ids=dict(type='list', required=True, elements='str'),
|
||||
protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']),
|
||||
ports=dict(type='list', elements='int'),
|
||||
wait=dict(type='bool', default=True),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
)
|
||||
return argument_spec
|
||||
|
||||
def ensure_public_ip_present(self, server_ids, protocol, ports):
|
||||
"""
|
||||
Ensures the given server ids have the public ip available
|
||||
:param server_ids: the list of server ids
|
||||
:param protocol: the ip protocol
|
||||
:param ports: the list of ports to expose
|
||||
:return: (changed, changed_server_ids, results)
|
||||
changed: A flag indicating if there is any change
|
||||
changed_server_ids : the list of server ids that are changed
|
||||
results: The result list from clc public ip call
|
||||
"""
|
||||
changed = False
|
||||
results = []
|
||||
changed_server_ids = []
|
||||
servers = self._get_servers_from_clc(
|
||||
server_ids,
|
||||
'Failed to obtain server list from the CLC API')
|
||||
servers_to_change = [
|
||||
server for server in servers if len(
|
||||
server.PublicIPs().public_ips) == 0]
|
||||
ports_to_expose = [{'protocol': protocol, 'port': port}
|
||||
for port in ports]
|
||||
for server in servers_to_change:
|
||||
if not self.module.check_mode:
|
||||
result = self._add_publicip_to_server(server, ports_to_expose)
|
||||
results.append(result)
|
||||
changed_server_ids.append(server.id)
|
||||
changed = True
|
||||
return changed, changed_server_ids, results
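
# The ports_to_expose comprehension above expands the module arguments into the
# per-port dicts that PublicIPs().Add() expects; for example protocol='TCP'
# with ports=[80, 443] yields:
#   [{'protocol': 'TCP', 'port': 80}, {'protocol': 'TCP', 'port': 443}]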
|
||||
|
||||
def _add_publicip_to_server(self, server, ports_to_expose):
|
||||
result = None
|
||||
try:
|
||||
result = server.PublicIPs().Add(ports_to_expose)
|
||||
except CLCException as ex:
|
||||
self.module.fail_json(msg='Failed to add public ip to the server : {0}. {1}'.format(
|
||||
server.id, ex.response_text
|
||||
))
|
||||
return result
|
||||
|
||||
def ensure_public_ip_absent(self, server_ids):
|
||||
"""
|
||||
Ensures the given server ids have the public ip removed if there is any
|
||||
:param server_ids: the list of server ids
|
||||
:return: (changed, changed_server_ids, results)
|
||||
changed: A flag indicating if there is any change
|
||||
changed_server_ids : the list of server ids that are changed
|
||||
results: The result list from clc public ip call
|
||||
"""
|
||||
changed = False
|
||||
results = []
|
||||
changed_server_ids = []
|
||||
servers = self._get_servers_from_clc(
|
||||
server_ids,
|
||||
'Failed to obtain server list from the CLC API')
|
||||
servers_to_change = [
|
||||
server for server in servers if len(
|
||||
server.PublicIPs().public_ips) > 0]
|
||||
for server in servers_to_change:
|
||||
if not self.module.check_mode:
|
||||
result = self._remove_publicip_from_server(server)
|
||||
results.append(result)
|
||||
changed_server_ids.append(server.id)
|
||||
changed = True
|
||||
return changed, changed_server_ids, results
|
||||
|
||||
def _remove_publicip_from_server(self, server):
|
||||
result = None
|
||||
try:
|
||||
for ip_address in server.PublicIPs().public_ips:
|
||||
result = ip_address.Delete()
|
||||
except CLCException as ex:
|
||||
self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format(
|
||||
server.id, ex.response_text
|
||||
))
|
||||
return result
|
||||
|
||||
def _wait_for_requests_to_complete(self, requests_lst):
|
||||
"""
|
||||
Waits until the CLC requests are complete if the wait argument is True
|
||||
:param requests_lst: The list of CLC request objects
|
||||
:return: none
|
||||
"""
|
||||
if not self.module.params['wait']:
|
||||
return
|
||||
for request in requests_lst:
|
||||
request.WaitUntilComplete()
|
||||
for request_details in request.requests:
|
||||
if request_details.Status() != 'succeeded':
|
||||
self.module.fail_json(
|
||||
msg='Unable to process public ip request')
|
||||
|
||||
def _set_clc_credentials_from_env(self):
|
||||
"""
|
||||
Set the CLC Credentials on the sdk by reading environment variables
|
||||
:return: none
|
||||
"""
|
||||
env = os.environ
|
||||
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
|
||||
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
|
||||
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
|
||||
clc_alias = env.get('CLC_ACCT_ALIAS', False)
|
||||
api_url = env.get('CLC_V2_API_URL', False)
|
||||
|
||||
if api_url:
|
||||
self.clc.defaults.ENDPOINT_URL_V2 = api_url
|
||||
|
||||
if v2_api_token and clc_alias:
|
||||
self.clc._LOGIN_TOKEN_V2 = v2_api_token
|
||||
self.clc._V2_ENABLED = True
|
||||
self.clc.ALIAS = clc_alias
|
||||
elif v2_api_username and v2_api_passwd:
|
||||
self.clc.v2.SetCredentials(
|
||||
api_username=v2_api_username,
|
||||
api_passwd=v2_api_passwd)
|
||||
else:
|
||||
return self.module.fail_json(
|
||||
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
|
||||
"environment variables")
|
||||
|
||||
def _get_servers_from_clc(self, server_ids, message):
|
||||
"""
|
||||
Gets the list of servers from the CLC API
|
||||
"""
|
||||
try:
|
||||
return self.clc.v2.Servers(server_ids).servers
|
||||
except CLCException as exception:
|
||||
self.module.fail_json(msg=message + ': %s' % exception)
|
||||
|
||||
@staticmethod
|
||||
def _set_user_agent(clc):
|
||||
if hasattr(clc, 'SetRequestsSession'):
|
||||
agent_string = "ClcAnsibleModule/" + __version__
|
||||
ses = requests.Session()
|
||||
ses.headers.update({"Api-Client": agent_string})
|
||||
ses.headers['User-Agent'] += " " + agent_string
|
||||
clc.SetRequestsSession(ses)
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
The main function. Instantiates the module and calls process_request.
|
||||
:return: none
|
||||
"""
|
||||
module = AnsibleModule(
|
||||
argument_spec=ClcPublicIp._define_module_argument_spec(),
|
||||
supports_check_mode=True
|
||||
)
|
||||
clc_public_ip = ClcPublicIp(module)
|
||||
clc_public_ip.process_request()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,412 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2015 CenturyLink
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
module: clc_server_snapshot
|
||||
short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud
|
||||
description:
|
||||
- An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud.
|
||||
options:
|
||||
server_ids:
|
||||
description:
|
||||
- The list of CLC server Ids.
|
||||
type: list
|
||||
required: true
|
||||
elements: str
|
||||
expiration_days:
|
||||
description:
|
||||
- The number of days to keep the server snapshot before it expires.
|
||||
type: int
|
||||
default: 7
|
||||
required: false
|
||||
state:
|
||||
description:
|
||||
- The state to ensure that the provided resources are in.
|
||||
type: str
|
||||
default: 'present'
|
||||
required: false
|
||||
choices: ['present', 'absent', 'restore']
|
||||
wait:
|
||||
description:
|
||||
- Whether to wait for the provisioning tasks to finish before returning.
|
||||
default: 'True'
|
||||
required: false
|
||||
type: str
|
||||
requirements:
|
||||
- python = 2.7
|
||||
- requests >= 2.5.0
|
||||
- clc-sdk
|
||||
author: "CLC Runner (@clc-runner)"
|
||||
notes:
|
||||
- To use this module, it is required to set the below environment variables, which enable access to the
CenturyLink Cloud
|
||||
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
|
||||
- CLC_V2_API_PASSWD, the account password for the centurylink cloud
|
||||
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
|
||||
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
|
||||
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
|
||||
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
|
||||
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
|
||||
|
||||
- name: Create server snapshot
|
||||
community.general.clc_server_snapshot:
|
||||
server_ids:
|
||||
- UC1TEST-SVR01
|
||||
- UC1TEST-SVR02
|
||||
expiration_days: 10
|
||||
wait: true
|
||||
state: present
|
||||
|
||||
- name: Restore server snapshot
|
||||
community.general.clc_server_snapshot:
|
||||
server_ids:
|
||||
- UC1TEST-SVR01
|
||||
- UC1TEST-SVR02
|
||||
wait: true
|
||||
state: restore
|
||||
|
||||
- name: Delete server snapshot
|
||||
community.general.clc_server_snapshot:
|
||||
server_ids:
|
||||
- UC1TEST-SVR01
|
||||
- UC1TEST-SVR02
|
||||
wait: true
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
server_ids:
|
||||
description: The list of server ids that are changed
|
||||
returned: success
|
||||
type: list
|
||||
sample:
|
||||
[
|
||||
"UC1TEST-SVR01",
|
||||
"UC1TEST-SVR02"
|
||||
]
|
||||
'''
|
||||
|
||||
__version__ = '${version}'
|
||||
|
||||
import os
|
||||
import traceback
|
||||
|
||||
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
|
||||
|
||||
REQUESTS_IMP_ERR = None
|
||||
try:
|
||||
import requests
|
||||
except ImportError:
|
||||
REQUESTS_IMP_ERR = traceback.format_exc()
|
||||
REQUESTS_FOUND = False
|
||||
else:
|
||||
REQUESTS_FOUND = True
|
||||
|
||||
#
|
||||
# Requires the clc-python-sdk.
|
||||
# sudo pip install clc-sdk
|
||||
#
|
||||
CLC_IMP_ERR = None
|
||||
try:
|
||||
import clc as clc_sdk
|
||||
from clc import CLCException
|
||||
except ImportError:
|
||||
CLC_IMP_ERR = traceback.format_exc()
|
||||
CLC_FOUND = False
|
||||
clc_sdk = None
|
||||
else:
|
||||
CLC_FOUND = True
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
|
||||
|
||||
|
||||
class ClcSnapshot:
|
||||
|
||||
clc = clc_sdk
|
||||
module = None
|
||||
|
||||
def __init__(self, module):
|
||||
"""
|
||||
Construct module
|
||||
"""
|
||||
self.module = module
|
||||
|
||||
if not CLC_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
|
||||
if not REQUESTS_FOUND:
|
||||
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
|
||||
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
|
||||
self.module.fail_json(
|
||||
msg='requests library version should be >= 2.5.0')
|
||||
|
||||
self._set_user_agent(self.clc)
|
||||
|
||||
def process_request(self):
|
||||
"""
|
||||
Process the request - Main Code Path
|
||||
:return: Returns with either an exit_json or fail_json
|
||||
"""
|
||||
p = self.module.params
|
||||
server_ids = p['server_ids']
|
||||
expiration_days = p['expiration_days']
|
||||
state = p['state']
|
||||
request_list = []
|
||||
changed = False
|
||||
changed_servers = []
|
||||
|
||||
self._set_clc_credentials_from_env()
|
||||
if state == 'present':
|
||||
changed, request_list, changed_servers = self.ensure_server_snapshot_present(
|
||||
server_ids=server_ids,
|
||||
expiration_days=expiration_days)
|
||||
elif state == 'absent':
|
||||
changed, request_list, changed_servers = self.ensure_server_snapshot_absent(
|
||||
server_ids=server_ids)
|
||||
elif state == 'restore':
|
||||
changed, request_list, changed_servers = self.ensure_server_snapshot_restore(
|
||||
server_ids=server_ids)
|
||||
|
||||
self._wait_for_requests_to_complete(request_list)
|
||||
return self.module.exit_json(
|
||||
changed=changed,
|
||||
server_ids=changed_servers)
|
||||
|
||||
def ensure_server_snapshot_present(self, server_ids, expiration_days):
|
||||
"""
|
||||
Ensures the given set of server_ids have the snapshots created
|
||||
:param server_ids: The list of server_ids to create the snapshot
|
||||
:param expiration_days: The number of days to keep the snapshot
|
||||
:return: (changed, request_list, changed_servers)
|
||||
changed: A flag indicating whether any change was made
|
||||
request_list: the list of clc request objects from CLC API call
|
||||
changed_servers: The list of server ids that are modified
|
||||
"""
|
||||
request_list = []
|
||||
changed = False
|
||||
servers = self._get_servers_from_clc(
|
||||
server_ids,
|
||||
'Failed to obtain server list from the CLC API')
|
||||
servers_to_change = [
|
||||
server for server in servers if len(
|
||||
server.GetSnapshots()) == 0]
|
||||
for server in servers_to_change:
|
||||
changed = True
|
||||
if not self.module.check_mode:
|
||||
request = self._create_server_snapshot(server, expiration_days)
|
||||
request_list.append(request)
|
||||
changed_servers = [
|
||||
server.id for server in servers_to_change if server.id]
|
||||
return changed, request_list, changed_servers
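
# Only servers that currently have no snapshot are touched above, which keeps
# state=present idempotent: re-running the module against the same servers
# reports changed=False once every server has a snapshot.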
|
||||
|
||||
def _create_server_snapshot(self, server, expiration_days):
|
||||
"""
|
||||
Create the snapshot for the CLC server
|
||||
:param server: the CLC server object
|
||||
:param expiration_days: The number of days to keep the snapshot
|
||||
:return: the create request object from CLC API Call
|
||||
"""
|
||||
result = None
|
||||
try:
|
||||
result = server.CreateSnapshot(
|
||||
delete_existing=True,
|
||||
expiration_days=expiration_days)
|
||||
except CLCException as ex:
|
||||
self.module.fail_json(msg='Failed to create snapshot for server : {0}. {1}'.format(
|
||||
server.id, ex.response_text
|
||||
))
|
||||
return result
|
||||
|
||||
def ensure_server_snapshot_absent(self, server_ids):
|
||||
"""
|
||||
Ensures the given set of server_ids have the snapshots removed
|
||||
:param server_ids: The list of server_ids to delete the snapshot
|
||||
:return: (changed, request_list, changed_servers)
|
||||
changed: A flag indicating whether any change was made
|
||||
request_list: the list of clc request objects from CLC API call
|
||||
changed_servers: The list of server ids that are modified
|
||||
"""
|
||||
request_list = []
|
||||
changed = False
|
||||
servers = self._get_servers_from_clc(
|
||||
server_ids,
|
||||
'Failed to obtain server list from the CLC API')
|
||||
servers_to_change = [
|
||||
server for server in servers if len(
|
||||
server.GetSnapshots()) > 0]
|
||||
for server in servers_to_change:
|
||||
changed = True
|
||||
if not self.module.check_mode:
|
||||
request = self._delete_server_snapshot(server)
|
||||
request_list.append(request)
|
||||
changed_servers = [
|
||||
server.id for server in servers_to_change if server.id]
|
||||
return changed, request_list, changed_servers
|
||||
|
||||
def _delete_server_snapshot(self, server):
|
||||
"""
|
||||
Delete snapshot for the CLC server
|
||||
:param server: the CLC server object
|
||||
:return: the delete snapshot request object from CLC API
|
||||
"""
|
||||
result = None
|
||||
try:
|
||||
result = server.DeleteSnapshot()
|
||||
except CLCException as ex:
|
||||
self.module.fail_json(msg='Failed to delete snapshot for server : {0}. {1}'.format(
|
||||
server.id, ex.response_text
|
||||
))
|
||||
return result
|
||||
|
||||
def ensure_server_snapshot_restore(self, server_ids):
|
||||
"""
|
||||
Ensures the given set of server_ids have the snapshots restored
|
||||
:param server_ids: The list of server_ids to restore the snapshot
|
||||
:return: (changed, request_list, changed_servers)
|
||||
changed: A flag indicating whether any change was made
|
||||
request_list: the list of clc request objects from CLC API call
|
||||
changed_servers: The list of server ids that are modified
|
||||
"""
|
||||
request_list = []
|
||||
changed = False
|
||||
servers = self._get_servers_from_clc(
|
||||
server_ids,
|
||||
'Failed to obtain server list from the CLC API')
|
||||
servers_to_change = [
|
||||
server for server in servers if len(
|
||||
server.GetSnapshots()) > 0]
|
||||
for server in servers_to_change:
|
||||
changed = True
|
||||
if not self.module.check_mode:
|
||||
request = self._restore_server_snapshot(server)
|
||||
request_list.append(request)
|
||||
changed_servers = [
|
||||
server.id for server in servers_to_change if server.id]
|
||||
return changed, request_list, changed_servers
|
||||
|
||||
def _restore_server_snapshot(self, server):
|
||||
"""
|
||||
Restore snapshot for the CLC server
|
||||
:param server: the CLC server object
|
||||
:return: the restore snapshot request object from CLC API
|
||||
"""
|
||||
result = None
|
||||
try:
|
||||
result = server.RestoreSnapshot()
|
||||
except CLCException as ex:
|
||||
self.module.fail_json(msg='Failed to restore snapshot for server : {0}. {1}'.format(
|
||||
server.id, ex.response_text
|
||||
))
|
||||
return result
|
||||
|
||||
def _wait_for_requests_to_complete(self, requests_lst):
|
||||
"""
|
||||
Waits until the CLC requests are complete if the wait argument is True
|
||||
:param requests_lst: The list of CLC request objects
|
||||
:return: none
|
||||
"""
|
||||
if not self.module.params['wait']:
|
||||
return
|
||||
for request in requests_lst:
|
||||
request.WaitUntilComplete()
|
||||
for request_details in request.requests:
|
||||
if request_details.Status() != 'succeeded':
|
||||
self.module.fail_json(
|
||||
msg='Unable to process server snapshot request')
|
||||
|
||||
@staticmethod
|
||||
def define_argument_spec():
|
||||
"""
|
||||
This function defines the dictionary object required for
the snapshot module
:return: the argument spec dictionary object
|
||||
"""
|
||||
argument_spec = dict(
|
||||
server_ids=dict(type='list', required=True, elements='str'),
|
||||
expiration_days=dict(default=7, type='int'),
|
||||
wait=dict(default=True),
|
||||
state=dict(
|
||||
default='present',
|
||||
choices=[
|
||||
'present',
|
||||
'absent',
|
||||
'restore']),
|
||||
)
|
||||
return argument_spec
|
||||
|
||||
def _get_servers_from_clc(self, server_list, message):
|
||||
"""
|
||||
Internal function to fetch list of CLC server objects from a list of server ids
|
||||
:param server_list: The list of server ids
|
||||
:param message: The error message to throw in case of any error
|
||||
:return the list of CLC server objects
|
||||
"""
|
||||
try:
|
||||
return self.clc.v2.Servers(server_list).servers
|
||||
except CLCException as ex:
|
||||
return self.module.fail_json(msg=message + ': %s' % ex)
|
||||
|
||||
def _set_clc_credentials_from_env(self):
|
||||
"""
|
||||
Set the CLC Credentials on the sdk by reading environment variables
|
||||
:return: none
|
||||
"""
|
||||
env = os.environ
|
||||
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
|
||||
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
|
||||
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
|
||||
clc_alias = env.get('CLC_ACCT_ALIAS', False)
|
||||
api_url = env.get('CLC_V2_API_URL', False)
|
||||
|
||||
if api_url:
|
||||
self.clc.defaults.ENDPOINT_URL_V2 = api_url
|
||||
|
||||
if v2_api_token and clc_alias:
|
||||
self.clc._LOGIN_TOKEN_V2 = v2_api_token
|
||||
self.clc._V2_ENABLED = True
|
||||
self.clc.ALIAS = clc_alias
|
||||
elif v2_api_username and v2_api_passwd:
|
||||
self.clc.v2.SetCredentials(
|
||||
api_username=v2_api_username,
|
||||
api_passwd=v2_api_passwd)
|
||||
else:
|
||||
return self.module.fail_json(
|
||||
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
|
||||
"environment variables")
|
||||
|
||||
@staticmethod
|
||||
def _set_user_agent(clc):
|
||||
if hasattr(clc, 'SetRequestsSession'):
|
||||
agent_string = "ClcAnsibleModule/" + __version__
|
||||
ses = requests.Session()
|
||||
ses.headers.update({"Api-Client": agent_string})
|
||||
ses.headers['User-Agent'] += " " + agent_string
|
||||
clc.SetRequestsSession(ses)
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
Main function
|
||||
:return: None
|
||||
"""
|
||||
module = AnsibleModule(
|
||||
argument_spec=ClcSnapshot.define_argument_spec(),
|
||||
supports_check_mode=True
|
||||
)
|
||||
clc_snapshot = ClcSnapshot(module)
|
||||
clc_snapshot.process_request()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,133 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2018, René Moser <mail@renemoser.net>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: cloud_init_data_facts
|
||||
short_description: Retrieve facts of cloud-init
|
||||
description:
|
||||
- Gathers facts by reading the status.json and result.json of cloud-init.
|
||||
author: René Moser (@resmo)
|
||||
extends_documentation_fragment:
|
||||
- community.general.attributes
|
||||
- community.general.attributes.facts
|
||||
- community.general.attributes.facts_module
|
||||
options:
|
||||
filter:
|
||||
description:
|
||||
- Filter facts.
|
||||
type: str
|
||||
choices: [ status, result ]
|
||||
notes:
|
||||
- See http://cloudinit.readthedocs.io/ for more information about cloud-init.
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Gather all facts of cloud init
|
||||
community.general.cloud_init_data_facts:
|
||||
register: result
|
||||
|
||||
- ansible.builtin.debug:
|
||||
var: result
|
||||
|
||||
- name: Wait for cloud init to finish
|
||||
community.general.cloud_init_data_facts:
|
||||
filter: status
|
||||
register: res
|
||||
until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage"
|
||||
retries: 50
|
||||
delay: 5
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
---
|
||||
cloud_init_data_facts:
|
||||
description: Facts of result and status.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: '{
  "status": {
    "v1": {
      "datasource": "DataSourceCloudStack",
      "errors": []
    }
  },
  "result": {
    "v1": {
      "datasource": "DataSourceCloudStack",
      "init": {
        "errors": [],
        "finished": 1522066377.0185432,
        "start": 1522066375.2648022
      },
      "init-local": {
        "errors": [],
        "finished": 1522066373.70919,
        "start": 1522066373.4726632
      },
      "modules-config": {
        "errors": [],
        "finished": 1522066380.9097016,
        "start": 1522066379.0011985
      },
      "modules-final": {
        "errors": [],
        "finished": 1522066383.56594,
        "start": 1522066382.3449218
      },
      "stage": null
    }
  }
}'
|
||||
'''
|
||||
|
||||
import os

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_text


CLOUD_INIT_PATH = "/var/lib/cloud/data"


def gather_cloud_init_data_facts(module):
    res = {
        'cloud_init_data_facts': dict()
    }

    filter_param = module.params.get('filter')
    for i in ['result', 'status']:
        if filter_param is None or filter_param == i:
            res['cloud_init_data_facts'][i] = dict()
            json_file = os.path.join(CLOUD_INIT_PATH, i + '.json')

            if os.path.exists(json_file):
                with open(json_file, 'rb') as f:
                    contents = to_text(f.read(), errors='surrogate_or_strict')

                if contents:
                    res['cloud_init_data_facts'][i] = module.from_json(contents)
    return res


def main():
    module = AnsibleModule(
        argument_spec=dict(
            filter=dict(choices=['result', 'status']),
        ),
        supports_check_mode=True,
    )

    facts = gather_cloud_init_data_facts(module)
    result = dict(changed=False, ansible_facts=facts, **facts)
    module.exit_json(**result)


if __name__ == '__main__':
    main()
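The "Wait for cloud init to finish" example above hinges on v1.stage turning null once cloud-init is done. A standalone sketch of the same check, assuming the default data directory the module reads:

import json
import os

CLOUD_INIT_PATH = "/var/lib/cloud/data"

with open(os.path.join(CLOUD_INIT_PATH, "status.json"), "rb") as f:
    status = json.load(f)

# cloud-init is still busy while v1.stage holds a stage name; null/None means done.
print(status["v1"].get("stage"))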
@@ -0,0 +1,885 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2016 Michael Gruener <michael.gruener@chaosmoon.net>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = r'''
---
module: cloudflare_dns
author:
  - Michael Gruener (@mgruener)
requirements:
  - python >= 2.6
short_description: Manage Cloudflare DNS records
description:
  - "Manages DNS records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)."
options:
  api_token:
    description:
      - API token.
      - Required for API token authentication.
      - "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)."
      - Can be specified in C(CLOUDFLARE_TOKEN) environment variable since community.general 2.0.0.
    type: str
    required: false
    version_added: '0.2.0'
  account_api_key:
    description:
      - Account API key.
      - Required for API key authentication.
      - "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)."
    type: str
    required: false
    aliases: [ account_api_token ]
  account_email:
    description:
      - Account email. Required for API key authentication.
    type: str
    required: false
  algorithm:
    description:
      - Algorithm number.
      - Required for I(type=DS) and I(type=SSHFP) when I(state=present).
    type: int
  cert_usage:
    description:
      - Certificate usage number.
      - Required for I(type=TLSA) when I(state=present).
    type: int
    choices: [ 0, 1, 2, 3 ]
  hash_type:
    description:
      - Hash type number.
      - Required for I(type=DS), I(type=SSHFP) and I(type=TLSA) when I(state=present).
    type: int
    choices: [ 1, 2 ]
  key_tag:
    description:
      - DNSSEC key tag.
      - Needed for I(type=DS) when I(state=present).
    type: int
  port:
    description:
      - Service port.
      - Required for I(type=SRV) and I(type=TLSA).
    type: int
  priority:
    description:
      - Record priority.
      - Required for I(type=MX) and I(type=SRV).
    default: 1
    type: int
  proto:
    description:
      - Service protocol. Required for I(type=SRV) and I(type=TLSA).
      - Common values are TCP and UDP.
      - Before Ansible 2.6 only TCP and UDP were available.
    type: str
  proxied:
    description:
      - Proxy through Cloudflare network or just use DNS.
    type: bool
    default: false
  record:
    description:
      - Record to add.
      - Required if I(state=present).
      - Default is C(@) (that is, the zone name).
    type: str
    default: '@'
    aliases: [ name ]
  selector:
    description:
      - Selector number.
      - Required for I(type=TLSA) when I(state=present).
    choices: [ 0, 1 ]
    type: int
  service:
    description:
      - Record service.
      - Required for I(type=SRV).
    type: str
  solo:
    description:
      - Whether the record should be the only one for that record type and record name.
      - Only use with I(state=present).
      - This will delete all other records with the same record name and type.
    type: bool
  state:
    description:
      - Whether the record(s) should exist or not.
    type: str
    choices: [ absent, present ]
    default: present
  timeout:
    description:
      - Timeout for Cloudflare API calls.
    type: int
    default: 30
  ttl:
    description:
      - The TTL to give the new record.
      - Must be between 120 and 2,147,483,647 seconds, or 1 for automatic.
    type: int
    default: 1
  type:
    description:
      - The type of DNS record to create. Required if I(state=present).
      - I(type=DS), I(type=SSHFP) and I(type=TLSA) added in Ansible 2.7.
    type: str
    choices: [ A, AAAA, CNAME, DS, MX, NS, SPF, SRV, SSHFP, TLSA, TXT ]
  value:
    description:
      - The record value.
      - Required for I(state=present).
    type: str
    aliases: [ content ]
  weight:
    description:
      - Service weight.
      - Required for I(type=SRV).
    type: int
    default: 1
  zone:
    description:
      - The name of the Zone to work with (e.g. "example.com").
      - The Zone must already exist.
    type: str
    required: true
    aliases: [ domain ]
'''

EXAMPLES = r'''
- name: Create a test.example.net A record to point to 127.0.0.1
  community.general.cloudflare_dns:
    zone: example.net
    record: test
    type: A
    value: 127.0.0.1
    account_email: test@example.com
    account_api_key: dummyapitoken
  register: record

- name: Create a record using API token
  community.general.cloudflare_dns:
    zone: example.net
    record: test
    type: A
    value: 127.0.0.1
    api_token: dummyapitoken

- name: Create an example.net CNAME record to example.com
  community.general.cloudflare_dns:
    zone: example.net
    type: CNAME
    value: example.com
    account_email: test@example.com
    account_api_key: dummyapitoken
    state: present

- name: Change its TTL
  community.general.cloudflare_dns:
    zone: example.net
    type: CNAME
    value: example.com
    ttl: 600
    account_email: test@example.com
    account_api_key: dummyapitoken
    state: present

- name: Delete the record
  community.general.cloudflare_dns:
    zone: example.net
    type: CNAME
    value: example.com
    account_email: test@example.com
    account_api_key: dummyapitoken
    state: absent

- name: Create an example.net CNAME record to example.com and proxy through Cloudflare's network
  community.general.cloudflare_dns:
    zone: example.net
    type: CNAME
    value: example.com
    proxied: true
    account_email: test@example.com
    account_api_key: dummyapitoken
    state: present

# This deletes all other TXT records named "test.example.net"
- name: Create TXT record "test.example.net" with value "unique value"
  community.general.cloudflare_dns:
    domain: example.net
    record: test
    type: TXT
    value: unique value
    solo: true
    account_email: test@example.com
    account_api_key: dummyapitoken
    state: present

- name: Create an SRV record _foo._tcp.example.net
  community.general.cloudflare_dns:
    domain: example.net
    service: foo
    proto: tcp
    port: 3500
    priority: 10
    weight: 20
    type: SRV
    value: fooserver.example.net

- name: Create an SSHFP record login.example.com
  community.general.cloudflare_dns:
    zone: example.com
    record: login
    type: SSHFP
    algorithm: 4
    hash_type: 2
    value: 9dc1d6742696d2f51ca1f1a78b3d16a840f7d111eb9454239e70db31363f33e1

- name: Create a TLSA record _25._tcp.mail.example.com
  community.general.cloudflare_dns:
    zone: example.com
    record: mail
    port: 25
    proto: tcp
    type: TLSA
    cert_usage: 3
    selector: 1
    hash_type: 1
    value: 6b76d034492b493e15a7376fccd08e63befdad0edab8e442562f532338364bf3

- name: Create a DS record for subdomain.example.com
  community.general.cloudflare_dns:
    zone: example.com
    record: subdomain
    type: DS
    key_tag: 5464
    algorithm: 8
    hash_type: 2
    value: B4EB5AC4467D2DFB3BAF9FB9961DC1B6FED54A58CDFAA3E465081EC86F89BFAB
'''

RETURN = r'''
record:
  description: A dictionary containing the record data.
  returned: success, except on record deletion
  type: complex
  contains:
    content:
      description: The record content (details depend on record type).
      returned: success
      type: str
      sample: 192.0.2.91
    created_on:
      description: The record creation date.
      returned: success
      type: str
      sample: "2016-03-25T19:09:42.516553Z"
    data:
      description: Additional record data.
      returned: success, if type is SRV, DS, SSHFP or TLSA
      type: dict
      sample: {
        name: "jabber",
        port: 8080,
        priority: 10,
        proto: "_tcp",
        service: "_xmpp",
        target: "jabberhost.sample.com",
        weight: 5,
      }
    id:
      description: The record ID.
      returned: success
      type: str
      sample: f9efb0549e96abcb750de63b38c9576e
    locked:
      description: No documentation available.
      returned: success
      type: bool
      sample: false
    meta:
      description: No documentation available.
      returned: success
      type: dict
      sample: { auto_added: false }
    modified_on:
      description: Record modification date.
      returned: success
      type: str
      sample: "2016-03-25T19:09:42.516553Z"
    name:
      description: The record name as FQDN (including _service and _proto for SRV).
      returned: success
      type: str
      sample: www.sample.com
    priority:
      description: Priority of the MX record.
      returned: success, if type is MX
      type: int
      sample: 10
    proxiable:
      description: Whether this record can be proxied through Cloudflare.
      returned: success
      type: bool
      sample: false
    proxied:
      description: Whether the record is proxied through Cloudflare.
      returned: success
      type: bool
      sample: false
    ttl:
      description: The time-to-live for the record.
      returned: success
      type: int
      sample: 300
    type:
      description: The record type.
      returned: success
      type: str
      sample: A
    zone_id:
      description: The ID of the zone containing the record.
      returned: success
      type: str
      sample: abcede0bf9f0066f94029d2e6b73856a
    zone_name:
      description: The name of the zone containing the record.
      returned: success
      type: str
      sample: sample.com
'''

import json

from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.urls import fetch_url


def lowercase_string(param):
    if not isinstance(param, str):
        return param
    return param.lower()


class CloudflareAPI(object):

    cf_api_endpoint = 'https://api.cloudflare.com/client/v4'
    changed = False

    def __init__(self, module):
        self.module = module
        self.api_token = module.params['api_token']
        self.account_api_key = module.params['account_api_key']
        self.account_email = module.params['account_email']
        self.algorithm = module.params['algorithm']
        self.cert_usage = module.params['cert_usage']
        self.hash_type = module.params['hash_type']
        self.key_tag = module.params['key_tag']
        self.port = module.params['port']
        self.priority = module.params['priority']
        self.proto = lowercase_string(module.params['proto'])
        self.proxied = module.params['proxied']
        self.selector = module.params['selector']
        self.record = lowercase_string(module.params['record'])
        self.service = lowercase_string(module.params['service'])
        self.is_solo = module.params['solo']
        self.state = module.params['state']
        self.timeout = module.params['timeout']
        self.ttl = module.params['ttl']
        self.type = module.params['type']
        self.value = module.params['value']
        self.weight = module.params['weight']
        self.zone = lowercase_string(module.params['zone'])

        if self.record == '@':
            self.record = self.zone

        if (self.type in ['CNAME', 'NS', 'MX', 'SRV']) and (self.value is not None):
            self.value = self.value.rstrip('.').lower()

        if (self.type == 'AAAA') and (self.value is not None):
            self.value = self.value.lower()

        if (self.type == 'SRV'):
            if (self.proto is not None) and (not self.proto.startswith('_')):
                self.proto = '_' + self.proto
            if (self.service is not None) and (not self.service.startswith('_')):
                self.service = '_' + self.service

        if (self.type == 'TLSA'):
            if (self.proto is not None) and (not self.proto.startswith('_')):
                self.proto = '_' + self.proto
            if (self.port is not None):
                self.port = '_' + str(self.port)

        if not self.record.endswith(self.zone):
            self.record = self.record + '.' + self.zone

        if (self.type == 'DS'):
            if self.record == self.zone:
                self.module.fail_json(msg="DS records only apply to subdomains.")

    def _cf_simple_api_call(self, api_call, method='GET', payload=None):
        if self.api_token:
            headers = {
                'Authorization': 'Bearer ' + self.api_token,
                'Content-Type': 'application/json',
            }
        else:
            headers = {
                'X-Auth-Email': self.account_email,
                'X-Auth-Key': self.account_api_key,
                'Content-Type': 'application/json',
            }
        data = None
        if payload:
            try:
                data = json.dumps(payload)
            except Exception as e:
                self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e))

        resp, info = fetch_url(self.module,
                               self.cf_api_endpoint + api_call,
                               headers=headers,
                               data=data,
                               method=method,
                               timeout=self.timeout)

        if info['status'] not in [200, 304, 400, 401, 403, 429, 405, 415]:
            self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP code {1}: {2}".format(api_call, info['status'], info.get('msg')))

        error_msg = ''
        if info['status'] == 401:
            # Unauthorized
            error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
        elif info['status'] == 403:
            # Forbidden
            error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
        elif info['status'] == 429:
            # Too many requests
            error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
        elif info['status'] == 405:
            # Method not allowed
            error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
        elif info['status'] == 415:
            # Unsupported Media Type
            error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
        elif info['status'] == 400:
            # Bad Request
            error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)

        result = None
        # Initialize content so the check below cannot hit an undefined name
        # when the response object has no read() and no body was returned.
        content = None
        try:
            content = resp.read()
        except AttributeError:
            if info['body']:
                content = info['body']
            else:
                error_msg += "; The API response was empty"

        if content:
            try:
                result = json.loads(to_text(content, errors='surrogate_or_strict'))
            except (getattr(json, 'JSONDecodeError', ValueError)) as e:
                error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content)

        # Without a valid/parsed JSON response no more error processing can be done
        if result is None:
            self.module.fail_json(msg=error_msg)

        if 'success' not in result:
            error_msg += "; Unexpected error details: {0}".format(result.get('error'))
            self.module.fail_json(msg=error_msg)

        if not result['success']:
            error_msg += "; Error details: "
            for error in result['errors']:
                error_msg += "code: {0}, error: {1}; ".format(error['code'], error['message'])
                if 'error_chain' in error:
                    for chain_error in error['error_chain']:
                        error_msg += "code: {0}, error: {1}; ".format(chain_error['code'], chain_error['message'])
            self.module.fail_json(msg=error_msg)

        return result, info['status']

    def _cf_api_call(self, api_call, method='GET', payload=None):
        result, status = self._cf_simple_api_call(api_call, method, payload)

        data = result['result']

        if 'result_info' in result:
            pagination = result['result_info']
            if pagination['total_pages'] > 1:
                next_page = int(pagination['page']) + 1
                # strip "page" parameter from call parameters (if there are any)
                if '?' in api_call:
                    raw_api_call, query = api_call.split('?', 1)
                    parameters = [param for param in query.split('&') if not param.startswith('page')]
                else:
                    raw_api_call = api_call
                    parameters = []
                while next_page <= pagination['total_pages']:
                    # rebuild the URL on every iteration so the page number
                    # actually advances instead of appending to the same call
                    page_call = raw_api_call + '?' + '&'.join(parameters + ['page={0}'.format(next_page)])
                    result, status = self._cf_simple_api_call(page_call, method, payload)
                    data += result['result']
                    next_page += 1

        return data, status

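    # Illustration (not executed): for a call like '/zones?per_page=50' whose
    # first response reports three result pages, the loop above issues
    #   /zones?per_page=50&page=2
    #   /zones?per_page=50&page=3
    # and concatenates each page's 'result' list onto the data already fetched.
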
    def _get_zone_id(self, zone=None):
        if not zone:
            zone = self.zone

        zones = self.get_zones(zone)
        if len(zones) > 1:
            self.module.fail_json(msg="More than one zone matches {0}".format(zone))

        if len(zones) < 1:
            self.module.fail_json(msg="No zone found with name {0}".format(zone))

        return zones[0]['id']

    def get_zones(self, name=None):
        if not name:
            name = self.zone
        param = ''
        if name:
            param = '?' + urlencode({'name': name})
        zones, status = self._cf_api_call('/zones' + param)
        return zones

    def get_dns_records(self, zone_name=None, type=None, record=None, value=''):
        if not zone_name:
            zone_name = self.zone
        if not type:
            type = self.type
        if not record:
            record = self.record
        # the empty-string default means "use the module-level value";
        # an explicit None means "do not filter on content" and must be kept
        if (not value) and (value is not None):
            value = self.value

        zone_id = self._get_zone_id()
        api_call = '/zones/{0}/dns_records'.format(zone_id)
        query = {}
        if type:
            query['type'] = type
        if record:
            query['name'] = record
        if value:
            query['content'] = value
        if query:
            api_call += '?' + urlencode(query)

        records, status = self._cf_api_call(api_call)
        return records

    def delete_dns_records(self, **kwargs):
        params = {}
        for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone',
                      'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']:
            if param in kwargs:
                params[param] = kwargs[param]
            else:
                params[param] = getattr(self, param)

        content = params['value']
        search_record = params['record']
        if params['type'] == 'SRV':
            if not (params['value'] is None or params['value'] == ''):
                content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
            search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
        elif params['type'] == 'DS':
            if not (params['value'] is None or params['value'] == ''):
                content = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
        elif params['type'] == 'SSHFP':
            if not (params['value'] is None or params['value'] == ''):
                content = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
        elif params['type'] == 'TLSA':
            if not (params['value'] is None or params['value'] == ''):
                content = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
            search_record = params['port'] + '.' + params['proto'] + '.' + params['record']
        if params['solo']:
            search_value = None
        else:
            search_value = content

        records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)

        for rr in records:
            if params['solo'] and (rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content):
                # solo mode keeps the one record that matches exactly
                continue
            self.changed = True
            if not self.module.check_mode:
                result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE')
        return self.changed

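    # Illustration (not executed): Cloudflare returns the content of SRV, DS,
    # SSHFP and TLSA records as tab-separated fields, which is why the search
    # strings above are assembled the same way, e.g. for an SRV record:
    #   '\t'.join(['20', '3500', 'fooserver.example.net'])
    #   -> '20\t3500\tfooserver.example.net'
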
    def ensure_dns_record(self, **kwargs):
        params = {}
        for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone',
                      'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']:
            if param in kwargs:
                params[param] = kwargs[param]
            else:
                params[param] = getattr(self, param)

        search_value = params['value']
        search_record = params['record']
        new_record = None
        if (params['type'] is None) or (params['record'] is None):
            self.module.fail_json(msg="You must provide a type and a record to create a new record")

        if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'SPF']):
            if not params['value']:
                self.module.fail_json(msg="You must provide a non-empty value to create this record type")

            # there can only be one CNAME per record name; ignoring the value
            # when searching for existing CNAME records allows us to update
            # the value if it changes
            if params['type'] == 'CNAME':
                search_value = None

            new_record = {
                "type": params['type'],
                "name": params['record'],
                "content": params['value'],
                "ttl": params['ttl']
            }

        if (params['type'] in ['A', 'AAAA', 'CNAME']):
            new_record["proxied"] = params["proxied"]

        if params['type'] == 'MX':
            for attr in [params['priority'], params['value']]:
                if (attr is None) or (attr == ''):
                    self.module.fail_json(msg="You must provide priority and a value to create this record type")
            new_record = {
                "type": params['type'],
                "name": params['record'],
                "content": params['value'],
                "priority": params['priority'],
                "ttl": params['ttl']
            }

        if params['type'] == 'SRV':
            for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]:
                if (attr is None) or (attr == ''):
                    self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type")
            srv_data = {
                "target": params['value'],
                "port": params['port'],
                "weight": params['weight'],
                "priority": params['priority'],
                "name": params['record'][:-len('.' + params['zone'])],
                "proto": params['proto'],
                "service": params['service']
            }
            new_record = {"type": params['type'], "ttl": params['ttl'], 'data': srv_data}
            search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
            search_record = params['service'] + '.' + params['proto'] + '.' + params['record']

        if params['type'] == 'DS':
            for attr in [params['key_tag'], params['algorithm'], params['hash_type'], params['value']]:
                if (attr is None) or (attr == ''):
                    self.module.fail_json(msg="You must provide key_tag, algorithm, hash_type and a value to create this record type")
            ds_data = {
                "key_tag": params['key_tag'],
                "algorithm": params['algorithm'],
                "digest_type": params['hash_type'],
                "digest": params['value'],
            }
            new_record = {
                "type": params['type'],
                "name": params['record'],
                'data': ds_data,
                "ttl": params['ttl'],
            }
            search_value = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']

        if params['type'] == 'SSHFP':
            for attr in [params['algorithm'], params['hash_type'], params['value']]:
                if (attr is None) or (attr == ''):
                    self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type")
            sshfp_data = {
                "fingerprint": params['value'],
                "type": params['hash_type'],
                "algorithm": params['algorithm'],
            }
            new_record = {
                "type": params['type'],
                "name": params['record'],
                'data': sshfp_data,
                "ttl": params['ttl'],
            }
            search_value = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']

        if params['type'] == 'TLSA':
            for attr in [params['port'], params['proto'], params['cert_usage'], params['selector'], params['hash_type'], params['value']]:
                if (attr is None) or (attr == ''):
                    self.module.fail_json(msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type")
            search_record = params['port'] + '.' + params['proto'] + '.' + params['record']
            tlsa_data = {
                "usage": params['cert_usage'],
                "selector": params['selector'],
                "matching_type": params['hash_type'],
                "certificate": params['value'],
            }
            new_record = {
                "type": params['type'],
                "name": search_record,
                'data': tlsa_data,
                "ttl": params['ttl'],
            }
            search_value = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']

        zone_id = self._get_zone_id(params['zone'])
        records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
        # in theory this should be impossible as Cloudflare does not allow
        # the creation of duplicate records, but let's cover it anyway
        if len(records) > 1:
            self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!")
        # record already exists, check if it must be updated
        if len(records) == 1:
            cur_record = records[0]
            do_update = False
            if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']):
                do_update = True
            if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']):
                do_update = True
            if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != params['proxied']):
                do_update = True
            if ('data' in new_record) and ('data' in cur_record):
                if (cur_record['data'] != new_record['data']):
                    do_update = True
            if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']):
                do_update = True
            if do_update:
                if self.module.check_mode:
                    result = new_record
                else:
                    result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, records[0]['id']), 'PUT', new_record)
                self.changed = True
                return result, self.changed
            else:
                return records, self.changed
        if self.module.check_mode:
            result = new_record
        else:
            result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id), 'POST', new_record)
        self.changed = True
        return result, self.changed


def main():
    module = AnsibleModule(
        argument_spec=dict(
            api_token=dict(
                type="str",
                required=False,
                no_log=True,
                fallback=(env_fallback, ["CLOUDFLARE_TOKEN"]),
            ),
            account_api_key=dict(type='str', required=False, no_log=True, aliases=['account_api_token']),
            account_email=dict(type='str', required=False),
            algorithm=dict(type='int'),
            cert_usage=dict(type='int', choices=[0, 1, 2, 3]),
            hash_type=dict(type='int', choices=[1, 2]),
            key_tag=dict(type='int', no_log=False),
            port=dict(type='int'),
            priority=dict(type='int', default=1),
            proto=dict(type='str'),
            proxied=dict(type='bool', default=False),
            record=dict(type='str', default='@', aliases=['name']),
            selector=dict(type='int', choices=[0, 1]),
            service=dict(type='str'),
            solo=dict(type='bool'),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            timeout=dict(type='int', default=30),
            ttl=dict(type='int', default=1),
            type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SPF', 'SRV', 'SSHFP', 'TLSA', 'TXT']),
            value=dict(type='str', aliases=['content']),
            weight=dict(type='int', default=1),
            zone=dict(type='str', required=True, aliases=['domain']),
        ),
        supports_check_mode=True,
        required_if=[
            ('state', 'present', ['record', 'type', 'value']),
            ('state', 'absent', ['record']),
            ('type', 'SRV', ['proto', 'service']),
            ('type', 'TLSA', ['proto', 'port']),
        ],
    )

    if not module.params['api_token'] and not (module.params['account_api_key'] and module.params['account_email']):
        module.fail_json(msg="Either api_token or account_api_key and account_email params are required.")
    if module.params['type'] == 'SRV':
        if not ((module.params['weight'] is not None and module.params['port'] is not None
                 and not (module.params['value'] is None or module.params['value'] == ''))
                or (module.params['weight'] is None and module.params['port'] is None
                    and (module.params['value'] is None or module.params['value'] == ''))):
            module.fail_json(msg="For SRV records the params weight, port and value all need to be defined, or not at all.")

    if module.params['type'] == 'SSHFP':
        if not ((module.params['algorithm'] is not None and module.params['hash_type'] is not None
                 and not (module.params['value'] is None or module.params['value'] == ''))
                or (module.params['algorithm'] is None and module.params['hash_type'] is None
                    and (module.params['value'] is None or module.params['value'] == ''))):
            module.fail_json(msg="For SSHFP records the params algorithm, hash_type and value all need to be defined, or not at all.")

    if module.params['type'] == 'TLSA':
        if not ((module.params['cert_usage'] is not None and module.params['selector'] is not None and module.params['hash_type'] is not None
                 and not (module.params['value'] is None or module.params['value'] == ''))
                or (module.params['cert_usage'] is None and module.params['selector'] is None and module.params['hash_type'] is None
                    and (module.params['value'] is None or module.params['value'] == ''))):
            module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.")

    if module.params['type'] == 'DS':
        if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None
                 and not (module.params['value'] is None or module.params['value'] == ''))
                or (module.params['key_tag'] is None and module.params['algorithm'] is None and module.params['hash_type'] is None
                    and (module.params['value'] is None or module.params['value'] == ''))):
            module.fail_json(msg="For DS records the params key_tag, algorithm, hash_type and value all need to be defined, or not at all.")

    changed = False
    cf_api = CloudflareAPI(module)

    # sanity checks
    if cf_api.is_solo and cf_api.state == 'absent':
        module.fail_json(msg="solo=true can only be used with state=present")

    # perform add, delete or update (only the TTL can be updated) of one or
    # more records
    if cf_api.state == 'present':
        # delete all records matching record name + type
        if cf_api.is_solo:
            changed = cf_api.delete_dns_records(solo=cf_api.is_solo)
        result, changed = cf_api.ensure_dns_record()
        if isinstance(result, list):
            module.exit_json(changed=changed, result={'record': result[0]})

        module.exit_json(changed=changed, result={'record': result})
    else:
        # force solo to False, just to be sure
        changed = cf_api.delete_dns_records(solo=False)
        module.exit_json(changed=changed)


if __name__ == '__main__':
    main()
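The record normalization done in CloudflareAPI.__init__ above (lowercasing, then expanding the record to an FQDN inside the zone) is easiest to follow in isolation; a standalone sketch with illustrative values:

zone = 'Example.NET'.lower()
record = 'Test'.lower()
if record == '@':
    record = zone
if not record.endswith(zone):
    record = record + '.' + zone
assert record == 'test.example.net'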
@@ -0,0 +1,143 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = r'''
---
module: cobbler_sync
short_description: Sync Cobbler
description:
  - Sync Cobbler to commit changes.
options:
  host:
    description:
      - The name or IP address of the Cobbler system.
    default: 127.0.0.1
    type: str
  port:
    description:
      - Port number to be used for REST connection.
      - The default value depends on parameter C(use_ssl).
    type: int
  username:
    description:
      - The username to log in to Cobbler.
    default: cobbler
    type: str
  password:
    description:
      - The password to log in to Cobbler.
    type: str
  use_ssl:
    description:
      - If C(false), an HTTP connection will be used instead of the default HTTPS connection.
    type: bool
    default: true
  validate_certs:
    description:
      - If C(false), SSL certificates will not be validated.
      - This should only be set to C(false) when used on personally controlled sites using self-signed certificates.
    type: bool
    default: true
author:
  - Dag Wieers (@dagwieers)
notes:
  - Concurrently syncing Cobbler is bound to fail with weird errors.
  - On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
    More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
'''

EXAMPLES = r'''
- name: Commit Cobbler changes
  community.general.cobbler_sync:
    host: cobbler01
    username: cobbler
    password: MySuperSecureP4sswOrd
  run_once: true
  delegate_to: localhost
'''

RETURN = r'''
# Default return values
'''

import datetime
import ssl

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import xmlrpc_client
from ansible.module_utils.common.text.converters import to_text


def main():
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', default='127.0.0.1'),
            port=dict(type='int'),
            username=dict(type='str', default='cobbler'),
            password=dict(type='str', no_log=True),
            use_ssl=dict(type='bool', default=True),
            validate_certs=dict(type='bool', default=True),
        ),
        supports_check_mode=True,
    )

    username = module.params['username']
    password = module.params['password']
    port = module.params['port']
    use_ssl = module.params['use_ssl']
    validate_certs = module.params['validate_certs']

    module.params['proto'] = 'https' if use_ssl else 'http'
    if not port:
        module.params['port'] = '443' if use_ssl else '80'

    result = dict(
        changed=True,
    )

    start = datetime.datetime.utcnow()

    ssl_context = None
    if not validate_certs:
        try:
            ssl_context = ssl._create_unverified_context()
        except AttributeError:
            # Legacy Python that doesn't verify HTTPS certificates by default
            pass
        else:
            # Handle target environment that doesn't support HTTPS verification
            ssl._create_default_https_context = ssl._create_unverified_context

    url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
    if ssl_context:
        conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
    else:
        conn = xmlrpc_client.Server(url)

    try:
        token = conn.login(username, password)
    except xmlrpc_client.Fault as e:
        module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
    except Exception as e:
        module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e)))

    if not module.check_mode:
        try:
            conn.sync(token)
        except Exception as e:
            module.fail_json(msg="Failed to sync Cobbler. {error}".format(error=to_text(e)))

    elapsed = datetime.datetime.utcnow() - start
    module.exit_json(elapsed=elapsed.seconds, **result)


if __name__ == '__main__':
    main()
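Outside of Ansible, the same login/sync handshake can be exercised directly against Cobbler's XML-RPC endpoint; a minimal sketch with hypothetical host and credentials:

from ansible.module_utils.six.moves import xmlrpc_client

conn = xmlrpc_client.Server('https://cobbler01:443/cobbler_api')
token = conn.login('cobbler', 'MySuperSecureP4sswOrd')  # raises xmlrpc_client.Fault on bad credentials
conn.sync(token)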
@@ -0,0 +1,341 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = r'''
---
module: cobbler_system
short_description: Manage system objects in Cobbler
description:
  - Add, modify or remove systems in Cobbler.
options:
  host:
    description:
      - The name or IP address of the Cobbler system.
    default: 127.0.0.1
    type: str
  port:
    description:
      - Port number to be used for REST connection.
      - The default value depends on parameter C(use_ssl).
    type: int
  username:
    description:
      - The username to log in to Cobbler.
    default: cobbler
    type: str
  password:
    description:
      - The password to log in to Cobbler.
    type: str
  use_ssl:
    description:
      - If C(false), an HTTP connection will be used instead of the default HTTPS connection.
    type: bool
    default: true
  validate_certs:
    description:
      - If C(false), SSL certificates will not be validated.
      - This should only be set to C(false) when used on personally controlled sites using self-signed certificates.
    type: bool
    default: true
  name:
    description:
      - The system name to manage.
    type: str
  properties:
    description:
      - A dictionary with system properties.
    type: dict
  interfaces:
    description:
      - A dictionary of dictionaries containing interface options, keyed by interface name.
    type: dict
  sync:
    description:
      - Sync on changes.
      - Concurrently syncing Cobbler is bound to fail.
    type: bool
    default: false
  state:
    description:
      - Whether the system should be present, absent or a query is made.
    choices: [ absent, present, query ]
    default: present
    type: str
author:
  - Dag Wieers (@dagwieers)
notes:
  - Concurrently syncing Cobbler is bound to fail with weird errors.
  - On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
    More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
'''

EXAMPLES = r'''
- name: Ensure the system exists in Cobbler
  community.general.cobbler_system:
    host: cobbler01
    username: cobbler
    password: MySuperSecureP4sswOrd
    name: myhost
    properties:
      profile: CentOS6-x86_64
      name_servers: [ 2.3.4.5, 3.4.5.6 ]
      name_servers_search: foo.com, bar.com
    interfaces:
      eth0:
        macaddress: 00:01:02:03:04:05
        ipaddress: 1.2.3.4
  delegate_to: localhost

- name: Enable network boot in Cobbler
  community.general.cobbler_system:
    host: bdsol-aci-cobbler-01
    username: cobbler
    password: ins3965!
    name: bdsol-aci51-apic1.cisco.com
    properties:
      netboot_enabled: true
    state: present
  delegate_to: localhost

- name: Query all systems in Cobbler
  community.general.cobbler_system:
    host: cobbler01
    username: cobbler
    password: MySuperSecureP4sswOrd
    state: query
  register: cobbler_systems
  delegate_to: localhost

- name: Query a specific system in Cobbler
  community.general.cobbler_system:
    host: cobbler01
    username: cobbler
    password: MySuperSecureP4sswOrd
    name: '{{ inventory_hostname }}'
    state: query
  register: cobbler_properties
  delegate_to: localhost

- name: Ensure the system does not exist in Cobbler
  community.general.cobbler_system:
    host: cobbler01
    username: cobbler
    password: MySuperSecureP4sswOrd
    name: myhost
    state: absent
  delegate_to: localhost
'''

RETURN = r'''
systems:
  description: List of systems.
  returned: I(state=query) and I(name) is not provided
  type: list
system:
  description: (Resulting) information about the system we are working with.
  returned: when I(name) is provided
  type: dict
'''

import datetime
import ssl

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import xmlrpc_client
from ansible.module_utils.common.text.converters import to_text

IFPROPS_MAPPING = dict(
    bondingopts='bonding_opts',
    bridgeopts='bridge_opts',
    connected_mode='connected_mode',
    cnames='cnames',
    dhcptag='dhcp_tag',
    dnsname='dns_name',
    ifgateway='if_gateway',
    interfacetype='interface_type',
    interfacemaster='interface_master',
    ipaddress='ip_address',
    ipv6address='ipv6_address',
    ipv6defaultgateway='ipv6_default_gateway',
    ipv6mtu='ipv6_mtu',
    ipv6prefix='ipv6_prefix',
    ipv6secondaries='ipv6_secondaries',
    ipv6staticroutes='ipv6_static_routes',
    macaddress='mac_address',
    management='management',
    mtu='mtu',
    netmask='netmask',
    static='static',
    staticroutes='static_routes',
    virtbridge='virt_bridge',
)


def getsystem(conn, name, token):
    system = dict()
    if name:
        # system = conn.get_system(name, token)
        systems = conn.find_system(dict(name=name), token)
        if systems:
            system = systems[0]
    return system


def main():
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', default='127.0.0.1'),
            port=dict(type='int'),
            username=dict(type='str', default='cobbler'),
            password=dict(type='str', no_log=True),
            use_ssl=dict(type='bool', default=True),
            validate_certs=dict(type='bool', default=True),
            name=dict(type='str'),
            interfaces=dict(type='dict'),
            properties=dict(type='dict'),
            sync=dict(type='bool', default=False),
            state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
        ),
        supports_check_mode=True,
    )

    username = module.params['username']
    password = module.params['password']
    port = module.params['port']
    use_ssl = module.params['use_ssl']
    validate_certs = module.params['validate_certs']

    name = module.params['name']
    state = module.params['state']

    module.params['proto'] = 'https' if use_ssl else 'http'
    if not port:
        module.params['port'] = '443' if use_ssl else '80'

    result = dict(
        changed=False,
    )

    start = datetime.datetime.utcnow()

    ssl_context = None
    if not validate_certs:
        try:
            ssl_context = ssl._create_unverified_context()
        except AttributeError:
            # Legacy Python that doesn't verify HTTPS certificates by default
            pass
        else:
            # Handle target environment that doesn't support HTTPS verification
            ssl._create_default_https_context = ssl._create_unverified_context

    url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
    if ssl_context:
        conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
    else:
        conn = xmlrpc_client.Server(url)

    try:
        token = conn.login(username, password)
    except xmlrpc_client.Fault as e:
        module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
    except Exception as e:
        module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e), **module.params))

    system = getsystem(conn, name, token)
    # result['system'] = system

    if state == 'query':
        if name:
            result['system'] = system
        else:
            # Turn it into a dictionary of dictionaries
            # all_systems = conn.get_systems()
            # result['systems'] = { system['name']: system for system in all_systems }

            # Return a list of dictionaries
            result['systems'] = conn.get_systems()

    elif state == 'present':

        if system:
            # Update existing entry
            system_id = conn.get_system_handle(name, token)

            # properties may be omitted entirely
            for key, value in iteritems(module.params['properties'] or {}):
                if key not in system:
                    module.warn("Property '{0}' is not a valid system property.".format(key))
                    # Skip invalid properties instead of failing on the lookup below
                    continue
                if system[key] != value:
                    try:
                        conn.modify_system(system_id, key, value, token)
                        result['changed'] = True
                    except Exception as e:
                        module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))

        else:
            # Create a new entry
            system_id = conn.new_system(token)
            conn.modify_system(system_id, 'name', name, token)
            result['changed'] = True

            if module.params['properties']:
                for key, value in iteritems(module.params['properties']):
                    try:
                        conn.modify_system(system_id, key, value, token)
                    except Exception as e:
                        module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))

        # Add interface properties
        interface_properties = dict()
        if module.params['interfaces']:
            for device, values in iteritems(module.params['interfaces']):
                for key, value in iteritems(values):
                    if key == 'name':
                        continue
                    if key not in IFPROPS_MAPPING:
                        module.warn("Property '{0}' is not a valid interface property.".format(key))
                        # Skip unknown interface properties instead of failing on the mapping lookup
                        continue
                    if not system or system['interfaces'][device][IFPROPS_MAPPING[key]] != value:
                        result['changed'] = True
                        interface_properties['{0}-{1}'.format(key, device)] = value

            if result['changed'] is True:
                conn.modify_system(system_id, "modify_interface", interface_properties, token)

        # Only save when the entry was changed
        if not module.check_mode and result['changed']:
            conn.save_system(system_id, token)

    elif state == 'absent':

        if system:
            if not module.check_mode:
                conn.remove_system(name, token)
            result['changed'] = True

    if not module.check_mode and module.params['sync'] and result['changed']:
        try:
            conn.sync(token)
        except Exception as e:
            module.fail_json(msg="Failed to sync Cobbler. {0}".format(to_text(e)))

    if state in ('absent', 'present'):
        result['system'] = getsystem(conn, name, token)

        if module._diff:
            result['diff'] = dict(before=system, after=result['system'])

    elapsed = datetime.datetime.utcnow() - start
    module.exit_json(elapsed=elapsed.seconds, **result)


if __name__ == '__main__':
    main()
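The modify_interface call above expects the per-device options flattened into '<property>-<device>' keys; a standalone sketch of that flattening with illustrative values:

interfaces = {'eth0': {'macaddress': '00:01:02:03:04:05'}}

interface_properties = {}
for device, values in interfaces.items():
    for key, value in values.items():
        interface_properties['{0}-{1}'.format(key, device)] = value

assert interface_properties == {'macaddress-eth0': '00:01:02:03:04:05'}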
@@ -0,0 +1,268 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (c) 2014, Dimitrios Tydeas Mengidis <tydeas.dr@gmail.com>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: composer
|
||||
author:
|
||||
- "Dimitrios Tydeas Mengidis (@dmtrs)"
|
||||
- "René Moser (@resmo)"
|
||||
short_description: Dependency Manager for PHP
|
||||
description:
|
||||
- >
|
||||
Composer is a tool for dependency management in PHP. It allows you to
|
||||
declare the dependent libraries your project needs and it will install
|
||||
them in your project for you.
|
||||
options:
|
||||
command:
|
||||
type: str
|
||||
description:
|
||||
- Composer command like "install", "update" and so on.
|
||||
default: install
|
||||
arguments:
|
||||
type: str
|
||||
description:
|
||||
- Composer arguments like required package, version and so on.
|
||||
default: ''
|
||||
executable:
|
||||
type: path
|
||||
description:
|
||||
- Path to PHP Executable on the remote host, if PHP is not in PATH.
|
||||
aliases: [ php_path ]
|
||||
working_dir:
|
||||
type: path
|
||||
description:
|
||||
- Directory of your project (see --working-dir). This is required when
|
||||
the command is not run globally.
|
||||
- Will be ignored if I(global_command=true).
|
||||
global_command:
|
||||
description:
|
||||
- Runs the specified command globally.
|
||||
type: bool
|
||||
default: false
|
||||
prefer_source:
|
||||
description:
|
||||
- Forces installation from package sources when possible (see --prefer-source).
|
||||
default: false
|
||||
type: bool
|
||||
prefer_dist:
|
||||
description:
|
||||
- Forces installation from package dist even for dev versions (see --prefer-dist).
|
||||
default: false
|
||||
type: bool
|
||||
no_dev:
|
||||
description:
|
||||
- Disables installation of require-dev packages (see --no-dev).
|
||||
default: true
|
||||
type: bool
|
||||
no_scripts:
|
||||
description:
|
||||
- Skips the execution of all scripts defined in composer.json (see --no-scripts).
|
||||
default: false
|
||||
type: bool
|
||||
no_plugins:
|
||||
description:
|
||||
- Disables all plugins (see --no-plugins).
|
||||
default: false
|
||||
type: bool
|
||||
optimize_autoloader:
|
||||
description:
|
||||
- Optimize autoloader during autoloader dump (see --optimize-autoloader).
|
||||
- Convert PSR-0/4 autoloading to classmap to get a faster autoloader.
|
||||
- Recommended especially for production, but can take a bit of time to run.
|
||||
default: true
|
||||
type: bool
|
||||
classmap_authoritative:
|
||||
description:
|
||||
- Autoload classes from classmap only.
|
||||
- Implicitly enable optimize_autoloader.
|
||||
- Recommended especially for production, but can take a bit of time to run.
|
||||
default: false
|
||||
type: bool
|
||||
apcu_autoloader:
|
||||
description:
|
||||
- Uses APCu to cache found/not-found classes
|
||||
default: false
|
||||
type: bool
|
||||
ignore_platform_reqs:
|
||||
description:
|
||||
- Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these.
|
||||
default: false
|
||||
type: bool
|
||||
composer_executable:
|
||||
type: path
|
||||
description:
|
||||
- Path to composer executable on the remote host, if composer is not in C(PATH) or a custom composer is needed.
|
||||
version_added: 3.2.0
|
||||
requirements:
|
||||
- php
|
||||
- composer installed in bin path (recommended /usr/local/bin) or specified in I(composer_executable)
|
||||
notes:
|
||||
- Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available.
|
||||
- We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid issues.
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
- name: Download and install all libs and dependencies outlined in the /path/to/project/composer.lock
  community.general.composer:
    command: install
    working_dir: /path/to/project

- name: Install a new package
  community.general.composer:
    command: require
    arguments: my/package
    working_dir: /path/to/project

- name: Clone and install a project with all dependencies
  community.general.composer:
    command: create-project
    arguments: package/package /path/to/project ~1.0
    working_dir: /path/to/project
    prefer_dist: true

- name: Install a package globally
  community.general.composer:
    command: require
    global_command: true
    arguments: my/package
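
# An extra, hedged illustration (not part of the module's original examples):
# a production-style install that combines several of the boolean options above.
- name: Install runtime dependencies only, with an optimized autoloader
  community.general.composer:
    command: install
    working_dir: /path/to/project
    no_dev: true
    optimize_autoloader: true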
'''

import re
from ansible.module_utils.basic import AnsibleModule


def parse_out(string):
    return re.sub(r"\s+", " ", string).strip()


def has_changed(string):
    for no_change in ["Nothing to install or update", "Nothing to install, update or remove"]:
        if no_change in string:
            return False

    return True
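
# For reference: has_changed() keys off the literal no-op strings that composer
# prints ("Nothing to install or update" and "Nothing to install, update or
# remove", covering different composer releases); any other output is treated
# as a change.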


def get_available_options(module, command='install'):
    # get all available options from a composer command using composer help to json
    rc, out, err = composer_command(module, "help %s" % command, arguments="--no-interaction --format=json")
    if rc != 0:
        output = parse_out(err)
        module.fail_json(msg=output)

    command_help_json = module.from_json(out)
    return command_help_json['definition']['options']
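
# Rough sketch of the JSON consumed above (abridged; the exact fields may vary
# by composer version, so treat this as an assumption):
#   {"name": "install", "definition": {"options": {"prefer-source": {...},
#    "no-dev": {...}, "dry-run": {...}, ...}}}
# Only the option names (the keys of definition.options) are used by callers.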


def composer_command(module, command, arguments="", options=None, global_command=False):
    if options is None:
        options = []

    if module.params['executable'] is None:
        php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
    else:
        php_path = module.params['executable']

    if module.params['composer_executable'] is None:
        composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
    else:
        composer_path = module.params['composer_executable']

    cmd = "%s %s %s %s %s %s" % (php_path, composer_path, "global" if global_command else "", command, " ".join(options), arguments)
    return module.run_command(cmd)
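
# Illustration with hypothetical paths: for command="install",
# options=['--no-ansi', '--no-interaction'] and global_command=False, the
# assembled cmd resembles
#   /usr/bin/php /usr/local/bin/composer  install --no-ansi --no-interaction
# while global_command=True inserts "global" between the composer path and the
# command.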


def main():
    module = AnsibleModule(
        argument_spec=dict(
            command=dict(default="install", type="str"),
            arguments=dict(default="", type="str"),
            executable=dict(type="path", aliases=["php_path"]),
            working_dir=dict(type="path"),
            global_command=dict(default=False, type="bool"),
            prefer_source=dict(default=False, type="bool"),
            prefer_dist=dict(default=False, type="bool"),
            no_dev=dict(default=True, type="bool"),
            no_scripts=dict(default=False, type="bool"),
            no_plugins=dict(default=False, type="bool"),
            apcu_autoloader=dict(default=False, type="bool"),
            optimize_autoloader=dict(default=True, type="bool"),
            classmap_authoritative=dict(default=False, type="bool"),
            ignore_platform_reqs=dict(default=False, type="bool"),
            composer_executable=dict(type="path"),
        ),
        required_if=[('global_command', False, ['working_dir'])],
        supports_check_mode=True
    )

    # Get composer command with fallback to default
    command = module.params['command']
    if re.search(r"\s", command):
        module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'")

    arguments = module.params['arguments']
    global_command = module.params['global_command']
    available_options = get_available_options(module=module, command=command)

    options = []

    # Default options
    default_options = [
        'no-ansi',
        'no-interaction',
        'no-progress',
    ]

    for option in default_options:
        if option in available_options:
            option = "--%s" % option
            options.append(option)

    if not global_command:
        options.extend(['--working-dir', "'%s'" % module.params['working_dir']])

    option_params = {
        'prefer_source': 'prefer-source',
        'prefer_dist': 'prefer-dist',
        'no_dev': 'no-dev',
        'no_scripts': 'no-scripts',
        'no_plugins': 'no-plugins',
        'apcu_autoloader': 'apcu-autoloader',
        'optimize_autoloader': 'optimize-autoloader',
        'classmap_authoritative': 'classmap-authoritative',
        'ignore_platform_reqs': 'ignore-platform-reqs',
    }

    for param, option in option_params.items():
        if module.params.get(param) and option in available_options:
            option = "--%s" % option
            options.append(option)

    if module.check_mode:
        if 'dry-run' in available_options:
            options.append('--dry-run')
        else:
            module.exit_json(skipped=True, msg="command '%s' does not support check mode, skipping" % command)

    rc, out, err = composer_command(module, command, arguments, options, global_command)

    if rc != 0:
        output = parse_out(err)
        module.fail_json(msg=output, stdout=err)
    else:
        # Composer version > 1.0.0-alpha9 now use stderr for standard notification messages
        output = parse_out(out + err)
        module.exit_json(changed=has_changed(output), msg=output, stdout=out + err)


if __name__ == '__main__':
    main()
@@ -0,0 +1,607 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Steve Gargan <steve.gargan@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
module: consul
short_description: Add, modify & delete services within a consul cluster
description:
    - Registers services and checks for an agent with a consul cluster.
      A service is some process running on the agent node that should be advertised by
      consul's discovery mechanism. It may optionally supply a check definition,
      a periodic service test to notify the consul cluster of service's health.
    - "Checks may also be registered per node, e.g. disk usage or cpu usage, to
      report the health of the entire node to the cluster.
      Service level checks do not require a check name or id as these are derived
      by Consul from the Service name and id respectively by appending 'service:'.
      Node level checks require a I(check_name) and optionally a I(check_id)."
    - Currently, there is no complete way to retrieve the script, interval or ttl
      metadata for a registered check. Without this metadata it is not possible to
      tell if the data supplied with ansible represents a change to a check. As a
      result this does not attempt to determine changes and will always report that
      a change occurred. An API method is planned to supply this metadata, so at that
      stage change management will be added.
    - "See U(http://consul.io) for more details."
requirements:
    - python-consul
    - requests
author: "Steve Gargan (@sgargan)"
options:
    state:
        type: str
        description:
            - Register or deregister the consul service, defaults to C(present).
        default: present
        choices: ['present', 'absent']
    service_name:
        type: str
        description:
            - Unique name for the service on a node, must be unique per node.
              Required if registering a service. May be omitted if registering
              a node level check.
    service_id:
        type: str
        description:
            - The ID for the service, must be unique per node. If I(state=absent),
              defaults to the service name if supplied.
    host:
        type: str
        description:
            - Host of the consul agent, defaults to C(localhost).
        default: localhost
    port:
        type: int
        description:
            - The port on which the consul agent is running.
        default: 8500
    scheme:
        type: str
        description:
            - The protocol scheme on which the consul agent is running.
        default: http
    validate_certs:
        description:
            - Whether to verify the TLS certificate of the consul agent.
        type: bool
        default: true
    notes:
        type: str
        description:
            - Notes to attach to check when registering it.
    service_port:
        type: int
        description:
            - The port on which the service is listening. Can optionally be supplied for
              registration of a service, i.e. if I(service_name) or I(service_id) is set.
    service_address:
        type: str
        description:
            - The address to advertise that the service will be listening on.
              This value will be passed as the I(address) parameter to Consul's
              C(/v1/agent/service/register) API method, so refer to the Consul API
              documentation for further details.
    tags:
        type: list
        elements: str
        description:
            - Tags that will be attached to the service registration.
    script:
        type: str
        description:
            - The script/command that will be run periodically to check the health
              of the service. Scripts require I(interval) and vice versa.
    interval:
        type: str
        description:
            - The interval at which the service check will be run. This is a number
              with a C(s) or C(m) suffix to signify the units of seconds or minutes, e.g.
              C(15s) or C(1m). If no suffix is supplied, C(s) will be used by default, e.g.
              C(10) will be C(10s). Required if the I(script) parameter is specified.
    check_id:
        type: str
        description:
            - An ID for the service check. If I(state=absent), defaults to
              I(check_name). Ignored if part of a service definition.
    check_name:
        type: str
        description:
            - A name for the service check. Required if standalone, ignored if
              part of service definition.
    ttl:
        type: str
        description:
            - Checks can be registered with a ttl instead of a I(script) and I(interval).
              This means that the service will check in with the agent before the
              ttl expires. If it doesn't, the check will be considered failed.
              Required if registering a check and the script and interval are missing.
              Similar to the interval, this is a number with a C(s) or C(m) suffix to
              signify the units of seconds or minutes, e.g. C(15s) or C(1m). If no suffix
              is supplied, C(s) will be used by default, e.g. C(10) will be C(10s).
    tcp:
        type: str
        description:
            - Checks can be registered with a TCP port. This means that consul
              will check if the connection attempt to that port is successful (that is, the port is currently accepting connections).
              The format is C(host:port), for example C(localhost:80).
              I(interval) must also be provided with this option.
        version_added: '1.3.0'
    http:
        type: str
        description:
            - Checks can be registered with an HTTP endpoint. This means that consul
              will check that the http endpoint returns a successful HTTP status.
              I(interval) must also be provided with this option.
    timeout:
        type: str
        description:
            - A custom HTTP check timeout. The consul default is 10 seconds.
              Similar to the interval, this is a number with a C(s) or C(m) suffix to
              signify the units of seconds or minutes, e.g. C(15s) or C(1m).
    token:
        type: str
        description:
            - The token key identifying an ACL rule set. May be required to register services.
'''

EXAMPLES = '''
- name: Register nginx service with the local consul agent
  community.general.consul:
    service_name: nginx
    service_port: 80

- name: Register nginx service with curl check
  community.general.consul:
    service_name: nginx
    service_port: 80
    script: curl http://localhost
    interval: 60s

- name: Register nginx with a tcp check
  community.general.consul:
    service_name: nginx
    service_port: 80
    interval: 60s
    tcp: localhost:80

- name: Register nginx with an http check
  community.general.consul:
    service_name: nginx
    service_port: 80
    interval: 60s
    http: http://localhost:80/status

- name: Register external service nginx available at 10.1.5.23
  community.general.consul:
    service_name: nginx
    service_port: 80
    service_address: 10.1.5.23

- name: Register nginx with some service tags
  community.general.consul:
    service_name: nginx
    service_port: 80
    tags:
      - prod
      - webservers

- name: Remove nginx service
  community.general.consul:
    service_name: nginx
    state: absent

- name: Register celery worker service
  community.general.consul:
    service_name: celery-worker
    tags:
      - prod
      - worker

- name: Create a node level check to test disk usage
  community.general.consul:
    check_name: Disk usage
    check_id: disk_usage
    script: /opt/disk_usage.py
    interval: 5m

- name: Register an http check against a service that's already registered
  community.general.consul:
    check_name: nginx-check2
    check_id: nginx-check2
    service_id: nginx
    interval: 60s
    http: http://localhost:80/morestatus
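
# A hedged extra example (not among the module's original ones): a standalone
# TTL check that an external process must update before the ttl expires.
- name: Register a node level check with a ttl
  community.general.consul:
    check_name: app-heartbeat
    check_id: app-heartbeat
    ttl: 30s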
'''

try:
    import consul
    from requests.exceptions import ConnectionError

    class PatchedConsulAgentService(consul.Consul.Agent.Service):
        def deregister(self, service_id, token=None):
            params = {}
            if token:
                params['token'] = token
            return self.agent.http.put(consul.base.CB.bool(),
                                       '/v1/agent/service/deregister/%s' % service_id,
                                       params=params)
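
    # The subclass above exists because, at the time of writing, python-consul's
    # stock Agent.Service.deregister does not forward an ACL token; the HTTP
    # call mirrors the library's implementation with the token parameter added.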

    python_consul_installed = True
except ImportError:
    python_consul_installed = False

import re
from ansible.module_utils.basic import AnsibleModule


def register_with_consul(module):
    state = module.params['state']

    if state == 'present':
        add(module)
    else:
        remove(module)


def add(module):
    ''' adds a service or a check depending on supplied configuration'''
    check = parse_check(module)
    service = parse_service(module)

    if not service and not check:
        module.fail_json(msg='a name and port are required to register a service')

    if service:
        if check:
            service.add_check(check)
        add_service(module, service)
    elif check:
        add_check(module, check)


def remove(module):
    ''' removes a service or a check '''
    service_id = module.params['service_id'] or module.params['service_name']
    check_id = module.params['check_id'] or module.params['check_name']
    if service_id:
        remove_service(module, service_id)
    else:
        remove_check(module, check_id)


def add_check(module, check):
    ''' registers a check with the given agent. currently there is no way to
    retrieve the full metadata of an existing check through the consul api.
    Without this we can't compare to the supplied check and so we must assume
    a change. '''
    if not check.name and not check.service_id:
        module.fail_json(msg='a check name is required for a node level check, one not attached to a service')

    consul_api = get_consul_api(module)
    check.register(consul_api)

    module.exit_json(changed=True,
                     check_id=check.check_id,
                     check_name=check.name,
                     script=check.script,
                     interval=check.interval,
                     ttl=check.ttl,
                     tcp=check.tcp,
                     http=check.http,
                     timeout=check.timeout,
                     service_id=check.service_id)


def remove_check(module, check_id):
    ''' removes a check using its id '''
    consul_api = get_consul_api(module)

    if check_id in consul_api.agent.checks():
        consul_api.agent.check.deregister(check_id)
        module.exit_json(changed=True, id=check_id)

    module.exit_json(changed=False, id=check_id)


def add_service(module, service):
    ''' registers a service with the current agent '''
    result = service
    changed = False

    consul_api = get_consul_api(module)
    existing = get_service_by_id_or_name(consul_api, service.id)

    # there is no way to retrieve the details of checks so if a check is present
    # in the service it must be re-registered
    if service.has_checks() or not existing or not existing == service:

        service.register(consul_api)
        # check that it registered correctly
        registered = get_service_by_id_or_name(consul_api, service.id)
        if registered:
            result = registered
            changed = True

    module.exit_json(changed=changed,
                     service_id=result.id,
                     service_name=result.name,
                     service_port=result.port,
                     checks=[check.to_dict() for check in service.checks()],
                     tags=result.tags)


def remove_service(module, service_id):
    ''' deregister a service from the given agent using its service id '''
    consul_api = get_consul_api(module)
    service = get_service_by_id_or_name(consul_api, service_id)
    if service:
        consul_api.agent.service.deregister(service_id, token=module.params['token'])
        module.exit_json(changed=True, id=service_id)

    module.exit_json(changed=False, id=service_id)


def get_consul_api(module):
    consulClient = consul.Consul(host=module.params['host'],
                                 port=module.params['port'],
                                 scheme=module.params['scheme'],
                                 verify=module.params['validate_certs'],
                                 token=module.params['token'])
    consulClient.agent.service = PatchedConsulAgentService(consulClient)
    return consulClient


def get_service_by_id_or_name(consul_api, service_id_or_name):
    ''' iterate the registered services and find one with the given id '''
    for dummy, service in consul_api.agent.services().items():
        if service_id_or_name in (service['ID'], service['Service']):
            return ConsulService(loaded=service)


def parse_check(module):
    _checks = [module.params[p] for p in ('script', 'ttl', 'tcp', 'http') if module.params[p]]

    if len(_checks) > 1:
        module.fail_json(
            msg='checks are either script, tcp, http or ttl driven, supplying more than one does not make sense')

    if module.params['check_id'] or _checks:
        return ConsulCheck(
            module.params['check_id'],
            module.params['check_name'],
            module.params['check_node'],
            module.params['check_host'],
            module.params['script'],
            module.params['interval'],
            module.params['ttl'],
            module.params['notes'],
            module.params['tcp'],
            module.params['http'],
            module.params['timeout'],
            module.params['service_id'],
        )


def parse_service(module):
    return ConsulService(
        module.params['service_id'],
        module.params['service_name'],
        module.params['service_address'],
        module.params['service_port'],
        module.params['tags'],
    )


class ConsulService(object):

    def __init__(self, service_id=None, name=None, address=None, port=-1,
                 tags=None, loaded=None):
        self.id = self.name = name
        if service_id:
            self.id = service_id
        self.address = address
        self.port = port
        self.tags = tags
        self._checks = []
        if loaded:
            self.id = loaded['ID']
            self.name = loaded['Service']
            self.port = loaded['Port']
            self.tags = loaded['Tags']

    def register(self, consul_api):
        optional = {}

        if self.port:
            optional['port'] = self.port

        if len(self._checks) > 0:
            optional['check'] = self._checks[0].check

        consul_api.agent.service.register(
            self.name,
            service_id=self.id,
            address=self.address,
            tags=self.tags,
            **optional)

    def add_check(self, check):
        self._checks.append(check)

    def checks(self):
        return self._checks

    def has_checks(self):
        return len(self._checks) > 0

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.id == other.id and
                self.name == other.name and
                self.port == other.port and
                self.tags == other.tags)

    def __ne__(self, other):
        return not self.__eq__(other)

    def to_dict(self):
        data = {'id': self.id, "name": self.name}
        if self.port:
            data['port'] = self.port
        if self.tags and len(self.tags) > 0:
            data['tags'] = self.tags
        if len(self._checks) > 0:
            data['check'] = self._checks[0].to_dict()
        return data


class ConsulCheck(object):

    def __init__(self, check_id, name, node=None, host='localhost',
                 script=None, interval=None, ttl=None, notes=None, tcp=None, http=None, timeout=None, service_id=None):
        self.check_id = self.name = name
        if check_id:
            self.check_id = check_id
        self.service_id = service_id
        self.notes = notes
        self.node = node
        self.host = host

        self.interval = self.validate_duration('interval', interval)
        self.ttl = self.validate_duration('ttl', ttl)
        self.script = script
        self.tcp = tcp
        self.http = http
        self.timeout = self.validate_duration('timeout', timeout)

        self.check = None

        if script:
            self.check = consul.Check.script(script, self.interval)

        if ttl:
            self.check = consul.Check.ttl(self.ttl)

        if http:
            if interval is None:
                raise Exception('http check must specify interval')

            self.check = consul.Check.http(http, self.interval, self.timeout)

        if tcp:
            if interval is None:
                raise Exception('tcp check must specify interval')

            regex = r"(?P<host>.*):(?P<port>(?:[0-9]+))$"
            match = re.match(regex, tcp)

            if not match:
                raise Exception('tcp check must be in host:port format')

            self.check = consul.Check.tcp(match.group('host').strip('[]'), int(match.group('port')), self.interval)

    def validate_duration(self, name, duration):
        if duration:
            duration_units = ['ns', 'us', 'ms', 's', 'm', 'h']
            if not any(duration.endswith(suffix) for suffix in duration_units):
                duration = "{0}s".format(duration)
        return duration
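
    # Illustration (hypothetical values): validate_duration('interval', '10')
    # returns '10s' because no known unit suffix is present, while
    # validate_duration('interval', '1m') is returned unchanged.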

    def register(self, consul_api):
        consul_api.agent.check.register(self.name, check_id=self.check_id, service_id=self.service_id,
                                        notes=self.notes,
                                        check=self.check)

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.check_id == other.check_id and
                self.service_id == other.service_id and
                self.name == other.name and
                self.script == other.script and
                self.interval == other.interval)

    def __ne__(self, other):
        return not self.__eq__(other)

    def to_dict(self):
        data = {}
        self._add(data, 'id', attr='check_id')
        self._add(data, 'name')
        self._add(data, 'script')
        self._add(data, 'node')
        self._add(data, 'notes')
        self._add(data, 'host')
        self._add(data, 'interval')
        self._add(data, 'ttl')
        self._add(data, 'tcp')
        self._add(data, 'http')
        self._add(data, 'timeout')
        self._add(data, 'service_id')
        return data

    def _add(self, data, key, attr=None):
        try:
            if attr is None:
                attr = key
            data[key] = getattr(self, attr)
        except Exception:
            pass


def test_dependencies(module):
    if not python_consul_installed:
        module.fail_json(msg="python-consul required for this module. see https://python-consul.readthedocs.io/en/latest/#installation")


def main():
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(default='localhost'),
            port=dict(default=8500, type='int'),
            scheme=dict(default='http'),
            validate_certs=dict(default=True, type='bool'),
            check_id=dict(),
            check_name=dict(),
            check_node=dict(),
            check_host=dict(),
            notes=dict(),
            script=dict(),
            service_id=dict(),
            service_name=dict(),
            service_address=dict(type='str'),
            service_port=dict(type='int'),
            state=dict(default='present', choices=['present', 'absent']),
            interval=dict(type='str'),
            ttl=dict(type='str'),
            tcp=dict(type='str'),
            http=dict(type='str'),
            timeout=dict(type='str'),
            tags=dict(type='list', elements='str'),
            token=dict(no_log=True)
        ),
        required_if=[
            ('state', 'present', ['service_name']),
            ('state', 'absent', ['service_id', 'service_name', 'check_id', 'check_name'], True),
        ],
        supports_check_mode=False,
    )

    test_dependencies(module)

    try:
        register_with_consul(module)
    except ConnectionError as e:
        module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
            module.params['host'], module.params['port'], str(e)))
    except Exception as e:
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
@@ -0,0 +1,684 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Steve Gargan <steve.gargan@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
module: consul_acl
short_description: Manipulate Consul ACL keys and rules
description:
    - Allows the addition, modification and deletion of ACL keys and associated
      rules in a consul cluster via the agent. For more details on using and
      configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html).
author:
    - Steve Gargan (@sgargan)
    - Colin Nolan (@colin-nolan)
options:
    mgmt_token:
        description:
            - A management token is required to manipulate the ACL lists.
        required: true
        type: str
    state:
        description:
            - Whether the ACL pair should be present or absent.
        required: false
        choices: ['present', 'absent']
        default: present
        type: str
    token_type:
        description:
            - The type of token that should be created.
        choices: ['client', 'management']
        default: client
        type: str
    name:
        description:
            - The name that should be associated with the ACL key. This is opaque
              to Consul.
        required: false
        type: str
    token:
        description:
            - The token key identifying an ACL rule set. If generated by consul
              this will be a UUID.
        required: false
        type: str
    rules:
        type: list
        elements: dict
        description:
            - Rules that should be associated with a given token.
        required: false
    host:
        description:
            - Host of the consul agent, defaults to C(localhost).
        required: false
        default: localhost
        type: str
    port:
        type: int
        description:
            - The port on which the consul agent is running.
        required: false
        default: 8500
    scheme:
        description:
            - The protocol scheme on which the consul agent is running.
        required: false
        default: http
        type: str
    validate_certs:
        type: bool
        description:
            - Whether to verify the TLS certificate of the consul agent.
        required: false
        default: true
requirements:
    - python-consul
    - pyhcl
    - requests
'''

EXAMPLES = """
- name: Create an ACL with rules
  community.general.consul_acl:
    host: consul1.example.com
    mgmt_token: some_management_acl
    name: Foo access
    rules:
      - key: "foo"
        policy: read
      - key: "private/foo"
        policy: deny

- name: Create an ACL with a specific token
  community.general.consul_acl:
    host: consul1.example.com
    mgmt_token: some_management_acl
    name: Foo access
    token: my-token
    rules:
      - key: "foo"
        policy: read

- name: Update the rules associated with an ACL token
  community.general.consul_acl:
    host: consul1.example.com
    mgmt_token: some_management_acl
    name: Foo access
    token: some_client_token
    rules:
      - event: "bbq"
        policy: write
      - key: "foo"
        policy: read
      - key: "private"
        policy: deny
      - keyring: write
      - node: "hgs4"
        policy: write
      - operator: read
      - query: ""
        policy: write
      - service: "consul"
        policy: write
      - session: "standup"
        policy: write

- name: Remove a token
  community.general.consul_acl:
    host: consul1.example.com
    mgmt_token: some_management_acl
    token: 172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e
    state: absent
"""

RETURN = """
token:
    description: the token associated with the ACL (the ACL's ID)
    returned: success
    type: str
    sample: a2ec332f-04cf-6fba-e8b8-acf62444d3da
rules:
    description: the HCL JSON representation of the rules associated with the ACL, in the format described in the
                 Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification).
    returned: I(state) == "present"
    type: dict
    sample: {
        "key": {
            "foo": {
                "policy": "write"
            },
            "bar": {
                "policy": "deny"
            }
        }
    }
operation:
    description: the operation performed on the ACL
    returned: changed
    type: str
    sample: update
"""


try:
    import consul
    python_consul_installed = True
except ImportError:
    python_consul_installed = False

try:
    import hcl
    pyhcl_installed = True
except ImportError:
    pyhcl_installed = False

try:
    from requests.exceptions import ConnectionError
    has_requests = True
except ImportError:
    has_requests = False

from collections import defaultdict
from ansible.module_utils.basic import to_text, AnsibleModule


RULE_SCOPES = [
    "agent",
    "agent_prefix",
    "event",
    "event_prefix",
    "key",
    "key_prefix",
    "keyring",
    "node",
    "node_prefix",
    "operator",
    "query",
    "query_prefix",
    "service",
    "service_prefix",
    "session",
    "session_prefix",
]

MANAGEMENT_PARAMETER_NAME = "mgmt_token"
HOST_PARAMETER_NAME = "host"
SCHEME_PARAMETER_NAME = "scheme"
VALIDATE_CERTS_PARAMETER_NAME = "validate_certs"
NAME_PARAMETER_NAME = "name"
PORT_PARAMETER_NAME = "port"
RULES_PARAMETER_NAME = "rules"
STATE_PARAMETER_NAME = "state"
TOKEN_PARAMETER_NAME = "token"
TOKEN_TYPE_PARAMETER_NAME = "token_type"

PRESENT_STATE_VALUE = "present"
ABSENT_STATE_VALUE = "absent"

CLIENT_TOKEN_TYPE_VALUE = "client"
MANAGEMENT_TOKEN_TYPE_VALUE = "management"

REMOVE_OPERATION = "remove"
UPDATE_OPERATION = "update"
CREATE_OPERATION = "create"

_POLICY_JSON_PROPERTY = "policy"
_RULES_JSON_PROPERTY = "Rules"
_TOKEN_JSON_PROPERTY = "ID"
_TOKEN_TYPE_JSON_PROPERTY = "Type"
_NAME_JSON_PROPERTY = "Name"
_POLICY_YML_PROPERTY = "policy"
_POLICY_HCL_PROPERTY = "policy"

_ARGUMENT_SPEC = {
    MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True),
    HOST_PARAMETER_NAME: dict(default='localhost'),
    SCHEME_PARAMETER_NAME: dict(default='http'),
    VALIDATE_CERTS_PARAMETER_NAME: dict(type='bool', default=True),
    NAME_PARAMETER_NAME: dict(),
    PORT_PARAMETER_NAME: dict(default=8500, type='int'),
    RULES_PARAMETER_NAME: dict(type='list', elements='dict'),
    STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]),
    TOKEN_PARAMETER_NAME: dict(no_log=False),
    TOKEN_TYPE_PARAMETER_NAME: dict(choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE],
                                    default=CLIENT_TOKEN_TYPE_VALUE)
}


def set_acl(consul_client, configuration):
    """
    Sets an ACL based on the given configuration.
    :param consul_client: the consul client
    :param configuration: the run configuration
    :return: the output of setting the ACL
    """
    acls_as_json = decode_acls_as_json(consul_client.acl.list())
    existing_acls_mapped_by_name = dict((acl.name, acl) for acl in acls_as_json if acl.name is not None)
    existing_acls_mapped_by_token = dict((acl.token, acl) for acl in acls_as_json)
    if None in existing_acls_mapped_by_token:
        raise AssertionError("expecting ACL list to be associated to a token: %s" %
                             existing_acls_mapped_by_token[None])

    if configuration.token is None and configuration.name and configuration.name in existing_acls_mapped_by_name:
        # No token but name given so can get token from name
        configuration.token = existing_acls_mapped_by_name[configuration.name].token

    if configuration.token and configuration.token in existing_acls_mapped_by_token:
        return update_acl(consul_client, configuration)
    else:
        if configuration.token in existing_acls_mapped_by_token:
            raise AssertionError()
        if configuration.name in existing_acls_mapped_by_name:
            raise AssertionError()
        return create_acl(consul_client, configuration)


def update_acl(consul_client, configuration):
    """
    Updates an ACL.
    :param consul_client: the consul client
    :param configuration: the run configuration
    :return: the output of the update
    """
    existing_acl = load_acl_with_token(consul_client, configuration.token)
    changed = existing_acl.rules != configuration.rules

    if changed:
        name = configuration.name if configuration.name is not None else existing_acl.name
        rules_as_hcl = encode_rules_as_hcl_string(configuration.rules)
        updated_token = consul_client.acl.update(
            configuration.token, name=name, type=configuration.token_type, rules=rules_as_hcl)
        if updated_token != configuration.token:
            raise AssertionError()

    return Output(changed=changed, token=configuration.token, rules=configuration.rules, operation=UPDATE_OPERATION)


def create_acl(consul_client, configuration):
    """
    Creates an ACL.
    :param consul_client: the consul client
    :param configuration: the run configuration
    :return: the output of the creation
    """
    rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) if len(configuration.rules) > 0 else None
    token = consul_client.acl.create(
        name=configuration.name, type=configuration.token_type, rules=rules_as_hcl, acl_id=configuration.token)
    rules = configuration.rules
    return Output(changed=True, token=token, rules=rules, operation=CREATE_OPERATION)


def remove_acl(consul, configuration):
    """
    Removes an ACL.
    :param consul: the consul client
    :param configuration: the run configuration
    :return: the output of the removal
    """
    token = configuration.token
    changed = consul.acl.info(token) is not None
    if changed:
        consul.acl.destroy(token)
    return Output(changed=changed, token=token, operation=REMOVE_OPERATION)


def load_acl_with_token(consul, token):
    """
    Loads the ACL with the given token (token == rule ID).
    :param consul: the consul client
    :param token: the ACL "token"/ID (not name)
    :return: the ACL associated to the given token
    :exception ConsulACLNotFoundException: raised if the given token does not exist
    """
    acl_as_json = consul.acl.info(token)
    if acl_as_json is None:
        raise ConsulACLNotFoundException(token)
    return decode_acl_as_json(acl_as_json)


def encode_rules_as_hcl_string(rules):
    """
    Converts the given rules into the equivalent HCL (string) representation.
    :param rules: the rules
    :return: the equivalent HCL (string) representation of the rules. Will be None if there are no rules (see internal
    note for justification)
    """
    if len(rules) == 0:
        # Note: empty string is not valid HCL according to `hcl.load`; however, the ACL `Rules` property will be an
        # empty string if there are no rules...
        return None
    rules_as_hcl = ""
    for rule in rules:
        rules_as_hcl += encode_rule_as_hcl_string(rule)
    return rules_as_hcl


def encode_rule_as_hcl_string(rule):
    """
    Converts the given rule into the equivalent HCL (string) representation.
    :param rule: the rule
    :return: the equivalent HCL (string) representation of the rule
    """
    if rule.pattern is not None:
        return '%s "%s" {\n %s = "%s"\n}\n' % (rule.scope, rule.pattern, _POLICY_HCL_PROPERTY, rule.policy)
    else:
        return '%s = "%s"\n' % (rule.scope, rule.policy)
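
# Illustration (hypothetical rule): Rule("key", "read", "foo") encodes to
#   key "foo" {
#    policy = "read"
#   }
# while a pattern-less rule such as Rule("operator", "read") encodes to
#   operator = "read"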


def decode_rules_as_hcl_string(rules_as_hcl):
    """
    Converts the given HCL (string) representation of rules into a list of rule domain models.
    :param rules_as_hcl: the HCL (string) representation of a collection of rules
    :return: the equivalent domain model to the given rules
    """
    rules_as_hcl = to_text(rules_as_hcl)
    rules_as_json = hcl.loads(rules_as_hcl)
    return decode_rules_as_json(rules_as_json)


def decode_rules_as_json(rules_as_json):
    """
    Converts the given JSON representation of rules into a list of rule domain models.
    :param rules_as_json: the JSON representation of a collection of rules
    :return: the equivalent domain model to the given rules
    """
    rules = RuleCollection()
    for scope in rules_as_json:
        if not isinstance(rules_as_json[scope], dict):
            rules.add(Rule(scope, rules_as_json[scope]))
        else:
            for pattern, policy in rules_as_json[scope].items():
                rules.add(Rule(scope, policy[_POLICY_JSON_PROPERTY], pattern))
    return rules


def encode_rules_as_json(rules):
    """
    Converts the given rules into the equivalent JSON representation according to the documentation:
    https://www.consul.io/docs/guides/acl.html#rule-specification.
    :param rules: the rules
    :return: JSON representation of the given rules
    """
    rules_as_json = defaultdict(dict)
    for rule in rules:
        if rule.pattern is not None:
            if rule.pattern in rules_as_json[rule.scope]:
                raise AssertionError()
            rules_as_json[rule.scope][rule.pattern] = {
                _POLICY_JSON_PROPERTY: rule.policy
            }
        else:
            if rule.scope in rules_as_json:
                raise AssertionError()
            rules_as_json[rule.scope] = rule.policy
    return rules_as_json


def decode_rules_as_yml(rules_as_yml):
    """
    Converts the given YAML representation of rules into a list of rule domain models.
    :param rules_as_yml: the YAML representation of a collection of rules
    :return: the equivalent domain model to the given rules
    """
    rules = RuleCollection()
    if rules_as_yml:
        for rule_as_yml in rules_as_yml:
            rule_added = False
            for scope in RULE_SCOPES:
                if scope in rule_as_yml:
                    if rule_as_yml[scope] is None:
                        raise ValueError("Rule for '%s' does not have a value associated to the scope" % scope)
                    policy = rule_as_yml[_POLICY_YML_PROPERTY] if _POLICY_YML_PROPERTY in rule_as_yml \
                        else rule_as_yml[scope]
                    pattern = rule_as_yml[scope] if _POLICY_YML_PROPERTY in rule_as_yml else None
                    rules.add(Rule(scope, policy, pattern))
                    rule_added = True
                    break
            if not rule_added:
                raise ValueError("A rule requires one of %s and a policy." % ('/'.join(RULE_SCOPES)))
    return rules
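
# Illustration (hypothetical input): the YAML item {key: "foo", policy: "read"}
# decodes to Rule(scope="key", policy="read", pattern="foo"), while the
# shorthand {keyring: "write"} decodes to Rule(scope="keyring", policy="write")
# with no pattern.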


def decode_acl_as_json(acl_as_json):
    """
    Converts the given JSON representation of an ACL into the equivalent domain model.
    :param acl_as_json: the JSON representation of an ACL
    :return: the equivalent domain model to the given ACL
    """
    rules_as_hcl = acl_as_json[_RULES_JSON_PROPERTY]
    rules = decode_rules_as_hcl_string(acl_as_json[_RULES_JSON_PROPERTY]) if rules_as_hcl.strip() != "" \
        else RuleCollection()
    return ACL(
        rules=rules,
        token_type=acl_as_json[_TOKEN_TYPE_JSON_PROPERTY],
        token=acl_as_json[_TOKEN_JSON_PROPERTY],
        name=acl_as_json[_NAME_JSON_PROPERTY]
    )


def decode_acls_as_json(acls_as_json):
    """
    Converts the given JSON representation of ACLs into a list of ACL domain models.
    :param acls_as_json: the JSON representation of a collection of ACLs
    :return: list of equivalent domain models for the given ACLs (order not guaranteed to be the same)
    """
    return [decode_acl_as_json(acl_as_json) for acl_as_json in acls_as_json]


class ConsulACLNotFoundException(Exception):
    """
    Exception raised if an ACL with the given token is not found.
    """


class Configuration:
    """
    Configuration for this module.
    """

    def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None,
                 rules=None, state=None, token=None, token_type=None):
        self.management_token = management_token  # type: str
        self.host = host  # type: str
        self.scheme = scheme  # type: str
        self.validate_certs = validate_certs  # type: bool
        self.name = name  # type: str
        self.port = port  # type: int
        self.rules = rules  # type: RuleCollection
        self.state = state  # type: str
        self.token = token  # type: str
        self.token_type = token_type  # type: str


class Output:
    """
    Output of an action of this module.
    """

    def __init__(self, changed=None, token=None, rules=None, operation=None):
        self.changed = changed  # type: bool
        self.token = token  # type: str
        self.rules = rules  # type: RuleCollection
        self.operation = operation  # type: str


class ACL:
    """
    Consul ACL. See: https://www.consul.io/docs/guides/acl.html.
    """

    def __init__(self, rules, token_type, token, name):
        self.rules = rules
        self.token_type = token_type
        self.token = token
        self.name = name

    def __eq__(self, other):
        return other \
            and isinstance(other, self.__class__) \
            and self.rules == other.rules \
            and self.token_type == other.token_type \
            and self.token == other.token \
            and self.name == other.name

    def __hash__(self):
        return hash(self.rules) ^ hash(self.token_type) ^ hash(self.token) ^ hash(self.name)


class Rule:
    """
    ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope.
    """

    def __init__(self, scope, policy, pattern=None):
        self.scope = scope
        self.policy = policy
        self.pattern = pattern

    def __eq__(self, other):
        return isinstance(other, self.__class__) \
            and self.scope == other.scope \
            and self.policy == other.policy \
            and self.pattern == other.pattern

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return (hash(self.scope) ^ hash(self.policy)) ^ hash(self.pattern)

    def __str__(self):
        return encode_rule_as_hcl_string(self)


class RuleCollection:
    """
    Collection of ACL rules, which are part of a Consul ACL.
    """

    def __init__(self):
        self._rules = {}
        for scope in RULE_SCOPES:
            self._rules[scope] = {}

    def __iter__(self):
        all_rules = []
        for scope, pattern_keyed_rules in self._rules.items():
            for pattern, rule in pattern_keyed_rules.items():
                all_rules.append(rule)
        return iter(all_rules)

    def __len__(self):
        count = 0
        for scope in RULE_SCOPES:
            count += len(self._rules[scope])
        return count

    def __eq__(self, other):
        return isinstance(other, self.__class__) \
            and set(self) == set(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return encode_rules_as_hcl_string(self)

    def add(self, rule):
        """
        Adds the given rule to this collection.
        :param rule: model of a rule
        :raises ValueError: raised if there already exists a rule for a given scope and pattern
        """
        if rule.pattern in self._rules[rule.scope]:
            pattern_info = " and pattern '%s'" % rule.pattern if rule.pattern is not None else ""
            raise ValueError("Duplicate rule for scope '%s'%s" % (rule.scope, pattern_info))
        self._rules[rule.scope][rule.pattern] = rule


def get_consul_client(configuration):
    """
    Gets a Consul client for the given configuration.

    Does not check if the Consul client can connect.
    :param configuration: the run configuration
    :return: Consul client
    """
    token = configuration.management_token
    if token is None:
        token = configuration.token
    if token is None:
        raise AssertionError("Expecting the management token to always be set")
    return consul.Consul(host=configuration.host, port=configuration.port, scheme=configuration.scheme,
                         verify=configuration.validate_certs, token=token)


def check_dependencies():
    """
    Checks that the required dependencies have been imported.
    :exception ImportError: if it is detected that any of the required dependencies have not been imported
    """
    if not python_consul_installed:
        raise ImportError("python-consul required for this module. "
                          "See: https://python-consul.readthedocs.io/en/latest/#installation")

    if not pyhcl_installed:
        raise ImportError("pyhcl required for this module. "
                          "See: https://pypi.org/project/pyhcl/")

    if not has_requests:
        raise ImportError("requests required for this module. See https://pypi.org/project/requests/")


def main():
    """
    Main method.
    """
    module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=False)

    try:
        check_dependencies()
    except ImportError as e:
        module.fail_json(msg=str(e))

    configuration = Configuration(
        management_token=module.params.get(MANAGEMENT_PARAMETER_NAME),
        host=module.params.get(HOST_PARAMETER_NAME),
        scheme=module.params.get(SCHEME_PARAMETER_NAME),
        validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME),
        name=module.params.get(NAME_PARAMETER_NAME),
        port=module.params.get(PORT_PARAMETER_NAME),
        rules=decode_rules_as_yml(module.params.get(RULES_PARAMETER_NAME)),
        state=module.params.get(STATE_PARAMETER_NAME),
        token=module.params.get(TOKEN_PARAMETER_NAME),
        token_type=module.params.get(TOKEN_TYPE_PARAMETER_NAME)
    )
    consul_client = get_consul_client(configuration)

    try:
        if configuration.state == PRESENT_STATE_VALUE:
            output = set_acl(consul_client, configuration)
        else:
            output = remove_acl(consul_client, configuration)
    except ConnectionError as e:
        module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
            configuration.host, configuration.port, str(e)))
        raise

    return_values = dict(changed=output.changed, token=output.token, operation=output.operation)
    if output.rules is not None:
        return_values["rules"] = encode_rules_as_json(output.rules)
    module.exit_json(**return_values)


if __name__ == "__main__":
    main()
@@ -0,0 +1,329 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Steve Gargan <steve.gargan@gmail.com>
# Copyright (c) 2018 Genome Research Ltd.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
module: consul_kv
short_description: Manipulate entries in the key/value store of a consul cluster
description:
    - Allows the retrieval, addition, modification and deletion of key/value entries in a
      consul cluster via the agent. The entire contents of the record, including
      the indices, flags and session are returned as C(value).
    - If the C(key) represents a prefix, note that when a value is removed the existing
      value, if any, is returned as part of the results.
    - See U(http://www.consul.io/docs/agent/http.html#kv) for more details.
requirements:
    - python-consul
    - requests
author:
    - Steve Gargan (@sgargan)
    - Colin Nolan (@colin-nolan)
options:
    state:
        description:
            - The action to take with the supplied key and value. If the state is C(present) and I(value) is set, the key
              contents will be set to the value supplied and C(changed) will be set to C(true) only if the value was
              different to the current contents. If the state is C(present) and I(value) is not set, the existing value
              associated to the key will be returned. The state C(absent) will remove the key/value pair;
              again C(changed) will be set to C(true) only if the key actually existed
              prior to the removal. An attempt can be made to obtain or free the
              lock associated with a key/value pair with the states C(acquire) or
              C(release) respectively. A valid session must be supplied to make the
              attempt; C(changed) will be C(true) if the attempt is successful, C(false)
              otherwise.
        type: str
        choices: [ absent, acquire, present, release ]
        default: present
    key:
        description:
            - The key at which the value should be stored.
        type: str
        required: true
    value:
        description:
            - The value to be associated with the given key, required if C(state)
              is C(present).
        type: str
    recurse:
        description:
            - If the key represents a prefix, each entry with the prefix can be
              retrieved by setting this to C(true).
        type: bool
    retrieve:
        description:
            - If the I(state) is C(present) and I(value) is set, perform a
              read after setting the value and return this value.
        default: true
        type: bool
    session:
        description:
            - The session that should be used to acquire or release a lock
              associated with a key/value pair.
        type: str
    token:
        description:
            - The token key identifying an ACL rule set that controls access to
              the key value pair.
        type: str
    cas:
        description:
            - Used when acquiring a lock with a session. If the C(cas) is C(0), then
              Consul will only put the key if it does not already exist. If the
              C(cas) value is non-zero, then the key is only set if the index matches
              the ModifyIndex of that key.
        type: str
    flags:
        description:
            - Opaque positive integer value that can be passed when setting a value.
        type: str
    host:
        description:
            - Host of the consul agent.
        type: str
        default: localhost
    port:
        description:
            - The port on which the consul agent is running.
        type: int
        default: 8500
    scheme:
        description:
            - The protocol scheme on which the consul agent is running.
        type: str
        default: http
    validate_certs:
        description:
            - Whether to verify the TLS certificate of the consul agent.
        type: bool
        default: true
'''


EXAMPLES = '''
# If the key does not exist, the value associated to the "data" property in `retrieved_key` will be `None`
# If the key value is empty string, `retrieved_key["data"]["Value"]` will be `None`
- name: Retrieve a value from the key/value store
  community.general.consul_kv:
    key: somekey
  register: retrieved_key

- name: Add or update the value associated with a key in the key/value store
  community.general.consul_kv:
    key: somekey
    value: somevalue

- name: Remove a key from the store
  community.general.consul_kv:
    key: somekey
    state: absent

- name: Add a node to an arbitrary group via consul inventory (see consul.ini)
  community.general.consul_kv:
    key: ansible/groups/dc1/somenode
    value: top_secret

- name: Register a key/value pair with an associated session
  community.general.consul_kv:
    key: stg/node/server_birthday
    value: 20160509
    session: "{{ sessionid }}"
    state: acquire
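
# A hedged extra example (not among the original ones): skip the read-back
# that retrieve=true performs after a write.
- name: Set a value without retrieving it afterwards
  community.general.consul_kv:
    key: somekey
    value: somevalue
    retrieve: false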
'''

from ansible.module_utils.common.text.converters import to_text

try:
    import consul
    from requests.exceptions import ConnectionError
    python_consul_installed = True
except ImportError:
    python_consul_installed = False

from ansible.module_utils.basic import AnsibleModule

# Note: although the python-consul documentation implies that using a key with a value of `None` with `put` has a
# special meaning (https://python-consul.readthedocs.io/en/latest/#consul-kv), if not set in the subsequent API call,
# the value just defaults to an empty string (https://www.consul.io/api/kv.html#create-update-key)
NOT_SET = None


def _has_value_changed(consul_client, key, target_value):
    """
    Uses the given Consul client to determine if the value associated to the given key is different to the given target
    value.
    :param consul_client: Consul connected client
    :param key: key in Consul
    :param target_value: value to be associated to the key
    :return: tuple where the first element is the value of the "X-Consul-Index" header and the second is `True` if the
    value has changed (i.e. the stored value is not the target value)
    """
    index, existing = consul_client.kv.get(key)
    if not existing:
        return index, True
    try:
        changed = to_text(existing['Value'], errors='surrogate_or_strict') != target_value
        return index, changed
    except UnicodeError:
        # Existing value was not decodable but all values we set are valid utf-8
        return index, True
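
# Illustration (hypothetical values): if "somekey" currently holds b"old",
# _has_value_changed(client, "somekey", "new") returns (index, True) and
# _has_value_changed(client, "somekey", "old") returns (index, False), where
# index is Consul's "X-Consul-Index" for the key.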
|
||||
|
||||
|
||||
def execute(module):
|
||||
state = module.params.get('state')
|
||||
|
||||
if state == 'acquire' or state == 'release':
|
||||
lock(module, state)
|
||||
elif state == 'present':
|
||||
if module.params.get('value') is NOT_SET:
|
||||
get_value(module)
|
||||
else:
|
||||
set_value(module)
|
||||
elif state == 'absent':
|
||||
remove_value(module)
|
||||
else:
|
||||
module.exit_json(msg="Unsupported state: %s" % (state, ))
|
||||
|
||||
|
||||
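# For state=acquire/release, lock() issues the put() below with
# acquire=<session id> or release=<session id>, so Consul itself arbitrates
# the lock; a failed acquire is reflected as changed=False in the result.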
def lock(module, state):

    consul_api = get_consul_api(module)

    session = module.params.get('session')
    key = module.params.get('key')
    value = module.params.get('value')

    if not session:
        module.fail_json(
            msg='%s of lock for %s requested but no session supplied' %
            (state, key))

    index, changed = _has_value_changed(consul_api, key, value)

    if changed and not module.check_mode:
        if state == 'acquire':
            changed = consul_api.kv.put(key, value,
                                        cas=module.params.get('cas'),
                                        acquire=session,
                                        flags=module.params.get('flags'))
        else:
            changed = consul_api.kv.put(key, value,
                                        cas=module.params.get('cas'),
                                        release=session,
                                        flags=module.params.get('flags'))

    module.exit_json(changed=changed,
                     index=index,
                     key=key)


def get_value(module):
    consul_api = get_consul_api(module)
    key = module.params.get('key')

    index, existing_value = consul_api.kv.get(key, recurse=module.params.get('recurse'))

    module.exit_json(changed=False, index=index, data=existing_value)


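# set_value() forwards the optional 'cas' parameter, Consul's check-and-set
# index: when supplied, the put() only succeeds if the key's modify index
# still matches, which guards against racing writers.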
def set_value(module):
    consul_api = get_consul_api(module)

    key = module.params.get('key')
    value = module.params.get('value')

    if value is NOT_SET:
        raise AssertionError('Cannot set value of "%s" to `NOT_SET`' % key)

    index, changed = _has_value_changed(consul_api, key, value)

    if changed and not module.check_mode:
        changed = consul_api.kv.put(key, value,
                                    cas=module.params.get('cas'),
                                    flags=module.params.get('flags'))

    stored = None
    if module.params.get('retrieve'):
        index, stored = consul_api.kv.get(key)

    module.exit_json(changed=changed,
                     index=index,
                     key=key,
                     data=stored)


def remove_value(module):
    ''' Remove the value associated with the given key. If the recurse parameter
    is set, any key prefixed with the given key will be removed. '''
    consul_api = get_consul_api(module)

    key = module.params.get('key')

    index, existing = consul_api.kv.get(
        key, recurse=module.params.get('recurse'))

    changed = existing is not None
    if changed and not module.check_mode:
        consul_api.kv.delete(key, module.params.get('recurse'))

    module.exit_json(changed=changed,
                     index=index,
                     key=key,
                     data=existing)


def get_consul_api(module):
    return consul.Consul(host=module.params.get('host'),
                         port=module.params.get('port'),
                         scheme=module.params.get('scheme'),
                         verify=module.params.get('validate_certs'),
                         token=module.params.get('token'))


def test_dependencies(module):
    if not python_consul_installed:
        module.fail_json(msg="python-consul required for this module. "
                             "see https://python-consul.readthedocs.io/en/latest/#installation")


def main():

    module = AnsibleModule(
        argument_spec=dict(
            cas=dict(type='str'),
            flags=dict(type='str'),
            key=dict(type='str', required=True, no_log=False),
            host=dict(type='str', default='localhost'),
            scheme=dict(type='str', default='http'),
            validate_certs=dict(type='bool', default=True),
            port=dict(type='int', default=8500),
            recurse=dict(type='bool'),
            retrieve=dict(type='bool', default=True),
            state=dict(type='str', default='present', choices=['absent', 'acquire', 'present', 'release']),
            token=dict(type='str', no_log=True),
            value=dict(type='str', default=NOT_SET),
            session=dict(type='str'),
        ),
        supports_check_mode=True
    )

    test_dependencies(module)

    try:
        execute(module)
    except ConnectionError as e:
        module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
            module.params.get('host'), module.params.get('port'), e))
    except Exception as e:
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
@@ -0,0 +1,300 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2015, Steve Gargan <steve.gargan@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
module: consul_session
short_description: Manipulate consul sessions
description:
    - Allows the addition, modification and deletion of sessions in a consul
      cluster. These sessions can then be used in conjunction with key value pairs
      to implement distributed locks. In-depth documentation for working with
      sessions can be found at http://www.consul.io/docs/internals/sessions.html
requirements:
    - python-consul
    - requests
author:
    - Steve Gargan (@sgargan)
options:
    id:
        description:
            - ID of the session, required when I(state) is either C(info) or
              C(absent).
        type: str
    state:
        description:
            - Whether the session should be present, i.e. created if it doesn't
              exist, or absent, removed if present. If created, the I(id) for the
              session is returned in the output. If C(absent), I(id) is
              required to remove the session. Info for a single session, all the
              sessions for a node or all available sessions can be retrieved by
              specifying C(info), C(node) or C(list) for the I(state); for C(node)
              or C(info), the node I(name) or session I(id) is required as parameter.
        choices: [ absent, info, list, node, present ]
        type: str
        default: present
    name:
        description:
            - The name that should be associated with the session. Required when
              I(state=node) is used.
        type: str
    delay:
        description:
            - The optional lock delay that can be attached to the session when it
              is created. Locks for invalidated sessions are blocked from being
              acquired until this delay has expired. Durations are in seconds.
        type: int
        default: 15
    node:
        description:
            - The name of the node with which the session will be associated.
              By default this is the name of the agent.
        type: str
    datacenter:
        description:
            - The name of the datacenter in which the session exists or should be
              created.
        type: str
    checks:
        description:
            - Checks that will be used to verify the session health. If
              all the checks fail, the session will be invalidated and any locks
              associated with the session will be released and can be acquired once
              the associated lock delay has expired.
        type: list
        elements: str
    host:
        description:
            - The host of the consul agent.
        type: str
        default: localhost
    port:
        description:
            - The port on which the consul agent is running.
        type: int
        default: 8500
    scheme:
        description:
            - The protocol scheme on which the consul agent is running.
        type: str
        default: http
    validate_certs:
        description:
            - Whether to verify the TLS certificate of the consul agent.
        type: bool
        default: true
    behavior:
        description:
            - The optional behavior that can be attached to the session when it
              is created. This controls the behavior when a session is invalidated.
        choices: [ delete, release ]
        type: str
        default: release
    ttl:
        description:
            - Specifies the duration of a session in seconds (between 10 and 86400).
        type: int
        version_added: 5.4.0
    token:
        description:
            - The token key identifying an ACL rule set that controls access to
              the key value pair.
        type: str
        version_added: 5.6.0
'''

EXAMPLES = '''
- name: Register basic session with consul
  community.general.consul_session:
    name: session1

- name: Register a session with an existing check
  community.general.consul_session:
    name: session_with_check
    checks:
      - existing_check_name

- name: Register a session with lock_delay
  community.general.consul_session:
    name: session_with_delay
    delay: 20

- name: Retrieve info about session by id
  community.general.consul_session:
    id: session_id
    state: info

- name: Retrieve active sessions
  community.general.consul_session:
    state: list

- name: Register session with a ttl
  community.general.consul_session:
    name: session-with-ttl
    ttl: 600  # sec
'''

try:
    import consul
    from requests.exceptions import ConnectionError
    python_consul_installed = True
except ImportError:
    python_consul_installed = False

from ansible.module_utils.basic import AnsibleModule


def execute(module):

    state = module.params.get('state')

    if state in ['info', 'list', 'node']:
        lookup_sessions(module)
    elif state == 'present':
        update_session(module)
    else:
        remove_session(module)


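# python-consul's session.list() returns an (index, sessions) tuple; the index
# is dropped below before the sessions are returned, while session.node() and
# session.info() results are passed back to the caller unchanged.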
def lookup_sessions(module):

    datacenter = module.params.get('datacenter')

    state = module.params.get('state')
    consul_client = get_consul_api(module)
    try:
        if state == 'list':
            sessions_list = consul_client.session.list(dc=datacenter)
            # Ditch the index, this can be grabbed from the results
            if sessions_list and len(sessions_list) >= 2:
                sessions_list = sessions_list[1]
            module.exit_json(changed=True,
                             sessions=sessions_list)
        elif state == 'node':
            node = module.params.get('node')
            sessions = consul_client.session.node(node, dc=datacenter)
            module.exit_json(changed=True,
                             node=node,
                             sessions=sessions)
        elif state == 'info':
            session_id = module.params.get('id')

            session_by_id = consul_client.session.info(session_id, dc=datacenter)
            module.exit_json(changed=True,
                             session_id=session_id,
                             sessions=session_by_id)

    except Exception as e:
        module.fail_json(msg="Could not retrieve session info %s" % e)


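# Note: session.create() always registers a brand new session, so state=present
# reports changed=True on every run; the returned session id can then be fed to
# consul_kv with state=acquire to take a lock.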
def update_session(module):

    name = module.params.get('name')
    delay = module.params.get('delay')
    checks = module.params.get('checks')
    datacenter = module.params.get('datacenter')
    node = module.params.get('node')
    behavior = module.params.get('behavior')
    ttl = module.params.get('ttl')

    consul_client = get_consul_api(module)

    try:
        session = consul_client.session.create(
            name=name,
            behavior=behavior,
            ttl=ttl,
            node=node,
            lock_delay=delay,
            dc=datacenter,
            checks=checks
        )
        module.exit_json(changed=True,
                         session_id=session,
                         name=name,
                         behavior=behavior,
                         ttl=ttl,
                         delay=delay,
                         checks=checks,
                         node=node)
    except Exception as e:
        module.fail_json(msg="Could not create/update session %s" % e)


def remove_session(module):
    session_id = module.params.get('id')

    consul_client = get_consul_api(module)

    try:
        consul_client.session.destroy(session_id)

        module.exit_json(changed=True,
                         session_id=session_id)
    except Exception as e:
        module.fail_json(msg="Could not remove session with id '%s' %s" % (
            session_id, e))


def get_consul_api(module):
    return consul.Consul(host=module.params.get('host'),
                         port=module.params.get('port'),
                         scheme=module.params.get('scheme'),
                         verify=module.params.get('validate_certs'),
                         token=module.params.get('token'))


def test_dependencies(module):
    if not python_consul_installed:
        module.fail_json(msg="python-consul required for this module. "
                             "see https://python-consul.readthedocs.io/en/latest/#installation")


def main():
    argument_spec = dict(
        checks=dict(type='list', elements='str'),
        delay=dict(type='int', default=15),
        behavior=dict(type='str', default='release', choices=['release', 'delete']),
        ttl=dict(type='int'),
        host=dict(type='str', default='localhost'),
        port=dict(type='int', default=8500),
        scheme=dict(type='str', default='http'),
        validate_certs=dict(type='bool', default=True),
        id=dict(type='str'),
        name=dict(type='str'),
        node=dict(type='str'),
        state=dict(type='str', default='present', choices=['absent', 'info', 'list', 'node', 'present']),
        datacenter=dict(type='str'),
        token=dict(type='str', no_log=True),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=[
            ('state', 'node', ['name']),
            ('state', 'info', ['id']),
            ('state', 'absent', ['id']),
        ],
        supports_check_mode=False
    )

    test_dependencies(module)

    try:
        execute(module)
    except ConnectionError as e:
        module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
            module.params.get('host'), module.params.get('port'), e))
    except Exception as e:
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
@@ -0,0 +1,493 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2020, Silvie Chlupova <schlupov@redhat.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = r"""
---
module: copr
short_description: Manage one of the Copr repositories
version_added: 2.0.0
description: This module can enable, disable or remove the specified repository.
author: Silvie Chlupova (@schlupov) <schlupov@redhat.com>
requirements:
    - dnf
    - dnf-plugins-core
notes:
    - Supports C(check_mode).
options:
    host:
        description: The Copr host to work with.
        default: copr.fedorainfracloud.org
        type: str
    protocol:
        description: This indicates which protocol to use with the host.
        default: https
        type: str
    name:
        description: Copr directory name, for example C(@copr/copr-dev).
        required: true
        type: str
    state:
        description:
            - Whether to set this project as C(enabled), C(disabled) or C(absent).
        default: enabled
        type: str
        choices: [absent, enabled, disabled]
    chroot:
        description:
            - The name of the chroot that you want to enable/disable/remove in the project,
              for example C(epel-7-x86_64). The default chroot is determined by the operating system,
              the version of the operating system, and the architecture on which the module is run.
        type: str
"""

EXAMPLES = r"""
- name: Enable project Test of the user schlupov
  community.general.copr:
    host: copr.fedorainfracloud.org
    state: enabled
    name: schlupov/Test
    chroot: fedora-31-x86_64

- name: Remove project integration_tests of the group copr
  community.general.copr:
    state: absent
    name: '@copr/integration_tests'
"""

RETURN = r"""
repo_filename:
    description: The name of the repo file in which the copr project information is stored.
    returned: success
    type: str
    sample: _copr:copr.fedorainfracloud.org:group_copr:integration_tests.repo

repo:
    description: Path to the project on the host.
    returned: success
    type: str
    sample: copr.fedorainfracloud.org/group_copr/integration_tests
"""

import stat
import os
import traceback

try:
    import dnf
    import dnf.cli
    import dnf.repodict
    from dnf.conf import Conf
    HAS_DNF_PACKAGES = True
    DNF_IMP_ERR = None
except ImportError:
    DNF_IMP_ERR = traceback.format_exc()
    HAS_DNF_PACKAGES = False

from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils import distro  # pylint: disable=import-error
from ansible.module_utils.basic import AnsibleModule  # pylint: disable=import-error
from ansible.module_utils.urls import open_url  # pylint: disable=import-error


class CoprModule(object):
    """The class represents a copr module.

    The class contains methods that take care of the repository state of a project,
    whether the project is enabled, disabled or missing.
    """

    ansible_module = None

    def __init__(self, host, name, state, protocol, chroot=None, check_mode=False):
        self.host = host
        self.name = name
        self.state = state
        self.protocol = protocol
        self.check_mode = check_mode
        if chroot:
            self.chroot = chroot
        else:
            self.chroot = self.chroot_conf()
        self.get_base()

    @property
    def short_chroot(self):
        """str: Chroot (distribution-version-architecture) shortened to distribution-version."""
        return self.chroot.rsplit('-', 1)[0]

    @property
    def arch(self):
        """str: Target architecture."""
        chroot_parts = self.chroot.split("-")
        return chroot_parts[-1]

    @property
    def user(self):
        """str: Copr user (this can also be the name of the group)."""
        return self._sanitize_username(self.name.split("/")[0])

    @property
    def project(self):
        """str: The name of the copr project."""
        return self.name.split("/")[1]

    @classmethod
    def need_root(cls):
        """Check if the module was run as root."""
        if os.geteuid() != 0:
            cls.raise_exception("This command has to be run under the root user.")

    @classmethod
    def get_base(cls):
        """Initialize the configuration from dnf.

        Returns:
            An instance of the BaseCli class.
        """
        cls.base = dnf.cli.cli.BaseCli(Conf())
        return cls.base

    @classmethod
    def raise_exception(cls, msg):
        """Raise either an ansible exception or a python exception.

        Args:
            msg: The message to be displayed when an exception is thrown.
        """
        if cls.ansible_module:
            raise cls.ansible_module.fail_json(msg=msg, changed=False)
        raise Exception(msg)

    def _get(self, chroot):
        """Send a get request to the server to obtain the necessary data.

        Args:
            chroot: Chroot in the form of distribution-version.

        Returns:
            Info about a repository and status code of the get request.
        """
        repo_info = None
        url = "{0}://{1}/coprs/{2}/repo/{3}/dnf.repo?arch={4}".format(
            self.protocol, self.host, self.name, chroot, self.arch
        )
        try:
            r = open_url(url)
            status_code = r.getcode()
            repo_info = r.read().decode("utf-8")
        except HTTPError as e:
            status_code = e.getcode()
        return repo_info, status_code

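    # Fallback chain for chroots whose repo file is missing on the server:
    # a rhel-* chroot retries as centos-stream-8, and centos-stream-8/9
    # retry as epel-8/9; any remaining miss is reported via raise_exception().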
    def _download_repo_info(self):
        """Download information about the repository.

        Returns:
            Information about the repository.
        """
        distribution, version = self.short_chroot.split('-', 1)
        chroot = self.short_chroot
        while True:
            repo_info, status_code = self._get(chroot)
            if repo_info:
                return repo_info
            if distribution == "rhel":
                chroot = "centos-stream-8"
                distribution = "centos"
            elif distribution == "centos":
                if version == "stream-8":
                    version = "8"
                elif version == "stream-9":
                    version = "9"
                chroot = "epel-{0}".format(version)
                distribution = "epel"
            else:
                if str(status_code) != "404":
                    self.raise_exception(
                        "This repository does not have any builds yet so you cannot enable it now."
                    )
                else:
                    self.raise_exception(
                        "Chroot {0} does not exist in {1}".format(self.chroot, self.name)
                    )

    def _enable_repo(self, repo_filename_path, repo_content=None):
        """Write information to a repo file.

        Args:
            repo_filename_path: Path to repository.
            repo_content: Repository information from the host.

        Returns:
            False, if the repo file already matches the content stored on the host,
            True if it was written (or would have been, in check mode).
        """
        if not repo_content:
            repo_content = self._download_repo_info()
        if self._compare_repo_content(repo_filename_path, repo_content):
            return False
        if not self.check_mode:
            with open(repo_filename_path, "w+") as file:
                file.write(repo_content)
            os.chmod(
                repo_filename_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH,
            )
        return True

    def _get_repo_with_old_id(self):
        """Try to get a repository with the old name."""
        repo_id = "{0}-{1}".format(self.user, self.project)
        if repo_id in self.base.repos and "_copr" in self.base.repos[repo_id].repofile:
            file_name = self.base.repos[repo_id].repofile.split("/")[-1]
            try:
                copr_hostname = file_name.rsplit(":", 2)[0].split(":", 1)[1]
                if copr_hostname != self.host:
                    return None
                return file_name
            except IndexError:
                return file_name
        return None

    def _read_all_repos(self, repo_id=None):
        """Populate self.base.repos with the repositories on the system,
        using the RepoReader class from dnf.

        Args:
            repo_id: Repo id of the repository we want to work with.
        """
        reader = dnf.conf.read.RepoReader(self.base.conf, None)
        for repo in reader:
            try:
                if repo_id:
                    if repo.id == repo_id:
                        self.base.repos.add(repo)
                        break
                else:
                    self.base.repos.add(repo)
            except dnf.exceptions.ConfigError as e:
                self.raise_exception(str(e))

    def _get_copr_repo(self):
        """Return one specific repository from all repositories on the system.

        Returns:
            The repository that a user wants to enable, disable, or remove.
        """
        repo_id = "copr:{0}:{1}:{2}".format(self.host, self.user, self.project)
        if repo_id not in self.base.repos:
            if self._get_repo_with_old_id() is None:
                return None
        return self.base.repos[repo_id]

    def _disable_repo(self, repo_filename_path):
        """Disable the repository.

        Args:
            repo_filename_path: Path to repository.

        Returns:
            False, if the repository is already disabled on the system,
            True otherwise.
        """
        self._read_all_repos()
        repo = self._get_copr_repo()
        if repo is None:
            if self.check_mode:
                return True
            self._enable_repo(repo_filename_path)
            self._read_all_repos("copr:{0}:{1}:{2}".format(self.host, self.user, self.project))
            repo = self._get_copr_repo()
        for repo_id in repo.cfg.sections():
            repo_content_api = self._download_repo_info()
            with open(repo_filename_path, "r") as file:
                repo_content_file = file.read()
            if repo_content_file != repo_content_api:
                if not self.resolve_differences(
                    repo_content_file, repo_content_api, repo_filename_path
                ):
                    return False
            if not self.check_mode:
                self.base.conf.write_raw_configfile(
                    repo.repofile, repo_id, self.base.conf.substitutions, {"enabled": "0"},
                )
        return True

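    # resolve_differences() normalizes the enabled=0/enabled=1 line out of both
    # sides before comparing, so a repo file that differs from the server copy
    # only by its enabled flag is left alone rather than rewritten.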
    def resolve_differences(self, repo_content_file, repo_content_api, repo_filename_path):
        """Detect differences between the contents of the repository stored on the
        system and the information about the repository on the server.

        Args:
            repo_content_file: The contents of the repository stored on the system.
            repo_content_api: The information about the repository from the server.
            repo_filename_path: Path to repository.

        Returns:
            False, if the contents of the repo file and the information on the server match,
            True otherwise.
        """
        repo_file_lines = repo_content_file.split("\n")
        repo_api_lines = repo_content_api.split("\n")
        repo_api_lines.remove("enabled=1")
        if "enabled=0" in repo_file_lines:
            repo_file_lines.remove("enabled=0")
            if " ".join(repo_api_lines) == " ".join(repo_file_lines):
                return False
            if not self.check_mode:
                os.remove(repo_filename_path)
                self._enable_repo(repo_filename_path, repo_content_api)
        else:
            repo_file_lines.remove("enabled=1")
            if " ".join(repo_api_lines) != " ".join(repo_file_lines):
                if not self.check_mode:
                    os.remove(repo_filename_path)
                    self._enable_repo(repo_filename_path, repo_content_api)
        return True

    def _remove_repo(self):
        """Remove the required repository.

        Returns:
            True, if the repository has been removed, False otherwise.
        """
        self._read_all_repos()
        repo = self._get_copr_repo()
        if not repo:
            return False
        if not self.check_mode:
            try:
                os.remove(repo.repofile)
            except OSError as e:
                self.raise_exception(str(e))
        return True

    def run(self):
        """The method uses methods of the CoprModule class to change the state of the repository.

        Returns:
            Dictionary with information that the ansible module displays to the user at the end of the run.
        """
        self.need_root()
        state = dict()
        repo_filename = "_copr:{0}:{1}:{2}.repo".format(self.host, self.user, self.project)
        state["repo"] = "{0}/{1}/{2}".format(self.host, self.user, self.project)
        state["repo_filename"] = repo_filename
        repo_filename_path = "{0}/_copr:{1}:{2}:{3}.repo".format(
            self.base.conf.get_reposdir, self.host, self.user, self.project
        )
        if self.state == "enabled":
            enabled = self._enable_repo(repo_filename_path)
            state["msg"] = "enabled"
            state["state"] = bool(enabled)
        elif self.state == "disabled":
            disabled = self._disable_repo(repo_filename_path)
            state["msg"] = "disabled"
            state["state"] = bool(disabled)
        elif self.state == "absent":
            removed = self._remove_repo()
            state["msg"] = "absent"
            state["state"] = bool(removed)
        return state

    @staticmethod
    def _compare_repo_content(repo_filename_path, repo_content_api):
        """Compare the contents of the stored repository with the information from the server.

        Args:
            repo_filename_path: Path to repository.
            repo_content_api: The information about the repository from the server.

        Returns:
            True, if the information matches, False otherwise.
        """
        if not os.path.isfile(repo_filename_path):
            return False
        with open(repo_filename_path, "r") as file:
            repo_content_file = file.read()
        return repo_content_file == repo_content_api

    @staticmethod
    def chroot_conf():
        """Obtain information about the distribution, version, and architecture of the target.

        Returns:
            Chroot info in the form of distribution-version-architecture.
        """
        (distribution, version, codename) = distro.linux_distribution(full_distribution_name=False)
        base = CoprModule.get_base()
        return "{0}-{1}-{2}".format(distribution, version, base.conf.arch)

    @staticmethod
    def _sanitize_username(user):
        """Modify the group name.

        Args:
            user: User name.

        Returns:
            Modified user name if it is a group name with @.
        """
        if user[0] == "@":
            return "group_{0}".format(user[1:])
        return user


def run_module():
    """The function takes care of the functioning of the whole ansible copr module."""
    module_args = dict(
        host=dict(type="str", default="copr.fedorainfracloud.org"),
        protocol=dict(type="str", default="https"),
        name=dict(type="str", required=True),
        state=dict(type="str", choices=["enabled", "disabled", "absent"], default="enabled"),
        chroot=dict(type="str"),
    )
    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
    params = module.params

    if not HAS_DNF_PACKAGES:
        module.fail_json(msg=missing_required_lib("dnf"), exception=DNF_IMP_ERR)

    CoprModule.ansible_module = module
    copr_module = CoprModule(
        host=params["host"],
        name=params["name"],
        state=params["state"],
        protocol=params["protocol"],
        chroot=params["chroot"],
        check_mode=module.check_mode,
    )
    state = copr_module.run()

    info = "Please note that this repository is not part of the main distribution"

    if params["state"] == "enabled" and state["state"]:
        module.exit_json(
            changed=state["state"],
            msg=state["msg"],
            repo=state["repo"],
            repo_filename=state["repo_filename"],
            info=info,
        )
    module.exit_json(
        changed=state["state"],
        msg=state["msg"],
        repo=state["repo"],
        repo_filename=state["repo_filename"],
    )


def main():
    """Launches ansible Copr module."""
    run_module()


if __name__ == "__main__":
    main()
@@ -0,0 +1,240 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2012, Franck Cuny <franck@lumberjaph.net>
# Copyright (c) 2021, Alexei Znamensky <russoz@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: cpanm
short_description: Manages Perl library dependencies
description:
    - Manage Perl library dependencies using cpanminus.
options:
    name:
        type: str
        description:
            - The Perl library to install. Valid values change according to the I(mode), see notes for more details.
            - Note that for installing from a local path the parameter I(from_path) should be used.
        aliases: [pkg]
    from_path:
        type: path
        description:
            - The local directory or C(tar.gz) file to install from.
    notest:
        description:
            - Do not run unit tests.
        type: bool
        default: false
    locallib:
        description:
            - Specify the install base to install modules.
        type: path
    mirror:
        description:
            - Specifies the base URL for the CPAN mirror to use.
        type: str
    mirror_only:
        description:
            - Use the mirror's index file instead of the CPAN Meta DB.
        type: bool
        default: false
    installdeps:
        description:
            - Only install dependencies.
        type: bool
        default: false
    version:
        description:
            - Version specification for the perl module. When I(mode) is C(new), C(cpanm) version operators are accepted.
        type: str
    executable:
        description:
            - Override the path to the cpanm executable.
        type: path
    mode:
        description:
            - Controls the module behavior. See notes below for more details.
        type: str
        choices: [compatibility, new]
        default: compatibility
        version_added: 3.0.0
    name_check:
        description:
            - When in C(new) mode, this parameter can be used to check if there is a module I(name) installed (at I(version), when specified).
        type: str
        version_added: 3.0.0
notes:
    - Please note that L(cpanm, http://search.cpan.org/dist/App-cpanminus/bin/cpanm) must be installed on the remote host.
    - "This module now comes with a choice of execution I(mode): C(compatibility) or C(new)."
    - "C(compatibility) mode:"
    - When using C(compatibility) mode, the module will keep backward compatibility. This is the default mode.
    - I(name) must be either a module name or a distribution file.
    - >
      If the perl module given by I(name) is installed (at the exact I(version) when specified), then nothing happens.
      Otherwise, it will be installed using the C(cpanm) executable.
    - I(name) cannot be a URL, or a git URL.
    - C(cpanm) version specifiers do not work in this mode.
    - "C(new) mode:"
    - "When using C(new) mode, the module will behave differently:"
    - >
      The I(name) parameter may refer to a module name, a distribution file,
      an HTTP URL or a git repository URL as described in the C(cpanminus) documentation.
    - C(cpanm) version specifiers are recognized.
author:
    - "Franck Cuny (@fcuny)"
    - "Alexei Znamensky (@russoz)"
'''

EXAMPLES = '''
- name: Install Dancer perl package
  community.general.cpanm:
    name: Dancer

- name: Install version 0.99_05 of the Plack perl package
  community.general.cpanm:
    name: MIYAGAWA/Plack-0.99_05.tar.gz

- name: Install Dancer into the specified locallib
  community.general.cpanm:
    name: Dancer
    locallib: /srv/webapps/my_app/extlib

- name: Install perl dependencies from local directory
  community.general.cpanm:
    from_path: /srv/webapps/my_app/src/

- name: Install Dancer perl package without running the unit tests in indicated locallib
  community.general.cpanm:
    name: Dancer
    notest: true
    locallib: /srv/webapps/my_app/extlib

- name: Install Dancer perl package from a specific mirror
  community.general.cpanm:
    name: Dancer
    mirror: 'http://cpan.cpantesters.org/'

- name: Install Dancer perl package into the system root path
  become: true
  community.general.cpanm:
    name: Dancer

- name: Install Dancer if it is not already installed OR the installed version is older than version 1.0
  community.general.cpanm:
    name: Dancer
    version: '1.0'
'''

import os

from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper


class CPANMinus(ModuleHelper):
    output_params = ['name', 'version']
    module = dict(
        argument_spec=dict(
            name=dict(type='str', aliases=['pkg']),
            version=dict(type='str'),
            from_path=dict(type='path'),
            notest=dict(type='bool', default=False),
            locallib=dict(type='path'),
            mirror=dict(type='str'),
            mirror_only=dict(type='bool', default=False),
            installdeps=dict(type='bool', default=False),
            executable=dict(type='path'),
            mode=dict(type='str', choices=['compatibility', 'new'], default='compatibility'),
            name_check=dict(type='str')
        ),
        required_one_of=[('name', 'from_path')],
    )
    command = 'cpanm'
    command_args_formats = dict(
        notest=cmd_runner_fmt.as_bool("--notest"),
        locallib=cmd_runner_fmt.as_opt_val('--local-lib'),
        mirror=cmd_runner_fmt.as_opt_val('--mirror'),
        mirror_only=cmd_runner_fmt.as_bool("--mirror-only"),
        installdeps=cmd_runner_fmt.as_bool("--installdeps"),
        pkg_spec=cmd_runner_fmt.as_list(),
    )

    def __init_module__(self):
        v = self.vars
        if v.mode == "compatibility":
            if v.name_check:
                self.do_raise("Parameter name_check can only be used with mode=new")
        else:
            if v.name and v.from_path:
                self.do_raise("Parameters 'name' and 'from_path' are mutually exclusive when 'mode=new'")

        self.command = self.module.get_bin_path(v.executable if v.executable else self.command)
        self.vars.set("binary", self.command)

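    # The installed-check below shells out to `perl -le 'use <name> <version>;'`:
    # perl exits non-zero when the module is missing or older than the requested
    # version, and process() maps that exit code onto the boolean result.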
    def _is_package_installed(self, name, locallib, version):
        def process(rc, out, err):
            return rc == 0

        if name is None or name.endswith('.tar.gz'):
            return False
        version = "" if version is None else " " + version

        env = {"PERL5LIB": "%s/lib/perl5" % locallib} if locallib else {}
        runner = CmdRunner(self.module, ["perl", "-le"], {"mod": cmd_runner_fmt.as_list()}, check_rc=False, environ_update=env)
        with runner("mod", output_process=process) as ctx:
            return ctx.run(mod='use %s%s;' % (name, version))

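    # Version-to-pkg-spec mapping applied below (cpanm operators), e.g.:
    #   name=Dancer, version='1.0'     -> Dancer~1.0       (1.0 or newer)
    #   name=Dancer, version='@1.0'    -> Dancer@1.0       (exactly 1.0)
    #   name=<repo>.git, version='1.0' -> <repo>.git@1.0   (git tag/branch/commit)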
    def sanitize_pkg_spec_version(self, pkg_spec, version):
        if version is None:
            return pkg_spec
        if pkg_spec.endswith('.tar.gz'):
            self.do_raise(msg="parameter 'version' must not be used when installing from a file")
        if os.path.isdir(pkg_spec):
            self.do_raise(msg="parameter 'version' must not be used when installing from a directory")
        if pkg_spec.endswith('.git'):
            if version.startswith('~'):
                self.do_raise(msg="operator '~' not allowed in version parameter when installing from git repository")
            version = version if version.startswith('@') else '@' + version
        elif version[0] not in ('@', '~'):
            version = '~' + version
        return pkg_spec + version

    def __run__(self):
        def process(rc, out, err):
            if self.vars.mode == "compatibility" and rc != 0:
                self.do_raise(msg=err, cmd=self.vars.cmd_args)
            return 'is up to date' not in err and 'is up to date' not in out

        runner = CmdRunner(self.module, self.command, self.command_args_formats, check_rc=True)

        v = self.vars
        pkg_param = 'from_path' if v.from_path else 'name'

        if v.mode == 'compatibility':
            if self._is_package_installed(v.name, v.locallib, v.version):
                return
            pkg_spec = v[pkg_param]
        else:
            installed = self._is_package_installed(v.name_check, v.locallib, v.version) if v.name_check else False
            if installed:
                return
            pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version)

        with runner(['notest', 'locallib', 'mirror', 'mirror_only', 'installdeps', 'pkg_spec'], output_process=process) as ctx:
            self.changed = ctx.run(pkg_spec=pkg_spec)


def main():
    CPANMinus.execute()


if __name__ == '__main__':
    main()
@@ -0,0 +1,424 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Cronvar Plugin: The goal of this plugin is to provide an idempotent
# method for setting cron variable values. It should play well with the
# existing cron module as well as allow for manually added variables.
# Each variable entered will be preceded with a comment describing the
# variable so that it can be found later. This is required to be
# present in order for this plugin to find/modify the variable.

# This module is based on the crontab module.

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = r'''
---
module: cronvar
short_description: Manage variables in crontabs
description:
    - Use this module to manage crontab variables.
    - This module allows you to create, update, or delete cron variable definitions.
options:
    name:
        description:
            - Name of the crontab variable.
        type: str
        required: true
    value:
        description:
            - The value to set this variable to.
            - Required if I(state=present).
        type: str
    insertafter:
        description:
            - If specified, the variable will be inserted after the variable specified.
            - Used with I(state=present).
        type: str
    insertbefore:
        description:
            - Used with I(state=present). If specified, the variable will be inserted
              just before the variable specified.
        type: str
    state:
        description:
            - Whether to ensure that the variable is present or absent.
        type: str
        choices: [ absent, present ]
        default: present
    user:
        description:
            - The specific user whose crontab should be modified.
            - This parameter defaults to C(root) when unset.
        type: str
    cron_file:
        description:
            - If specified, uses this file instead of an individual user's crontab.
            - Without a leading C(/), this is assumed to be in I(/etc/cron.d).
            - With a leading C(/), this is taken as absolute.
        type: str
    backup:
        description:
            - If set, create a backup of the crontab before it is modified.
              The location of the backup is returned in the C(backup_file) variable by this module.
        type: bool
        default: false
requirements:
    - cron
author:
    - Doug Luce (@dougluce)
'''

EXAMPLES = r'''
- name: Ensure entry like "EMAIL=doug@ansibmod.con.com" exists
  community.general.cronvar:
    name: EMAIL
    value: doug@ansibmod.con.com

- name: Ensure a variable does not exist. This may remove any variable named "LEGACY"
  community.general.cronvar:
    name: LEGACY
    state: absent

- name: Add a variable to a file under /etc/cron.d
  community.general.cronvar:
    name: LOGFILE
    value: /var/log/yum-autoupdate.log
    user: root
    cron_file: ansible_yum-autoupdate
'''

import os
import platform
import pwd
import re
import shlex
import sys
import tempfile

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import shlex_quote


class CronVarError(Exception):
    pass


class CronVar(object):
    """
    CronVar object to write variables to crontabs.

    user - the user of the crontab (defaults to root)
    cron_file - a cron file under /etc/cron.d
    """

    def __init__(self, module, user=None, cron_file=None):
        self.module = module
        self.user = user
        self.lines = None
        self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"',))
        self.cron_cmd = self.module.get_bin_path('crontab', required=True)

        if cron_file:
            self.cron_file = ""
            if os.path.isabs(cron_file):
                self.cron_file = cron_file
            else:
                self.cron_file = os.path.join('/etc/cron.d', cron_file)
        else:
            self.cron_file = None

        self.read()

    def read(self):
        # Read in the crontab from the system
        self.lines = []
        if self.cron_file:
            # read the cronfile
            try:
                with open(self.cron_file, 'r') as f:
                    self.lines = f.read().splitlines()
            except IOError:
                # cron file does not exist
                return
            except Exception:
                raise CronVarError("Unexpected error:", sys.exc_info()[0])
        else:
            # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
            (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)

            if rc != 0 and rc != 1:  # 1 can mean that there are no jobs.
                raise CronVarError("Unable to read crontab")

            lines = out.splitlines()
            count = 0
            for l in lines:
                if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l
                                              ) and not re.match(r'# \(/tmp/.*installed on.*\)', l) and not re.match(r'# \(.*version.*\)', l)):
                    self.lines.append(l)
                count += 1

    def log_message(self, message):
        self.module.debug('ansible: "%s"' % message)

    def write(self, backup_file=None):
        """
        Write the crontab to the system. Saves all information.
        """
        if backup_file:
            fileh = open(backup_file, 'w')
        elif self.cron_file:
            fileh = open(self.cron_file, 'w')
        else:
            filed, path = tempfile.mkstemp(prefix='crontab')
            fileh = os.fdopen(filed, 'w')

        fileh.write(self.render())
        fileh.close()

        # return if making a backup
        if backup_file:
            return

        # Add the entire crontab back to the user crontab
        if not self.cron_file:
            # quoting shell args for now but really this should be two non-shell calls. FIXME
            (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
            os.unlink(path)

            if rc != 0:
                self.module.fail_json(msg=err)

    def remove_variable_file(self):
        try:
            os.unlink(self.cron_file)
            return True
        except OSError:
            # cron file does not exist
            return False
        except Exception:
            raise CronVarError("Unexpected error:", sys.exc_info()[0])

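    # For example, parse_for_var('MAILTO=admin@example.com') returns
    # ('MAILTO', 'admin@example.com'); any line that is not a VAR=value
    # definition (such as a cron job line) raises CronVarError instead.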
    def parse_for_var(self, line):
        lexer = shlex.shlex(line)
        lexer.wordchars = self.wordchars
        varname = lexer.get_token()
        is_env_var = lexer.get_token() == '='
        value = ''.join(lexer)
        if is_env_var:
            return (varname, value)
        raise CronVarError("Not a variable.")

    def find_variable(self, name):
        for l in self.lines:
            try:
                (varname, value) = self.parse_for_var(l)
                if varname == name:
                    return value
            except CronVarError:
                pass
        return None

    def get_var_names(self):
        var_names = []
        for l in self.lines:
            try:
                var_name, dummy = self.parse_for_var(l)
                var_names.append(var_name)
            except CronVarError:
                pass
        return var_names

    def add_variable(self, name, value, insertbefore, insertafter):
        if insertbefore is None and insertafter is None:
            # Add the variable to the top of the file.
            self.lines.insert(0, "%s=%s" % (name, value))
        else:
            newlines = []
            for l in self.lines:
                try:
                    varname, dummy = self.parse_for_var(l)  # Throws if not a var line
                    if varname == insertbefore:
                        newlines.append("%s=%s" % (name, value))
                        newlines.append(l)
                    elif varname == insertafter:
                        newlines.append(l)
                        newlines.append("%s=%s" % (name, value))
                    else:
                        raise CronVarError  # Append.
                except CronVarError:
                    newlines.append(l)

            self.lines = newlines

    def remove_variable(self, name):
        self.update_variable(name, None, remove=True)

    def update_variable(self, name, value, remove=False):
        newlines = []
        for l in self.lines:
            try:
                varname, dummy = self.parse_for_var(l)  # Throws if not a var line
                if varname != name:
                    raise CronVarError  # Append.
                if not remove:
                    newlines.append("%s=%s" % (name, value))
            except CronVarError:
                newlines.append(l)

        self.lines = newlines

    def render(self):
        """
        Render a proper crontab.
        """
        result = '\n'.join(self.lines)
        if result and result[-1] not in ['\n', '\r']:
            result += '\n'
        return result

    def _read_user_execute(self):
        """
        Returns the command line for reading a crontab.
        """
        user = ''

        if self.user:
            if platform.system() == 'SunOS':
                return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd))
            elif platform.system() == 'AIX':
                return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user))
            elif platform.system() == 'HP-UX':
                return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user))
            elif pwd.getpwuid(os.getuid())[0] != self.user:
                user = '-u %s' % shlex_quote(self.user)
        return "%s %s %s" % (self.cron_cmd, user, '-l')

    def _write_execute(self, path):
        """
        Return the command line for writing a crontab.
        """
        user = ''
        if self.user:
            if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
                return "chown %s %s ; su '%s' -c '%s %s'" % (
                    shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path))
            elif pwd.getpwuid(os.getuid())[0] != self.user:
                user = '-u %s' % shlex_quote(self.user)
        return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path))


# ==================================================

def main():
    # The following example playbooks:
    #
    # - community.general.cronvar: name="SHELL" value="/bin/bash"
    #
    # - name: Set the email
    #   community.general.cronvar: name="EMAILTO" value="doug@ansibmod.con.com"
    #
    # - name: Get rid of the old new host variable
    #   community.general.cronvar: name="NEW_HOST" state=absent
    #
    # Would produce:
    # SHELL = /bin/bash
    # EMAILTO = doug@ansibmod.con.com

    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            value=dict(type='str'),
            user=dict(type='str'),
            cron_file=dict(type='str'),
            insertafter=dict(type='str'),
            insertbefore=dict(type='str'),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            backup=dict(type='bool', default=False),
        ),
        mutually_exclusive=[['insertbefore', 'insertafter']],
        supports_check_mode=False,
    )

    name = module.params['name']
    value = module.params['value']
    user = module.params['user']
    cron_file = module.params['cron_file']
    insertafter = module.params['insertafter']
    insertbefore = module.params['insertbefore']
    state = module.params['state']
    backup = module.params['backup']
    ensure_present = state == 'present'

    changed = False
    res_args = dict()

    # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
    os.umask(int('022', 8))
    cronvar = CronVar(module, user, cron_file)

    module.debug('cronvar instantiated - name: "%s"' % name)

    # --- user input validation ---

    if name is None and ensure_present:
        module.fail_json(msg="You must specify 'name' to insert a new cron variable")

    if value is None and ensure_present:
        module.fail_json(msg="You must specify 'value' to insert a new cron variable")

    if name is None and not ensure_present:
        module.fail_json(msg="You must specify 'name' to remove a cron variable")

    # if requested, make a backup before making a change
    if backup:
        dummy, backup_file = tempfile.mkstemp(prefix='cronvar')
        cronvar.write(backup_file)

    if cronvar.cron_file and not name and not ensure_present:
        changed = cronvar.remove_variable_file()
        module.exit_json(changed=changed, cron_file=cron_file, state=state)

    old_value = cronvar.find_variable(name)

    if ensure_present:
        if old_value is None:
            cronvar.add_variable(name, value, insertbefore, insertafter)
            changed = True
        elif old_value != value:
            cronvar.update_variable(name, value)
            changed = True
    else:
        if old_value is not None:
            cronvar.remove_variable(name)
            changed = True

    res_args = {
        "vars": cronvar.get_var_names(),
        "changed": changed
    }

    if changed:
        cronvar.write()

    # retain the backup only if crontab or cron file have changed
    if backup:
        if changed:
            res_args['backup_file'] = backup_file
        else:
            os.unlink(backup_file)

    if cron_file:
        res_args['cron_file'] = cron_file

    module.exit_json(**res_args)


if __name__ == '__main__':
    main()
@@ -0,0 +1,355 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2014, Steve <yo@groks.org>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = r'''
---
module: crypttab
short_description: Encrypted Linux block devices
description:
    - Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab).
options:
    name:
        description:
            - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
              optionally prefixed with C(/dev/mapper/), as it appears in the filesystem. I(/dev/mapper/)
              will be stripped from I(name).
        type: str
        required: true
    state:
        description:
            - Use I(present) to add a line to C(/etc/crypttab) or update its definition
              if already present.
            - Use I(absent) to remove a line with matching I(name).
            - Use I(opts_present) to add options to those already present; options with
              different values will be updated.
            - Use I(opts_absent) to remove options from the existing set.
        type: str
        required: true
        choices: [ absent, opts_absent, opts_present, present ]
    backing_device:
        description:
            - Path to the underlying block device or file, or the UUID of a block-device
              prefixed with I(UUID=).
        type: str
    password:
        description:
            - Encryption password, the path to a file containing the password, or
              C(-) or unset if the password should be entered at boot.
        type: path
    opts:
        description:
            - A comma-delimited list of options. See C(crypttab(5)) for details.
        type: str
    path:
        description:
            - Path to file to use instead of C(/etc/crypttab).
            - This might be useful in a chroot environment.
        type: path
        default: /etc/crypttab
author:
    - Steve (@groks)
'''

EXAMPLES = r'''
- name: Set the options explicitly for a device, which must already exist
  community.general.crypttab:
    name: luks-home
    state: present
    opts: discard,cipher=aes-cbc-essiv:sha256

- name: Add the 'discard' option to any existing options for all devices
  community.general.crypttab:
    name: '{{ item.device }}'
    state: opts_present
    opts: discard
  loop: '{{ ansible_mounts }}'
  when: "'/dev/mapper/luks-' in item.device"
'''
|
||||
|
||||
import os
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_native
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
name=dict(type='str', required=True),
|
||||
state=dict(type='str', required=True, choices=['absent', 'opts_absent', 'opts_present', 'present']),
|
||||
backing_device=dict(type='str'),
|
||||
password=dict(type='path'),
|
||||
opts=dict(type='str'),
|
||||
path=dict(type='path', default='/etc/crypttab')
|
||||
),
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
backing_device = module.params['backing_device']
|
||||
password = module.params['password']
|
||||
opts = module.params['opts']
|
||||
state = module.params['state']
|
||||
path = module.params['path']
|
||||
name = module.params['name']
|
||||
if name.startswith('/dev/mapper/'):
|
||||
name = name[len('/dev/mapper/'):]
|
||||
|
||||
if state != 'absent' and backing_device is None and password is None and opts is None:
|
||||
module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'",
|
||||
**module.params)
|
||||
|
||||
if 'opts' in state and (backing_device is not None or password is not None):
|
||||
module.fail_json(msg="cannot update 'backing_device' or 'password' when state=%s" % state,
|
||||
**module.params)
|
||||
|
||||
for arg_name, arg in (('name', name),
|
||||
('backing_device', backing_device),
|
||||
('password', password),
|
||||
('opts', opts)):
|
||||
if (arg is not None and (' ' in arg or '\t' in arg or arg == '')):
|
||||
module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name,
|
||||
**module.params)
|
||||
|
||||
try:
|
||||
crypttab = Crypttab(path)
|
||||
existing_line = crypttab.match(name)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="failed to open and parse crypttab file: %s" % to_native(e),
|
||||
exception=traceback.format_exc(), **module.params)
|
||||
|
||||
if 'present' in state and existing_line is None and backing_device is None:
|
||||
module.fail_json(msg="'backing_device' required to add a new entry",
|
||||
**module.params)
|
||||
|
||||
changed, reason = False, '?'
|
||||
|
||||
if state == 'absent':
|
||||
if existing_line is not None:
|
||||
changed, reason = existing_line.remove()
|
||||
|
||||
elif state == 'present':
|
||||
if existing_line is not None:
|
||||
changed, reason = existing_line.set(backing_device, password, opts)
|
||||
else:
|
||||
changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
|
||||
|
||||
elif state == 'opts_present':
|
||||
if existing_line is not None:
|
||||
changed, reason = existing_line.opts.add(opts)
|
||||
else:
|
||||
changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
|
||||
|
||||
elif state == 'opts_absent':
|
||||
if existing_line is not None:
|
||||
changed, reason = existing_line.opts.remove(opts)
|
||||
|
||||
if changed and not module.check_mode:
|
||||
try:
|
||||
f = open(path, 'wb')
|
||||
f.write(to_bytes(crypttab, errors='surrogate_or_strict'))
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
module.exit_json(changed=changed, msg=reason, **module.params)
|
||||
|
||||
|
||||
class Crypttab(object):
|
||||
_lines = []
|
||||
|
||||
def __init__(self, path):
|
||||
self.path = path
|
||||
if not os.path.exists(path):
|
||||
if not os.path.exists(os.path.dirname(path)):
|
||||
os.makedirs(os.path.dirname(path))
|
||||
open(path, 'a').close()
|
||||
|
||||
try:
|
||||
f = open(path, 'r')
|
||||
for line in f.readlines():
|
||||
self._lines.append(Line(line))
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
def add(self, line):
|
||||
self._lines.append(line)
|
||||
return True, 'added line'
|
||||
|
||||
def lines(self):
|
||||
for line in self._lines:
|
||||
if line.valid():
|
||||
yield line
|
||||
|
||||
def match(self, name):
|
||||
for line in self.lines():
|
||||
if line.name == name:
|
||||
return line
|
||||
return None
|
||||
|
||||
def __str__(self):
|
||||
lines = []
|
||||
for line in self._lines:
|
||||
lines.append(str(line))
|
||||
crypttab = '\n'.join(lines)
|
||||
if len(crypttab) == 0:
|
||||
crypttab += '\n'
|
||||
if crypttab[-1] != '\n':
|
||||
crypttab += '\n'
|
||||
return crypttab
|
||||
|
||||
|
||||
class Line(object):
|
||||
def __init__(self, line=None, name=None, backing_device=None, password=None, opts=None):
|
||||
self.line = line
|
||||
self.name = name
|
||||
self.backing_device = backing_device
|
||||
self.password = password
|
||||
self.opts = Options(opts)
|
||||
|
||||
if line is not None:
|
||||
self.line = self.line.rstrip('\n')
|
||||
if self._line_valid(line):
|
||||
self.name, backing_device, password, opts = self._split_line(line)
|
||||
|
||||
self.set(backing_device, password, opts)
|
||||
|
||||
def set(self, backing_device, password, opts):
|
||||
changed = False
|
||||
|
||||
if backing_device is not None and self.backing_device != backing_device:
|
||||
self.backing_device = backing_device
|
||||
changed = True
|
||||
|
||||
if password is not None and self.password != password:
|
||||
self.password = password
|
||||
changed = True
|
||||
|
||||
if opts is not None:
|
||||
opts = Options(opts)
|
||||
if opts != self.opts:
|
||||
self.opts = opts
|
||||
changed = True
|
||||
|
||||
return changed, 'updated line'
|
||||
|
||||
def _line_valid(self, line):
|
||||
if not line.strip() or line.startswith('#') or len(line.split()) not in (2, 3, 4):
|
||||
return False
|
||||
return True
|
||||
|
||||
def _split_line(self, line):
|
||||
fields = line.split()
|
||||
try:
|
||||
field2 = fields[2]
|
||||
except IndexError:
|
||||
field2 = None
|
||||
try:
|
||||
field3 = fields[3]
|
||||
except IndexError:
|
||||
field3 = None
|
||||
|
||||
return (fields[0],
|
||||
fields[1],
|
||||
field2,
|
||||
field3)
|
||||
|
||||
def remove(self):
|
||||
self.line, self.name, self.backing_device = '', None, None
|
||||
return True, 'removed line'
|
||||
|
||||
def valid(self):
|
||||
if self.name is not None and self.backing_device is not None:
|
||||
return True
|
||||
return False
|
||||
|
||||
def __str__(self):
|
||||
if self.valid():
|
||||
fields = [self.name, self.backing_device]
|
||||
if self.password is not None or self.opts:
|
||||
if self.password is not None:
|
||||
fields.append(self.password)
|
||||
else:
|
||||
fields.append('none')
|
||||
if self.opts:
|
||||
fields.append(str(self.opts))
|
||||
return ' '.join(fields)
|
||||
return self.line
|
||||
|
||||
|
||||
class Options(dict):
|
||||
"""opts_string looks like: 'discard,foo=bar,baz=greeble' """
|
||||
|
||||
def __init__(self, opts_string):
|
||||
super(Options, self).__init__()
|
||||
self.itemlist = []
|
||||
if opts_string is not None:
|
||||
for opt in opts_string.split(','):
|
||||
kv = opt.split('=')
|
||||
if len(kv) > 1:
|
||||
k, v = (kv[0], kv[1])
|
||||
else:
|
||||
k, v = (kv[0], None)
|
||||
self[k] = v
|
||||
|
||||
def add(self, opts_string):
|
||||
changed = False
|
||||
for k, v in Options(opts_string).items():
|
||||
if k in self:
|
||||
if self[k] != v:
|
||||
changed = True
|
||||
else:
|
||||
changed = True
|
||||
self[k] = v
|
||||
return changed, 'updated options'
|
||||
|
||||
def remove(self, opts_string):
|
||||
changed = False
|
||||
for k in Options(opts_string):
|
||||
if k in self:
|
||||
del self[k]
|
||||
changed = True
|
||||
return changed, 'removed options'
|
||||
|
||||
def keys(self):
|
||||
return self.itemlist
|
||||
|
||||
def values(self):
|
||||
return [self[key] for key in self]
|
||||
|
||||
def items(self):
|
||||
return [(key, self[key]) for key in self]
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self.itemlist)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
if key not in self:
|
||||
self.itemlist.append(key)
|
||||
super(Options, self).__setitem__(key, value)
|
||||
|
||||
def __delitem__(self, key):
|
||||
self.itemlist.remove(key)
|
||||
super(Options, self).__delitem__(key)
|
||||
|
||||
def __ne__(self, obj):
|
||||
return not (isinstance(obj, Options) and sorted(self.items()) == sorted(obj.items()))
|
||||
|
||||
def __str__(self):
|
||||
ret = []
|
||||
for k, v in self.items():
|
||||
if v is None:
|
||||
ret.append(k)
|
||||
else:
|
||||
ret.append('%s=%s' % (k, v))
|
||||
return ','.join(ret)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
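
# --- Illustrative sketch (not part of the module) ---------------------------
# Options is a dict that preserves insertion order (via itemlist), so the
# rendered crypttab line keeps options in the order they were given, even on
# Python versions where plain dicts are unordered. A quick round-trip,
# assuming the class above is importable:
#
#     opts = Options('discard,cipher=aes-cbc-essiv:sha256')
#     opts.add('keyscript=/bin/true')      # -> (True, 'updated options')
#     str(opts)  # 'discard,cipher=aes-cbc-essiv:sha256,keyscript=/bin/true'
#     opts.remove('discard')               # -> (True, 'removed options')
#     str(opts)  # 'cipher=aes-cbc-essiv:sha256,keyscript=/bin/true'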
@@ -0,0 +1,308 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2020, Datadog, Inc
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function

__metaclass__ = type

DOCUMENTATION = """
---
module: datadog_downtime
short_description: Manages Datadog downtimes
version_added: 2.0.0
description:
  - Manages downtimes within Datadog.
  - Options as described on U(https://docs.datadoghq.com/api/v1/downtimes/).
author:
  - Datadog (@Datadog)
requirements:
  - datadog-api-client
  - Python 3.6+
options:
    api_key:
        description:
          - Your Datadog API key.
        required: true
        type: str
    api_host:
        description:
          - The URL to the Datadog API.
          - This value can also be set with the C(DATADOG_HOST) environment variable.
        required: false
        default: https://api.datadoghq.com
        type: str
    app_key:
        description:
          - Your Datadog app key.
        required: true
        type: str
    state:
        description:
          - The designated state of the downtime.
        required: false
        choices: ["present", "absent"]
        default: present
        type: str
    id:
        description:
          - The identifier of the downtime.
          - If empty, a new downtime gets created, otherwise it is either updated or deleted depending on the C(state).
          - To keep your playbook idempotent, you should save the identifier in a file and read it in a lookup.
        type: int
    monitor_tags:
        description:
          - A list of monitor tags to which the downtime applies.
          - The resulting downtime applies to monitors that match ALL provided monitor tags.
        type: list
        elements: str
    scope:
        description:
          - A list of scopes to which the downtime applies.
          - The resulting downtime applies to sources that match ALL provided scopes.
        type: list
        elements: str
    monitor_id:
        description:
          - The ID of the monitor to mute. If not provided, the downtime applies to all monitors.
        type: int
    downtime_message:
        description:
          - A message to include with notifications for this downtime.
          - Email notifications can be sent to specific users by using the same "@username" notation as events.
        type: str
    start:
        type: int
        description:
          - POSIX timestamp to start the downtime. If not provided, the downtime starts the moment it is created.
    end:
        type: int
        description:
          - POSIX timestamp to end the downtime. If not provided, the downtime is in effect until you cancel it.
    timezone:
        description:
          - The timezone for the downtime.
        type: str
    rrule:
        description:
          - The C(RRULE) standard for defining recurring events.
          - For example, to have a recurring event on the first day of each month,
            select a type of rrule and set the C(FREQ) to C(MONTHLY) and C(BYMONTHDAY) to C(1).
          - Most common rrule options from the iCalendar Spec are supported.
          - Attributes specifying the duration in C(RRULE) are not supported (e.g. C(DTSTART), C(DTEND), C(DURATION)).
        type: str
"""

EXAMPLES = """
- name: Create a downtime
  register: downtime_var
  community.general.datadog_downtime:
    state: present
    monitor_tags:
      - "foo:bar"
    downtime_message: "Downtime for foo:bar"
    scope: "test"
    api_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
    app_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
    # Lookup the id in the file and ignore errors if the file doesn't exist, so downtime gets created
    id: "{{ lookup('file', inventory_hostname ~ '_downtime_id.txt', errors='ignore') }}"
- name: Save downtime id to file for later updates and idempotence
  delegate_to: localhost
  copy:
    content: "{{ downtime_var.downtime.id }}"
    dest: "{{ inventory_hostname ~ '_downtime_id.txt' }}"
"""

RETURN = """
# Returns the downtime JSON dictionary from the API response under the C(downtime) key.
# See https://docs.datadoghq.com/api/v1/downtimes/#schedule-a-downtime for more details.
downtime:
    description: The downtime returned by the API.
    type: dict
    returned: always
    sample: {
        "active": true,
        "canceled": null,
        "creator_id": 1445416,
        "disabled": false,
        "downtime_type": 2,
        "end": null,
        "id": 1055751000,
        "message": "Downtime for foo:bar",
        "monitor_id": null,
        "monitor_tags": [
            "foo:bar"
        ],
        "parent_id": null,
        "recurrence": null,
        "scope": [
            "test"
        ],
        "start": 1607015009,
        "timezone": "UTC",
        "updater_id": null
    }
"""

import traceback

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
# Import Datadog

DATADOG_IMP_ERR = None
HAS_DATADOG = True
try:
    from datadog_api_client.v1 import Configuration, ApiClient, ApiException
    from datadog_api_client.v1.api.downtimes_api import DowntimesApi
    from datadog_api_client.v1.model.downtime import Downtime
    from datadog_api_client.v1.model.downtime_recurrence import DowntimeRecurrence
except ImportError:
    DATADOG_IMP_ERR = traceback.format_exc()
    HAS_DATADOG = False


def main():
    module = AnsibleModule(
        argument_spec=dict(
            api_key=dict(required=True, no_log=True),
            api_host=dict(required=False, default="https://api.datadoghq.com"),
            app_key=dict(required=True, no_log=True),
            state=dict(required=False, choices=["present", "absent"], default="present"),
            monitor_tags=dict(required=False, type="list", elements="str"),
            scope=dict(required=False, type="list", elements="str"),
            monitor_id=dict(required=False, type="int"),
            downtime_message=dict(required=False, no_log=True),
            start=dict(required=False, type="int"),
            end=dict(required=False, type="int"),
            timezone=dict(required=False, type="str"),
            rrule=dict(required=False, type="str"),
            id=dict(required=False, type="int"),
        )
    )

    # Prepare Datadog
    if not HAS_DATADOG:
        module.fail_json(msg=missing_required_lib("datadog-api-client"), exception=DATADOG_IMP_ERR)

    configuration = Configuration(
        host=module.params["api_host"],
        api_key={
            "apiKeyAuth": module.params["api_key"],
            "appKeyAuth": module.params["app_key"]
        }
    )
    with ApiClient(configuration) as api_client:
        api_client.user_agent = "ansible_collection/community_general (module_name datadog_downtime) {0}".format(
            api_client.user_agent
        )
        api_instance = DowntimesApi(api_client)

        # Validate api and app keys
        try:
            api_instance.list_downtimes(current_only=True)
        except ApiException as e:
            module.fail_json(msg="Failed to connect to Datadog server using given app_key and api_key: {0}".format(e))

        if module.params["state"] == "present":
            schedule_downtime(module, api_client)
        elif module.params["state"] == "absent":
            cancel_downtime(module, api_client)


def _get_downtime(module, api_client):
    api = DowntimesApi(api_client)
    downtime = None
    if module.params["id"]:
        try:
            downtime = api.get_downtime(module.params["id"])
        except ApiException as e:
            module.fail_json(msg="Failed to retrieve downtime with id {0}: {1}".format(module.params["id"], e))
    return downtime


def build_downtime(module):
    downtime = Downtime()
    if module.params["monitor_tags"]:
        downtime.monitor_tags = module.params["monitor_tags"]
    if module.params["scope"]:
        downtime.scope = module.params["scope"]
    if module.params["monitor_id"]:
        downtime.monitor_id = module.params["monitor_id"]
    if module.params["downtime_message"]:
        downtime.message = module.params["downtime_message"]
    if module.params["start"]:
        downtime.start = module.params["start"]
    if module.params["end"]:
        downtime.end = module.params["end"]
    if module.params["timezone"]:
        downtime.timezone = module.params["timezone"]
    if module.params["rrule"]:
        downtime.recurrence = DowntimeRecurrence(
            rrule=module.params["rrule"]
        )
    return downtime


def _post_downtime(module, api_client):
    api = DowntimesApi(api_client)
    downtime = build_downtime(module)
    try:
        resp = api.create_downtime(downtime)
        module.params["id"] = resp.id
        module.exit_json(changed=True, downtime=resp.to_dict())
    except ApiException as e:
        module.fail_json(msg="Failed to create downtime: {0}".format(e))


def _equal_dicts(a, b, ignore_keys):
    ka = set(a).difference(ignore_keys)
    kb = set(b).difference(ignore_keys)
    return ka == kb and all(a[k] == b[k] for k in ka)


def _update_downtime(module, current_downtime, api_client):
    api = DowntimesApi(api_client)
    downtime = build_downtime(module)
    try:
        if current_downtime.disabled:
            resp = api.create_downtime(downtime)
        else:
            resp = api.update_downtime(module.params["id"], downtime)
        if _equal_dicts(
            resp.to_dict(),
            current_downtime.to_dict(),
            ["active", "creator_id", "updater_id"]
        ):
            module.exit_json(changed=False, downtime=resp.to_dict())
        else:
            module.exit_json(changed=True, downtime=resp.to_dict())
    except ApiException as e:
        module.fail_json(msg="Failed to update downtime: {0}".format(e))


def schedule_downtime(module, api_client):
    downtime = _get_downtime(module, api_client)
    if downtime is None:
        _post_downtime(module, api_client)
    else:
        _update_downtime(module, downtime, api_client)


def cancel_downtime(module, api_client):
    downtime = _get_downtime(module, api_client)
    api = DowntimesApi(api_client)
    if downtime is None:
        module.exit_json(changed=False)
    try:
        api.cancel_downtime(downtime["id"])
    except ApiException as e:
        module.fail_json(msg="Failed to cancel downtime: {0}".format(e))

    module.exit_json(changed=True)


if __name__ == "__main__":
    main()
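
# --- Illustrative sketch (not part of the module) ---------------------------
# _equal_dicts() drives the changed/unchanged decision above: two downtime
# dicts count as equal when they agree on every key except the volatile ones
# ("active", "creator_id", "updater_id"), which Datadog rewrites on its own.
# For example:
#
#     a = {"id": 1, "message": "m", "active": True,  "updater_id": 7}
#     b = {"id": 1, "message": "m", "active": False, "updater_id": None}
#     _equal_dicts(a, b, ["active", "creator_id", "updater_id"])  # -> True
#     b["message"] = "changed"
#     _equal_dicts(a, b, ["active", "creator_id", "updater_id"])  # -> False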
@@ -0,0 +1,186 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Author: Artūras 'arturaz' Šlajus <x11@arturaz.net>
# Author: Naoya Nakazawa <naoya.n@gmail.com>
#
# This module is proudly sponsored by iGeolise (www.igeolise.com) and
# Tiny Lab Productions (www.tinylabproductions.com).
# Copyright (c) Ansible project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: datadog_event
short_description: Posts events to Datadog service
description:
  - "Allows posting events to the Datadog (www.datadoghq.com) service."
  - "Uses the http://docs.datadoghq.com/api/#events API."
author:
  - "Artūras 'arturaz' Šlajus (@arturaz)"
  - "Naoya Nakazawa (@n0ts)"
options:
  api_key:
    type: str
    description: ["Your DataDog API key."]
    required: true
  app_key:
    type: str
    description: ["Your DataDog app key."]
    required: true
  title:
    type: str
    description: ["The event title."]
    required: true
  text:
    type: str
    description: ["The body of the event."]
    required: true
  date_happened:
    type: int
    description:
      - POSIX timestamp of the event.
      - Default value is now.
  priority:
    type: str
    description: ["The priority of the event."]
    default: normal
    choices: [normal, low]
  host:
    type: str
    description:
      - Host name to associate with the event.
      - If not specified, it defaults to the remote system's hostname.
  api_host:
    type: str
    description:
      - DataDog API endpoint URL.
    version_added: '3.3.0'
  tags:
    type: list
    elements: str
    description: ["Comma separated list of tags to apply to the event."]
  alert_type:
    type: str
    description: ["Type of alert."]
    default: info
    choices: ['error', 'warning', 'info', 'success']
  aggregation_key:
    type: str
    description: ["An arbitrary string to use for aggregation."]
  validate_certs:
    description:
      - If C(false), SSL certificates will not be validated. This should only be used
        on personally controlled sites using self-signed certificates.
    type: bool
    default: true
'''

EXAMPLES = '''
- name: Post an event with low priority
  community.general.datadog_event:
    title: Testing from ansible
    text: Test
    priority: low
    api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
    app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN

- name: Post an event with several tags
  community.general.datadog_event:
    title: Testing from ansible
    text: Test
    api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
    app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
    tags: 'aa,bb,#host:{{ inventory_hostname }}'

- name: Post an event with several tags to another endpoint
  community.general.datadog_event:
    title: Testing from ansible
    text: Test
    api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
    app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
    api_host: 'https://example.datadoghq.eu'
    tags:
      - aa
      - b
      - '#host:{{ inventory_hostname }}'

'''

import platform
import traceback

# Import Datadog
DATADOG_IMP_ERR = None
try:
    from datadog import initialize, api
    HAS_DATADOG = True
except Exception:
    DATADOG_IMP_ERR = traceback.format_exc()
    HAS_DATADOG = False

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native


def main():
    module = AnsibleModule(
        argument_spec=dict(
            api_key=dict(required=True, no_log=True),
            app_key=dict(required=True, no_log=True),
            api_host=dict(type='str'),
            title=dict(required=True),
            text=dict(required=True),
            date_happened=dict(type='int'),
            priority=dict(default='normal', choices=['normal', 'low']),
            host=dict(),
            tags=dict(type='list', elements='str'),
            alert_type=dict(default='info', choices=['error', 'warning', 'info', 'success']),
            aggregation_key=dict(no_log=False),
            validate_certs=dict(default=True, type='bool'),
        )
    )

    # Prepare Datadog
    if not HAS_DATADOG:
        module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR)

    options = {
        'api_key': module.params['api_key'],
        'app_key': module.params['app_key'],
    }
    if module.params['api_host'] is not None:
        options['api_host'] = module.params['api_host']

    initialize(**options)

    _post_event(module)


def _post_event(module):
    try:
        if module.params['host'] is None:
            module.params['host'] = platform.node().split('.')[0]
        msg = api.Event.create(title=module.params['title'],
                               text=module.params['text'],
                               host=module.params['host'],
                               tags=module.params['tags'],
                               priority=module.params['priority'],
                               alert_type=module.params['alert_type'],
                               aggregation_key=module.params['aggregation_key'],
                               source_type_name='ansible')
        if msg['status'] != 'ok':
            module.fail_json(msg=msg)

        module.exit_json(changed=True, msg=msg)
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())


if __name__ == '__main__':
    main()
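
# --- Illustrative sketch (not part of the module) ---------------------------
# When no 'host' is given, the module falls back to the short hostname of the
# machine it runs on: platform.node() returns the node name (often FQDN-like)
# and .split('.')[0] keeps only the first label. Standalone:
#
#     import platform
#     platform.node()                 # e.g. 'web01.example.internal' (illustrative)
#     platform.node().split('.')[0]   # -> 'web01'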
@@ -0,0 +1,421 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2015, Sebastian Kornehl <sebastian.kornehl@asideas.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: datadog_monitor
short_description: Manages Datadog monitors
description:
  - Manages monitors within Datadog.
  - Options as described on https://docs.datadoghq.com/api/.
  - The type C(event-v2) was added in community.general 4.8.0.
author: Sebastian Kornehl (@skornehl)
requirements: [datadog]
options:
  api_key:
    description:
      - Your Datadog API key.
    required: true
    type: str
  api_host:
    description:
      - The URL to the Datadog API. Default value is C(https://api.datadoghq.com).
      - This value can also be set with the C(DATADOG_HOST) environment variable.
    required: false
    type: str
    version_added: '0.2.0'
  app_key:
    description:
      - Your Datadog app key.
    required: true
    type: str
  state:
    description:
      - The designated state of the monitor.
    required: true
    choices: ['present', 'absent', 'mute', 'unmute']
    type: str
  tags:
    description:
      - A list of tags to associate with your monitor when creating or updating.
      - This can help you categorize and filter monitors.
    type: list
    elements: str
  type:
    description:
      - The type of the monitor.
      - The types C(query alert), C(trace-analytics alert) and C(rum alert) were added in community.general 2.1.0.
      - The type C(composite) was added in community.general 3.4.0.
    choices:
      - metric alert
      - service check
      - event alert
      - event-v2 alert
      - process alert
      - log alert
      - query alert
      - trace-analytics alert
      - rum alert
      - composite
    type: str
  query:
    description:
      - The monitor query to notify on.
      - Syntax varies depending on what type of monitor you are creating.
    type: str
  name:
    description:
      - The name of the alert.
    required: true
    type: str
  notification_message:
    description:
      - A message to include with notifications for this monitor.
      - Email notifications can be sent to specific users by using the same '@username' notation as events.
      - Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'.
    type: str
  silenced:
    type: dict
    description:
      - Dictionary of scopes to silence, with timestamps or None.
      - Each scope will be muted until the given POSIX timestamp or forever if the value is None.
  notify_no_data:
    description:
      - Whether this monitor will notify when data stops reporting.
    type: bool
    default: false
  no_data_timeframe:
    description:
      - The number of minutes before a monitor will notify when data stops reporting.
      - Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks.
      - If not specified, it defaults to 2x timeframe for metric, 2 minutes for service.
    type: str
  timeout_h:
    description:
      - The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state.
    type: str
  renotify_interval:
    description:
      - The number of minutes after the last notification before a monitor will re-notify on the current status.
      - It will only re-notify if it is not resolved.
    type: str
  escalation_message:
    description:
      - A message to include with a re-notification. Supports the '@username' notification we allow elsewhere.
      - Not applicable if I(renotify_interval=None).
    type: str
  notify_audit:
    description:
      - Whether tagged users will be notified on changes to this monitor.
    type: bool
    default: false
  thresholds:
    type: dict
    description:
      - A dictionary of thresholds by status.
      - Only available for service checks and metric alerts.
      - Because each of them can have multiple thresholds, we do not define them directly in the query.
      - "If not specified, it defaults to: C({'ok': 1, 'critical': 1, 'warning': 1})."
  locked:
    description:
      - Whether changes to this monitor should be restricted to the creator or admins.
    type: bool
    default: false
  require_full_window:
    description:
      - Whether this monitor needs a full window of data before it gets evaluated.
      - We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped.
    type: bool
  new_host_delay:
    description:
      - A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts.
      - This gives the host time to fully initialize.
    type: str
  evaluation_delay:
    description:
      - Time to delay evaluation (in seconds).
      - Effective for sparse values.
    type: str
  id:
    description:
      - The ID of the alert.
      - If set, will be used instead of the name to locate the alert.
    type: str
  include_tags:
    description:
      - Whether notifications from this monitor automatically insert its triggering tags into the title.
    type: bool
    default: true
    version_added: 1.3.0
  priority:
    description:
      - Integer from 1 (high) to 5 (low) indicating alert severity.
    type: int
    version_added: 4.6.0
'''

EXAMPLES = '''
- name: Create a metric monitor
  community.general.datadog_monitor:
    type: "metric alert"
    name: "Test monitor"
    state: "present"
    query: "datadog.agent.up.over('host:host1').last(2).count_by_status()"
    notification_message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
    api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
    app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"

- name: Delete a monitor
  community.general.datadog_monitor:
    name: "Test monitor"
    state: "absent"
    api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
    app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"

- name: Mute a monitor
  community.general.datadog_monitor:
    name: "Test monitor"
    state: "mute"
    silenced: '{"*":None}'
    api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
    app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"

- name: Unmute a monitor
  community.general.datadog_monitor:
    name: "Test monitor"
    state: "unmute"
    api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
    app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"

- name: Use datadoghq.eu platform instead of datadoghq.com
  community.general.datadog_monitor:
    name: "Test monitor"
    state: "absent"
    api_host: https://api.datadoghq.eu
    api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
    app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
'''
import traceback

# Import Datadog
DATADOG_IMP_ERR = None
try:
    from datadog import initialize, api
    HAS_DATADOG = True
except Exception:
    DATADOG_IMP_ERR = traceback.format_exc()
    HAS_DATADOG = False

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native


def main():
    module = AnsibleModule(
        argument_spec=dict(
            api_key=dict(required=True, no_log=True),
            api_host=dict(),
            app_key=dict(required=True, no_log=True),
            state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']),
            type=dict(choices=['metric alert', 'service check', 'event alert', 'event-v2 alert', 'process alert',
                               'log alert', 'query alert', 'trace-analytics alert',
                               'rum alert', 'composite']),
            name=dict(required=True),
            query=dict(),
            notification_message=dict(no_log=True),
            silenced=dict(type='dict'),
            notify_no_data=dict(default=False, type='bool'),
            no_data_timeframe=dict(),
            timeout_h=dict(),
            renotify_interval=dict(),
            escalation_message=dict(),
            notify_audit=dict(default=False, type='bool'),
            thresholds=dict(type='dict', default=None),
            tags=dict(type='list', elements='str', default=None),
            locked=dict(default=False, type='bool'),
            require_full_window=dict(type='bool'),
            new_host_delay=dict(),
            evaluation_delay=dict(),
            id=dict(),
            include_tags=dict(required=False, default=True, type='bool'),
            priority=dict(type='int'),
        )
    )

    # Prepare Datadog
    if not HAS_DATADOG:
        module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR)

    options = {
        'api_key': module.params['api_key'],
        'api_host': module.params['api_host'],
        'app_key': module.params['app_key']
    }

    initialize(**options)

    # Check whether api_key and app_key are correct;
    # if not, fail here.
    response = api.Monitor.get_all()
    if isinstance(response, dict):
        msg = response.get('errors', None)
        if msg:
            module.fail_json(msg="Failed to connect to Datadog server using given app_key and api_key: {0}".format(msg[0]))

    if module.params['state'] == 'present':
        install_monitor(module)
    elif module.params['state'] == 'absent':
        delete_monitor(module)
    elif module.params['state'] == 'mute':
        mute_monitor(module)
    elif module.params['state'] == 'unmute':
        unmute_monitor(module)


def _fix_template_vars(message):
    if message:
        return message.replace('[[', '{{').replace(']]', '}}')
    return message


def _get_monitor(module):
    if module.params['id'] is not None:
        monitor = api.Monitor.get(module.params['id'])
        if 'errors' in monitor:
            module.fail_json(msg="Failed to retrieve monitor with id %s, errors are %s" % (module.params['id'], str(monitor['errors'])))
        return monitor
    else:
        monitors = api.Monitor.get_all()
        for monitor in monitors:
            if monitor['name'] == _fix_template_vars(module.params['name']):
                return monitor
    return {}


def _post_monitor(module, options):
    try:
        kwargs = dict(type=module.params['type'], query=module.params['query'],
                      name=_fix_template_vars(module.params['name']),
                      message=_fix_template_vars(module.params['notification_message']),
                      escalation_message=_fix_template_vars(module.params['escalation_message']),
                      priority=module.params['priority'],
                      options=options)
        if module.params['tags'] is not None:
            kwargs['tags'] = module.params['tags']
        msg = api.Monitor.create(**kwargs)
        if 'errors' in msg:
            module.fail_json(msg=str(msg['errors']))
        else:
            module.exit_json(changed=True, msg=msg)
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())


def _equal_dicts(a, b, ignore_keys):
    ka = set(a).difference(ignore_keys)
    kb = set(b).difference(ignore_keys)
    return ka == kb and all(a[k] == b[k] for k in ka)


def _update_monitor(module, monitor, options):
    try:
        kwargs = dict(id=monitor['id'], query=module.params['query'],
                      name=_fix_template_vars(module.params['name']),
                      message=_fix_template_vars(module.params['notification_message']),
                      escalation_message=_fix_template_vars(module.params['escalation_message']),
                      priority=module.params['priority'],
                      options=options)
        if module.params['tags'] is not None:
            kwargs['tags'] = module.params['tags']
        msg = api.Monitor.update(**kwargs)

        if 'errors' in msg:
            module.fail_json(msg=str(msg['errors']))
        elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified', 'matching_downtimes', 'overall_state_modified']):
            module.exit_json(changed=False, msg=msg)
        else:
            module.exit_json(changed=True, msg=msg)
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())


def install_monitor(module):
    options = {
        "silenced": module.params['silenced'],
        "notify_no_data": module.boolean(module.params['notify_no_data']),
        "no_data_timeframe": module.params['no_data_timeframe'],
        "timeout_h": module.params['timeout_h'],
        "renotify_interval": module.params['renotify_interval'],
        "escalation_message": module.params['escalation_message'],
        "notify_audit": module.boolean(module.params['notify_audit']),
        "locked": module.boolean(module.params['locked']),
        "require_full_window": module.params['require_full_window'],
        "new_host_delay": module.params['new_host_delay'],
        "evaluation_delay": module.params['evaluation_delay'],
        "include_tags": module.params['include_tags'],
    }

    if module.params['type'] == "service check":
        options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1}
    if module.params['type'] in ["metric alert", "log alert", "query alert", "trace-analytics alert", "rum alert"] and module.params['thresholds'] is not None:
        options["thresholds"] = module.params['thresholds']

    monitor = _get_monitor(module)
    if not monitor:
        _post_monitor(module, options)
    else:
        _update_monitor(module, monitor, options)


def delete_monitor(module):
    monitor = _get_monitor(module)
    if not monitor:
        module.exit_json(changed=False)
    try:
        msg = api.Monitor.delete(monitor['id'])
        module.exit_json(changed=True, msg=msg)
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())


def mute_monitor(module):
    monitor = _get_monitor(module)
    if not monitor:
        module.fail_json(msg="Monitor %s not found!" % module.params['name'])
    elif monitor['options']['silenced']:
        module.fail_json(msg="Monitor is already muted. Datadog does not allow modifying muted alerts, consider unmuting it first.")
    elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0):
        module.exit_json(changed=False)
    try:
        if module.params['silenced'] is None or module.params['silenced'] == "":
            msg = api.Monitor.mute(id=monitor['id'])
        else:
            msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced'])
        module.exit_json(changed=True, msg=msg)
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())


def unmute_monitor(module):
    monitor = _get_monitor(module)
    if not monitor:
        module.fail_json(msg="Monitor %s not found!" % module.params['name'])
    elif not monitor['options']['silenced']:
        module.exit_json(changed=False)
    try:
        msg = api.Monitor.unmute(monitor['id'])
        module.exit_json(changed=True, msg=msg)
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())


if __name__ == '__main__':
    main()
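
# --- Illustrative sketch (not part of the module) ---------------------------
# _fix_template_vars() exists because Ansible would otherwise try to render
# '{{ ... }}' in playbooks, so the module documents '[[' / ']]' instead and
# rewrites them into Datadog's real '{{ ... }}' template variables on the way
# out:
#
#     _fix_template_vars("Host [[host.name]] is failing")
#     # -> 'Host {{host.name}} is failing'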
@@ -0,0 +1,388 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2017, Branko Majic <branko@majic.rs>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = r'''
module: dconf
author:
  - "Branko Majic (@azaghal)"
short_description: Modify and read dconf database
description:
  - This module allows modifications and reading of C(dconf) database. The module
    is implemented as a wrapper around C(dconf) tool. Please see the dconf(1) man
    page for more details.
  - Since C(dconf) requires a running D-Bus session to change values, the module
    will try to detect an existing session and reuse it, or run the tool via
    C(dbus-run-session).
notes:
  - This module depends on C(psutil) Python library (version 4.0.0 and upwards),
    C(dconf), C(dbus-send), and C(dbus-run-session) binaries. Depending on
    distribution you are using, you may need to install additional packages to
    have these available.
  - Detection of existing, running D-Bus session, required to change settings
    via C(dconf), is not 100% reliable due to implementation details of D-Bus
    daemon itself. This might lead to running applications not picking up
    changes on the fly if options are changed via Ansible and
    C(dbus-run-session).
  - Keep in mind that the C(dconf) CLI tool, which this module wraps around,
    utilises an unusual syntax for the values (GVariant). For example, if you
    wanted to provide a string value, the correct syntax would be
    I(value="'myvalue'") - with single quotes as part of the Ansible parameter
    value.
  - When using loops in combination with a value like
    :code:`"[('xkb', 'us'), ('xkb', 'se')]"`, you need to be aware of possible
    type conversions. Applying a filter :code:`"{{ item.value | string }}"`
    to the parameter variable can avoid potential conversion problems.
  - The easiest way to figure out exact syntax/value you need to provide for a
    key is by making the configuration change in application affected by the
    key, and then having a look at value set via commands C(dconf dump
    /path/to/dir/) or C(dconf read /path/to/key).
options:
  key:
    type: str
    required: true
    description:
      - A dconf key to modify or read from the dconf database.
  value:
    type: str
    required: false
    description:
      - Value to set for the specified dconf key. Value should be specified in
        GVariant format. Due to complexity of this format, it is best to have a
        look at existing values in the dconf database.
      - Required for I(state=present).
  state:
    type: str
    required: false
    default: present
    choices: [ 'read', 'present', 'absent' ]
    description:
      - The action to take upon the key/value.
'''

RETURN = r"""
value:
    description: value associated with the requested key
    returned: success, state was "read"
    type: str
    sample: "'Default'"
"""

EXAMPLES = r"""
- name: Configure available keyboard layouts in Gnome
  community.general.dconf:
    key: "/org/gnome/desktop/input-sources/sources"
    value: "[('xkb', 'us'), ('xkb', 'se')]"
    state: present

- name: Read currently available keyboard layouts in Gnome
  community.general.dconf:
    key: "/org/gnome/desktop/input-sources/sources"
    state: read
  register: keyboard_layouts

- name: Reset the available keyboard layouts in Gnome
  community.general.dconf:
    key: "/org/gnome/desktop/input-sources/sources"
    state: absent

- name: Configure available keyboard layouts in Cinnamon
  community.general.dconf:
    key: "/org/gnome/libgnomekbd/keyboard/layouts"
    value: "['us', 'se']"
    state: present

- name: Read currently available keyboard layouts in Cinnamon
  community.general.dconf:
    key: "/org/gnome/libgnomekbd/keyboard/layouts"
    state: read
  register: keyboard_layouts

- name: Reset the available keyboard layouts in Cinnamon
  community.general.dconf:
    key: "/org/gnome/libgnomekbd/keyboard/layouts"
    state: absent

- name: Disable desktop effects in Cinnamon
  community.general.dconf:
    key: "/org/cinnamon/desktop-effects"
    value: "false"
    state: present
"""


import os
import traceback

PSUTIL_IMP_ERR = None
try:
    import psutil
    HAS_PSUTIL = True
except ImportError:
    PSUTIL_IMP_ERR = traceback.format_exc()
    HAS_PSUTIL = False

from ansible.module_utils.basic import AnsibleModule, missing_required_lib


class DBusWrapper(object):
    """
    Helper class that can be used for running a command with a working D-Bus
    session.

    If possible, command will be run against an existing D-Bus session,
    otherwise the session will be spawned via dbus-run-session.

    Example usage:

        dbus_wrapper = DBusWrapper(ansible_module)
        dbus_wrapper.run_command(["printenv", "DBUS_SESSION_BUS_ADDRESS"])
    """

    def __init__(self, module):
        """
        Initialises an instance of the class.

        :param module: Ansible module instance used to signal failures and run commands.
        :type module: AnsibleModule
        """

        # Store passed-in arguments and set-up some defaults.
        self.module = module

        # Try to extract existing D-Bus session address.
        self.dbus_session_bus_address = self._get_existing_dbus_session()

        # If no existing D-Bus session was detected, check if dbus-run-session
        # is available.
        if self.dbus_session_bus_address is None:
            self.dbus_run_session_cmd = self.module.get_bin_path('dbus-run-session', required=True)

    def _get_existing_dbus_session(self):
        """
        Detects and returns an existing D-Bus session bus address.

        :returns: string -- D-Bus session bus address. If a running D-Bus session was not detected, returns None.
        """

        # We'll be checking the processes of current user only.
        uid = os.getuid()

        # Go through all the pids for this user, try to extract the D-Bus
        # session bus address from environment, and ensure it is possible to
        # connect to it.
        self.module.debug("Trying to detect existing D-Bus user session for user: %d" % uid)

        for pid in psutil.pids():
            try:
                process = psutil.Process(pid)
                process_real_uid, dummy, dummy = process.uids()
                if process_real_uid == uid and 'DBUS_SESSION_BUS_ADDRESS' in process.environ():
                    dbus_session_bus_address_candidate = process.environ()['DBUS_SESSION_BUS_ADDRESS']
                    self.module.debug("Found D-Bus user session candidate at address: %s" % dbus_session_bus_address_candidate)
                    dbus_send_cmd = self.module.get_bin_path('dbus-send', required=True)
                    command = [dbus_send_cmd, '--address=%s' % dbus_session_bus_address_candidate, '--type=signal', '/', 'com.example.test']
                    rc, dummy, dummy = self.module.run_command(command)

                    if rc == 0:
                        self.module.debug("Verified D-Bus user session candidate as usable at address: %s" % dbus_session_bus_address_candidate)

                        return dbus_session_bus_address_candidate

            # This can happen with things like SSH sessions etc.
            except psutil.AccessDenied:
                pass
            # Process has disappeared while inspecting it
            except psutil.NoSuchProcess:
                pass

        self.module.debug("Failed to find running D-Bus user session, will use dbus-run-session")

        return None

    def run_command(self, command):
        """
        Runs the specified command within a functional D-Bus session. Command is
        effectively passed-on to AnsibleModule.run_command() method, with
        modification for using dbus-run-session if necessary.

        :param command: Command to run, including parameters. Each element of the list should be a string.
        :type command: list

        :returns: tuple(result_code, standard_output, standard_error) -- Result code, standard output, and standard error from running the command.
        """

        if self.dbus_session_bus_address is None:
            self.module.debug("Using dbus-run-session wrapper for running commands.")
            command = [self.dbus_run_session_cmd] + command
            rc, out, err = self.module.run_command(command)

            if self.dbus_session_bus_address is None and rc == 127:
                self.module.fail_json(msg="Failed to run passed-in command, dbus-run-session faced an internal error: %s" % err)
        else:
            extra_environment = {'DBUS_SESSION_BUS_ADDRESS': self.dbus_session_bus_address}
            rc, out, err = self.module.run_command(command, environ_update=extra_environment)

        return rc, out, err
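
# --- Illustrative sketch (not part of the class above) ----------------------
# The detection loop above boils down to: scan this user's processes with
# psutil and borrow the first DBUS_SESSION_BUS_ADDRESS found. A stripped-down
# version, without the dbus-send verification step the class performs:

def find_dbus_session_address():
    """Return a DBUS_SESSION_BUS_ADDRESS from any process of the current user, or None."""
    import os
    import psutil

    uid = os.getuid()
    for proc in psutil.process_iter():
        try:
            if proc.uids().real == uid:
                address = proc.environ().get('DBUS_SESSION_BUS_ADDRESS')
                if address:
                    return address
        except (psutil.AccessDenied, psutil.NoSuchProcess):
            continue  # inaccessible or vanished processes are expected
    return None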
class DconfPreference(object):
|
||||
|
||||
def __init__(self, module, check_mode=False):
|
||||
"""
|
||||
Initialises instance of the class.
|
||||
|
||||
:param module: Ansible module instance used to signal failures and run commands.
|
||||
:type module: AnsibleModule
|
||||
|
||||
:param check_mode: Specify whether to only check if a change should be made or if to actually make a change.
|
||||
:type check_mode: bool
|
||||
"""
|
||||
|
||||
self.module = module
|
||||
self.check_mode = check_mode
|
||||
# Check if dconf binary exists
|
||||
self.dconf_bin = self.module.get_bin_path('dconf', required=True)
|
||||
|
||||
def read(self, key):
|
||||
"""
|
||||
Retrieves current value associated with the dconf key.
|
||||
|
||||
If an error occurs, a call will be made to AnsibleModule.fail_json.
|
||||
|
||||
:returns: string -- Value assigned to the provided key. If the value is not set for specified key, returns None.
|
||||
"""
|
||||
command = [self.dconf_bin, "read", key]
|
||||
|
||||
rc, out, err = self.module.run_command(command)
|
||||
|
||||
if rc != 0:
|
||||
self.module.fail_json(msg='dconf failed while reading the value with error: %s' % err,
|
||||
out=out,
|
||||
err=err)
|
||||
|
||||
if out == '':
|
||||
value = None
|
||||
else:
|
||||
value = out.rstrip('\n')
|
||||
|
||||
return value
|
||||
|
||||
def write(self, key, value):
|
||||
"""
|
||||
Writes the value for specified key.
|
||||
|
||||
If an error occurs, a call will be made to AnsibleModule.fail_json.
|
||||
|
||||
:param key: dconf key for which the value should be set. Should be a full path.
|
||||
:type key: str
|
||||
|
||||
:param value: Value to set for the specified dconf key. Should be specified in GVariant format.
|
||||
:type value: str
|
||||
|
||||
:returns: bool -- True if a change was made, False if no change was required.
|
||||
"""
|
||||
# If no change is needed (or won't be done due to check_mode), notify
|
||||
# caller straight away.
|
||||
if value == self.read(key):
|
||||
return False
|
||||
elif self.check_mode:
|
||||
return True
|
||||
|
||||
# Set-up command to run. Since DBus is needed for write operation, wrap
|
||||
# dconf command dbus-launch.
|
||||
command = [self.dconf_bin, "write", key, value]
|
||||
|
||||
# Run the command and fetch standard return code, stdout, and stderr.
|
||||
dbus_wrapper = DBusWrapper(self.module)
|
||||
rc, out, err = dbus_wrapper.run_command(command)
|
||||
|
||||
if rc != 0:
|
||||
self.module.fail_json(msg='dconf failed while write the value with error: %s' % err,
|
||||
out=out,
|
||||
err=err)
|
||||
|
||||
# Value was changed.
|
||||
return True

    def reset(self, key):
        """
        Resets the value for the specified key (removes it from user configuration).

        If an error occurs, a call will be made to AnsibleModule.fail_json.

        :param key: dconf key to reset. Should be a full path.
        :type key: str

        :returns: bool -- True if a change was made, False if no change was required.
        """

        # Read the current value first.
        current_value = self.read(key)

        # If the key is not set at all, no change is needed; otherwise just
        # notify the caller if we are in check mode.
        if current_value is None:
            return False
        elif self.check_mode:
            return True

        # Set up the command to run. Since DBus is needed for the reset
        # operation, wrap the dconf command with dbus-launch.
        command = [self.dconf_bin, "reset", key]

        # Run the command and fetch standard return code, stdout, and stderr.
        dbus_wrapper = DBusWrapper(self.module)
        rc, out, err = dbus_wrapper.run_command(command)

        if rc != 0:
            self.module.fail_json(msg='dconf failed while resetting the value with error: %s' % err,
                                  out=out,
                                  err=err)

        # Value was changed.
        return True


def main():
    # Set up the Ansible module.
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent', 'read']),
            key=dict(required=True, type='str', no_log=False),
            value=dict(required=False, default=None, type='str'),
        ),
        supports_check_mode=True
    )

    if not HAS_PSUTIL:
        module.fail_json(msg=missing_required_lib("psutil"), exception=PSUTIL_IMP_ERR)

    # If present state was specified, value must be provided.
    if module.params['state'] == 'present' and module.params['value'] is None:
        module.fail_json(msg='State "present" requires "value" to be set.')

    # Create wrapper instance.
    dconf = DconfPreference(module, module.check_mode)

    # Process based on different states.
    if module.params['state'] == 'read':
        value = dconf.read(module.params['key'])
        module.exit_json(changed=False, value=value)
    elif module.params['state'] == 'present':
        changed = dconf.write(module.params['key'], module.params['value'])
        module.exit_json(changed=changed)
    elif module.params['state'] == 'absent':
        changed = dconf.reset(module.params['key'])
        module.exit_json(changed=changed)


if __name__ == '__main__':
    main()
@@ -0,0 +1,527 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2014, Jasper N. Brouwer <jasper@nerdsweide.nl>
# Copyright (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: deploy_helper
author: "Ramon de la Fuente (@ramondelafuente)"
short_description: Manages some of the steps common in deploying projects
description:
  - The Deploy Helper manages some of the steps common in deploying software.
    It creates a folder structure, manages a symlink for the current release
    and cleans up old releases.
  - "Running it with the I(state=query) or I(state=present) will return the C(deploy_helper) fact.
    C(project_path), whatever you set in the I(path) parameter,
    C(current_path), the path to the symlink that points to the active release,
    C(releases_path), the path to the folder to keep releases in,
    C(shared_path), the path to the folder to keep shared resources in,
    C(unfinished_filename), the file to check for to recognize unfinished builds,
    C(previous_release), the release the 'current' symlink is pointing to,
    C(previous_release_path), the full path to the 'current' symlink target,
    C(new_release), either the 'release' parameter or a generated timestamp,
    C(new_release_path), the path to the new release folder (not created by the module)."

options:
  path:
    type: path
    required: true
    aliases: ['dest']
    description:
      - The root path of the project.
        Returned in the C(deploy_helper.project_path) fact.

  state:
    type: str
    description:
      - The state of the project.
        C(query) will only gather facts,
        C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders,
        C(finalize) will remove the unfinished_filename file, create a symlink to the newly
        deployed release and optionally clean old releases,
        C(clean) will remove failed & old releases,
        C(absent) will remove the project folder (synonymous to the M(ansible.builtin.file) module with I(state=absent)).
    choices: [ present, finalize, absent, clean, query ]
    default: present

  release:
    type: str
    description:
      - The release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (i.e. '20141119223359').
        This parameter is optional during I(state=present), but needs to be set explicitly for I(state=finalize).
        You can use the generated fact I(release={{ deploy_helper.new_release }}).

  releases_path:
    type: str
    description:
      - The name of the folder that will hold the releases. This can be relative to I(path) or absolute.
        Returned in the C(deploy_helper.releases_path) fact.
    default: releases

  shared_path:
    type: path
    description:
      - The name of the folder that will hold the shared resources. This can be relative to I(path) or absolute.
        If this is set to an empty string, no shared folder will be created.
        Returned in the C(deploy_helper.shared_path) fact.
    default: shared

  current_path:
    type: path
    description:
      - The name of the symlink that is created when the deploy is finalized. Used in I(finalize) and I(clean).
        Returned in the C(deploy_helper.current_path) fact.
    default: current

  unfinished_filename:
    type: str
    description:
      - The name of the file that indicates a deploy has not finished. All folders in the I(releases_path) that
        contain this file will be deleted on I(state=finalize) with I(clean=True), or I(state=clean). This file is
        automatically deleted from the I(new_release_path) during I(state=finalize).
    default: DEPLOY_UNFINISHED

  clean:
    description:
      - Whether to run the clean procedure in case of I(state=finalize).
    type: bool
    default: true

  keep_releases:
    type: int
    description:
      - The number of old releases to keep when cleaning. Used in I(finalize) and I(clean). Any unfinished builds
        will be deleted first, so only correct releases will count. The current version will not count.
    default: 5

notes:
  - Facts are only returned for I(state=query) and I(state=present). If you use both, you should pass any overridden
    parameters to both calls, otherwise the second call will overwrite the facts of the first one.
  - When using I(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
    new naming strategy without problems.
  - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent
    unless you pass your own release name with I(release). Due to the nature of deploying software, this should not
    be much of a problem.
extends_documentation_fragment: files
'''

EXAMPLES = '''

# General explanation, starting with an example folder structure for a project:

# root:
#     releases:
#         - 20140415234508
#         - 20140415235146
#         - 20140416082818
#
#     shared:
#         - sessions
#         - uploads
#
#     current: releases/20140416082818


# The 'releases' folder holds all the available releases. A release is a complete build of the application being
# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem.
# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like
# git tags or commit hashes.
#
# During a deploy, a new folder should be created in the releases folder and any build steps required should be
# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink
# with a link to this build.
#
# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server
# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release
# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps.
#
# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress.
# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new
# release is reduced to the time it takes to switch the link.
#
# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release
# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated
# procedure to remove it during cleanup.


# Typical usage
- name: Initialize the deploy root and gather facts
  community.general.deploy_helper:
    path: /path/to/root
- name: Clone the project to the new release folder
  ansible.builtin.git:
    repo: git://foosball.example.org/path/to/repo.git
    dest: '{{ deploy_helper.new_release_path }}'
    version: v1.1.1
- name: Add an unfinished file, to allow cleanup on successful finalize
  ansible.builtin.file:
    path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}'
    state: touch
- name: Perform some build steps, like running your dependency manager for example
  composer:
    command: install
    working_dir: '{{ deploy_helper.new_release_path }}'
- name: Create some folders in the shared folder
  ansible.builtin.file:
    path: '{{ deploy_helper.shared_path }}/{{ item }}'
    state: directory
  with_items:
    - sessions
    - uploads
- name: Add symlinks from the new release to the shared folder
  ansible.builtin.file:
    path: '{{ deploy_helper.new_release_path }}/{{ item.path }}'
    src: '{{ deploy_helper.shared_path }}/{{ item.src }}'
    state: link
  with_items:
    - path: app/sessions
      src: sessions
    - path: web/uploads
      src: uploads
- name: Finalize the deploy, removing the unfinished file and switching the symlink
  community.general.deploy_helper:
    path: /path/to/root
    release: '{{ deploy_helper.new_release }}'
    state: finalize

# Retrieving facts before running a deploy
- name: Run 'state=query' to gather facts without changing anything
  community.general.deploy_helper:
    path: /path/to/root
    state: query
# Remember to set the 'release' parameter when you actually call 'state=present' later
- name: Initialize the deploy root
  community.general.deploy_helper:
    path: /path/to/root
    release: '{{ deploy_helper.new_release }}'
    state: present

# all paths can be absolute or relative (to the 'path' parameter)
- community.general.deploy_helper:
    path: /path/to/root
    releases_path: /var/www/project/releases
    shared_path: /var/www/shared
    current_path: /var/www/active

# Using your own naming strategy for releases (a version tag in this case):
- community.general.deploy_helper:
    path: /path/to/root
    release: v1.1.1
    state: present
- community.general.deploy_helper:
    path: /path/to/root
    release: '{{ deploy_helper.new_release }}'
    state: finalize

# Using a different unfinished_filename:
- community.general.deploy_helper:
    path: /path/to/root
    unfinished_filename: README.md
    release: '{{ deploy_helper.new_release }}'
    state: finalize

# Postponing the cleanup of older builds:
- community.general.deploy_helper:
    path: /path/to/root
    release: '{{ deploy_helper.new_release }}'
    state: finalize
    clean: false
- community.general.deploy_helper:
    path: /path/to/root
    state: clean
# Or running the cleanup ahead of the new deploy
- community.general.deploy_helper:
    path: /path/to/root
    state: clean
- community.general.deploy_helper:
    path: /path/to/root
    state: present

# Keeping more old releases:
- community.general.deploy_helper:
    path: /path/to/root
    release: '{{ deploy_helper.new_release }}'
    state: finalize
    keep_releases: 10
# Or, if you use 'clean=false' on finalize:
- community.general.deploy_helper:
    path: /path/to/root
    state: clean
    keep_releases: 10

# Removing the entire project root folder
- community.general.deploy_helper:
    path: /path/to/root
    state: absent

# Debugging the facts returned by the module
- community.general.deploy_helper:
    path: /path/to/root
- ansible.builtin.debug:
    var: deploy_helper
'''
import os
import shutil
import time
import traceback

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native


class DeployHelper(object):

    def __init__(self, module):
        self.module = module
        self.file_args = module.load_file_common_arguments(module.params)

        self.clean = module.params['clean']
        self.current_path = module.params['current_path']
        self.keep_releases = module.params['keep_releases']
        self.path = module.params['path']
        self.release = module.params['release']
        self.releases_path = module.params['releases_path']
        self.shared_path = module.params['shared_path']
        self.state = module.params['state']
        self.unfinished_filename = module.params['unfinished_filename']

    def gather_facts(self):
        current_path = os.path.join(self.path, self.current_path)
        releases_path = os.path.join(self.path, self.releases_path)
        if self.shared_path:
            shared_path = os.path.join(self.path, self.shared_path)
        else:
            shared_path = None

        previous_release, previous_release_path = self._get_last_release(current_path)

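        # If no release name was given for query/present, generate one from the
        # current timestamp; this is also why runs are not idempotent unless an
        # explicit release name is passed (see the notes in DOCUMENTATION).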
        if not self.release and (self.state == 'query' or self.state == 'present'):
            self.release = time.strftime("%Y%m%d%H%M%S")

        if self.release:
            new_release_path = os.path.join(releases_path, self.release)
        else:
            new_release_path = None

        return {
            'project_path': self.path,
            'current_path': current_path,
            'releases_path': releases_path,
            'shared_path': shared_path,
            'previous_release': previous_release,
            'previous_release_path': previous_release_path,
            'new_release': self.release,
            'new_release_path': new_release_path,
            'unfinished_filename': self.unfinished_filename
        }

    def delete_path(self, path):
        if not os.path.lexists(path):
            return False

        if not os.path.isdir(path):
            self.module.fail_json(msg="%s exists but is not a directory" % path)

        if not self.module.check_mode:
            try:
                shutil.rmtree(path, ignore_errors=False)
            except Exception as e:
                self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc())

        return True

    def create_path(self, path):
        changed = False

        if not os.path.lexists(path):
            changed = True
            if not self.module.check_mode:
                os.makedirs(path)

        elif not os.path.isdir(path):
            self.module.fail_json(msg="%s exists but is not a directory" % path)

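        # Apply the common file attributes (owner, group, mode) from the task
        # to the directory; this can report a change even when the directory
        # already existed.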
        changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)

        return changed

    def check_link(self, path):
        if os.path.lexists(path):
            if not os.path.islink(path):
                self.module.fail_json(msg="%s exists but is not a symbolic link" % path)

    def create_link(self, source, link_name):
        if os.path.islink(link_name):
            norm_link = os.path.normpath(os.path.realpath(link_name))
            norm_source = os.path.normpath(os.path.realpath(source))
            if norm_link == norm_source:
                changed = False
            else:
                changed = True
                if not self.module.check_mode:
                    if not os.path.lexists(source):
                        self.module.fail_json(msg="the symlink target %s doesn't exist" % source)
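                    # Switch the symlink atomically: create a uniquely named
                    # temporary link next to it, then rename() it over the old
                    # link so there is never a moment without a 'current' path.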
                    tmp_link_name = link_name + '.' + self.unfinished_filename
                    if os.path.islink(tmp_link_name):
                        os.unlink(tmp_link_name)
                    os.symlink(source, tmp_link_name)
                    os.rename(tmp_link_name, link_name)
        else:
            changed = True
            if not self.module.check_mode:
                os.symlink(source, link_name)

        return changed

    def remove_unfinished_file(self, new_release_path):
        changed = False
        unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename)
        if os.path.lexists(unfinished_file_path):
            changed = True
            if not self.module.check_mode:
                os.remove(unfinished_file_path)

        return changed

    def remove_unfinished_builds(self, releases_path):
        changes = 0

        for release in os.listdir(releases_path):
            if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)):
                if self.module.check_mode:
                    changes += 1
                else:
                    changes += self.delete_path(os.path.join(releases_path, release))

        return changes

    def remove_unfinished_link(self, path):
        changed = False

        if not self.release:
            return changed

        # Report the leftover temporary link in check mode too, instead of
        # silently skipping it.
        tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename)
        if os.path.exists(tmp_link_name):
            changed = True
            if not self.module.check_mode:
                os.remove(tmp_link_name)

        return changed

    def cleanup(self, releases_path, reserve_version):
        changes = 0

        if os.path.lexists(releases_path):
            releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))]
            try:
                releases.remove(reserve_version)
            except ValueError:
                pass

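            # Order the remaining releases newest-first by creation time and
            # delete everything beyond the newest keep_releases entries; in
            # check mode only the number of candidates is counted.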
            if not self.module.check_mode:
                releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True)
                for release in releases[self.keep_releases:]:
                    changes += self.delete_path(os.path.join(releases_path, release))
            elif len(releases) > self.keep_releases:
                changes += (len(releases) - self.keep_releases)

        return changes

    def _get_file_args(self, path):
        file_args = self.file_args.copy()
        file_args['path'] = path
        return file_args

    def _get_last_release(self, current_path):
        previous_release = None
        previous_release_path = None

        if os.path.lexists(current_path):
            previous_release_path = os.path.realpath(current_path)
            previous_release = os.path.basename(previous_release_path)

        return previous_release, previous_release_path


def main():

    module = AnsibleModule(
        argument_spec=dict(
            path=dict(aliases=['dest'], required=True, type='path'),
            release=dict(type='str'),
            releases_path=dict(type='str', default='releases'),
            shared_path=dict(type='path', default='shared'),
            current_path=dict(type='path', default='current'),
            keep_releases=dict(type='int', default=5),
            clean=dict(type='bool', default=True),
            unfinished_filename=dict(type='str', default='DEPLOY_UNFINISHED'),
            state=dict(choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present')
        ),
        required_if=[
            ('state', 'finalize', ['release']),
        ],
        add_file_common_args=True,
        supports_check_mode=True
    )

    deploy_helper = DeployHelper(module)
    facts = deploy_helper.gather_facts()

    result = {
        'state': deploy_helper.state
    }

    changes = 0

    if deploy_helper.state == 'query':
        result['ansible_facts'] = {'deploy_helper': facts}

    elif deploy_helper.state == 'present':
        deploy_helper.check_link(facts['current_path'])
        changes += deploy_helper.create_path(facts['project_path'])
        changes += deploy_helper.create_path(facts['releases_path'])
        if deploy_helper.shared_path:
            changes += deploy_helper.create_path(facts['shared_path'])

        result['ansible_facts'] = {'deploy_helper': facts}

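    # Finalize: drop the unfinished marker from the new release, switch the
    # 'current' symlink over to it, and optionally clean up old and unfinished
    # releases in the same run.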
    elif deploy_helper.state == 'finalize':
        if deploy_helper.keep_releases <= 0:
            module.fail_json(msg="'keep_releases' should be at least 1")

        changes += deploy_helper.remove_unfinished_file(facts['new_release_path'])
        changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path'])
        if deploy_helper.clean:
            changes += deploy_helper.remove_unfinished_link(facts['project_path'])
            changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
            changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])

    elif deploy_helper.state == 'clean':
        changes += deploy_helper.remove_unfinished_link(facts['project_path'])
        changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
        changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])

    elif deploy_helper.state == 'absent':
        # destroy the facts
        result['ansible_facts'] = {'deploy_helper': []}
        changes += deploy_helper.delete_path(facts['project_path'])

    if changes > 0:
        result['changed'] = True
    else:
        result['changed'] = False

    module.exit_json(**result)


if __name__ == '__main__':
    main()
@@ -0,0 +1,297 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Dimension Data
# Authors:
#   - Aimon Bustardo <aimon.bustardo@dimensiondata.com>
#   - Bert Diwa <Lamberto.Diwa@dimensiondata.com>
#   - Adam Friedman <tintoy@tintoy.io>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: dimensiondata_network
short_description: Create, update, and delete MCP 1.0 & 2.0 networks
extends_documentation_fragment:
  - community.general.dimensiondata
  - community.general.dimensiondata_wait

description:
  - Create, update, and delete MCP 1.0 & 2.0 networks
author: 'Aimon Bustardo (@aimonb)'
options:
  name:
    description:
      - The name of the network domain to create.
    required: true
    type: str
  description:
    description:
      - Additional description of the network domain.
    required: false
    type: str
  service_plan:
    description:
      - The service plan, either "ESSENTIALS" or "ADVANCED".
      - MCP 2.0 Only.
    choices: [ESSENTIALS, ADVANCED]
    default: ESSENTIALS
    type: str
  state:
    description:
      - Should the resource be present or absent.
    choices: [present, absent]
    default: present
    type: str
'''

EXAMPLES = '''
- name: Create an MCP 1.0 network
  community.general.dimensiondata_network:
    region: na
    location: NA5
    name: mynet

- name: Create an MCP 2.0 network
  community.general.dimensiondata_network:
    region: na
    mcp_user: my_user
    mcp_password: my_password
    location: NA9
    name: mynet
    service_plan: ADVANCED

- name: Delete a network
  community.general.dimensiondata_network:
    region: na
    location: NA1
    name: mynet
    state: absent
'''

RETURN = '''
network:
  description: Dictionary describing the network.
  returned: On success when I(state=present).
  type: complex
  contains:
    id:
      description: Network ID.
      type: str
      sample: "8c787000-a000-4050-a215-280893411a7d"
    name:
      description: Network name.
      type: str
      sample: "My network"
    description:
      description: Network description.
      type: str
      sample: "My network description"
    location:
      description: Datacenter location.
      type: str
      sample: NA3
    status:
      description: Network status. (MCP 2.0 only)
      type: str
      sample: NORMAL
    private_net:
      description: Private network subnet. (MCP 1.0 only)
      type: str
      sample: "10.2.3.0"
    multicast:
      description: Multicast enabled? (MCP 1.0 only)
      type: bool
      sample: false
'''
import traceback

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.dimensiondata import HAS_LIBCLOUD, DimensionDataModule
from ansible.module_utils.common.text.converters import to_native

if HAS_LIBCLOUD:
    from libcloud.compute.base import NodeLocation
    from libcloud.common.dimensiondata import DimensionDataAPIException


class DimensionDataNetworkModule(DimensionDataModule):
    """
    The dimensiondata_network module for Ansible.
    """

    def __init__(self):
        """
        Create a new Dimension Data network module.
        """

        super(DimensionDataNetworkModule, self).__init__(
            module=AnsibleModule(
                argument_spec=DimensionDataModule.argument_spec_with_wait(
                    name=dict(type='str', required=True),
                    description=dict(type='str', required=False),
                    service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']),
                    state=dict(default='present', choices=['present', 'absent'])
                ),
                required_together=DimensionDataModule.required_together()
            )
        )

        self.name = self.module.params['name']
        self.description = self.module.params['description']
        self.service_plan = self.module.params['service_plan']
        self.state = self.module.params['state']

    def state_present(self):
        network = self._get_network()

        if network:
            self.module.exit_json(
                changed=False,
                msg='Network already exists',
                network=self._network_to_dict(network)
            )

        network = self._create_network()

        self.module.exit_json(
            changed=True,
            msg='Created network "%s" in datacenter "%s".' % (self.name, self.location),
            network=self._network_to_dict(network)
        )

    def state_absent(self):
        network = self._get_network()

        if not network:
            self.module.exit_json(
                changed=False,
                msg='Network "%s" does not exist' % self.name
            )

        self._delete_network(network)

    def _get_network(self):
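        # MCP 1.0 exposes plain networks while MCP 2.0 exposes network domains,
        # so pick the listing call that matches the detected API version.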
        if self.mcp_version == '1.0':
            networks = self.driver.list_networks(location=self.location)
        else:
            networks = self.driver.ex_list_network_domains(location=self.location)

        matched_network = [network for network in networks if network.name == self.name]
        if matched_network:
            return matched_network[0]

        return None

    def _network_to_dict(self, network):
        network_dict = dict(
            id=network.id,
            name=network.name,
            description=network.description
        )

        if isinstance(network.location, NodeLocation):
            network_dict['location'] = network.location.id
        else:
            network_dict['location'] = network.location

        if self.mcp_version == '1.0':
            network_dict['private_net'] = network.private_net
            network_dict['multicast'] = network.multicast
            network_dict['status'] = None
        else:
            network_dict['private_net'] = None
            network_dict['multicast'] = None
            network_dict['status'] = network.status

        return network_dict

    def _create_network(self):

        # Make sure service_plan argument is defined
        if self.mcp_version == '2.0' and 'service_plan' not in self.module.params:
            self.module.fail_json(
                msg='service_plan required when creating network and location is MCP 2.0'
            )

        # Create network
        try:
            if self.mcp_version == '1.0':
                network = self.driver.ex_create_network(
                    self.location,
                    self.name,
                    description=self.description
                )
            else:
                network = self.driver.ex_create_network_domain(
                    self.location,
                    self.name,
                    self.module.params['service_plan'],
                    description=self.description
                )
        except DimensionDataAPIException as e:

            self.module.fail_json(
                msg="Failed to create new network: %s" % to_native(e), exception=traceback.format_exc()
            )

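        # Optionally block until CloudControl reports the new network as
        # NORMAL, honouring the shared wait_poll_interval/wait_time parameters.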
        if self.module.params['wait'] is True:
            network = self._wait_for_network_state(network.id, 'NORMAL')

        return network

    def _delete_network(self, network):
        try:
            if self.mcp_version == '1.0':
                deleted = self.driver.ex_delete_network(network)
            else:
                deleted = self.driver.ex_delete_network_domain(network)

            if deleted:
                self.module.exit_json(
                    changed=True,
                    msg="Deleted network with id %s" % network.id
                )

            self.module.fail_json(
                msg="Unexpected failure deleting network with id %s" % network.id
            )

        except DimensionDataAPIException as e:
            self.module.fail_json(
                msg="Failed to delete network: %s" % to_native(e), exception=traceback.format_exc()
            )

    def _wait_for_network_state(self, net_id, state_to_wait_for):
        try:
            return self.driver.connection.wait_for_state(
                state_to_wait_for,
                self.driver.ex_get_network_domain,
                self.module.params['wait_poll_interval'],
                self.module.params['wait_time'],
                net_id
            )
        except DimensionDataAPIException as e:
            self.module.fail_json(
                msg='Network did not reach %s state in time: %s' % (state_to_wait_for, to_native(e)),
                exception=traceback.format_exc()
            )


def main():
    module = DimensionDataNetworkModule()
    if module.state == 'present':
        module.state_present()
    elif module.state == 'absent':
        module.state_absent()


if __name__ == '__main__':
    main()
@@ -0,0 +1,558 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Dimension Data
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
#
# Authors:
#   - Adam Friedman <tintoy@tintoy.io>

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
module: dimensiondata_vlan
short_description: Manage a VLAN in a Cloud Control network domain
extends_documentation_fragment:
  - community.general.dimensiondata
  - community.general.dimensiondata_wait

description:
  - Manage VLANs in Cloud Control network domains.
author: 'Adam Friedman (@tintoy)'
options:
  name:
    description:
      - The name of the target VLAN.
    type: str
    required: true
  description:
    description:
      - A description of the VLAN.
    type: str
    default: ''
  network_domain:
    description:
      - The Id or name of the target network domain.
    required: true
    type: str
  private_ipv4_base_address:
    description:
      - The base address for the VLAN's IPv4 network (e.g. 192.168.1.0).
    type: str
    default: ''
  private_ipv4_prefix_size:
    description:
      - The size of the IPv4 address space, e.g. 24.
      - Required, if C(private_ipv4_base_address) is specified.
    type: int
    default: 0
  state:
    description:
      - The desired state for the target VLAN.
      - C(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist).
    choices: [present, absent, readonly]
    default: present
    type: str
  allow_expand:
    description:
      - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently possesses.
      - If C(False), the module will fail under these conditions.
      - This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible).
    type: bool
    default: false
'''

EXAMPLES = '''
- name: Add or update VLAN
  community.general.dimensiondata_vlan:
    region: na
    location: NA5
    network_domain: test_network
    name: my_vlan1
    description: A test VLAN
    private_ipv4_base_address: 192.168.23.0
    private_ipv4_prefix_size: 24
    state: present
    wait: true

- name: Read / get VLAN details
  community.general.dimensiondata_vlan:
    region: na
    location: NA5
    network_domain: test_network
    name: my_vlan1
    state: readonly
    wait: true

- name: Delete a VLAN
  community.general.dimensiondata_vlan:
    region: na
    location: NA5
    network_domain: test_network
    name: my_vlan_1
    state: absent
    wait: true
'''

RETURN = '''
vlan:
  description: Dictionary describing the VLAN.
  returned: On success when I(state) is 'present'
  type: complex
  contains:
    id:
      description: VLAN ID.
      type: str
      sample: "aaaaa000-a000-4050-a215-2808934ccccc"
    name:
      description: VLAN name.
      type: str
      sample: "My VLAN"
    description:
      description: VLAN description.
      type: str
      sample: "My VLAN description"
    location:
      description: Datacenter location.
      type: str
      sample: NA3
    private_ipv4_base_address:
      description: The base address for the VLAN's private IPV4 network.
      type: str
      sample: 192.168.23.0
    private_ipv4_prefix_size:
      description: The prefix size for the VLAN's private IPV4 network.
      type: int
      sample: 24
    private_ipv4_gateway_address:
      description: The gateway address for the VLAN's private IPV4 network.
      type: str
      sample: 192.168.23.1
    private_ipv6_base_address:
      description: The base address for the VLAN's IPV6 network.
      type: str
      sample: 2402:9900:111:1195:0:0:0:0
    private_ipv6_prefix_size:
      description: The prefix size for the VLAN's IPV6 network.
      type: int
      sample: 64
    private_ipv6_gateway_address:
      description: The gateway address for the VLAN's IPV6 network.
      type: str
      sample: 2402:9900:111:1195:0:0:0:1
    status:
      description: VLAN status.
      type: str
      sample: NORMAL
'''

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError

try:
    from libcloud.common.dimensiondata import DimensionDataVlan, DimensionDataAPIException

    HAS_LIBCLOUD = True

except ImportError:
    DimensionDataVlan = None

    HAS_LIBCLOUD = False


class DimensionDataVlanModule(DimensionDataModule):
    """
    The dimensiondata_vlan module for Ansible.
    """

    def __init__(self):
        """
        Create a new Dimension Data VLAN module.
        """

        super(DimensionDataVlanModule, self).__init__(
            module=AnsibleModule(
                argument_spec=DimensionDataModule.argument_spec_with_wait(
                    name=dict(required=True, type='str'),
                    description=dict(default='', type='str'),
                    network_domain=dict(required=True, type='str'),
                    private_ipv4_base_address=dict(default='', type='str'),
                    private_ipv4_prefix_size=dict(default=0, type='int'),
                    allow_expand=dict(required=False, default=False, type='bool'),
                    state=dict(default='present', choices=['present', 'absent', 'readonly'])
                ),
                required_together=DimensionDataModule.required_together()
            )
        )

        self.name = self.module.params['name']
        self.description = self.module.params['description']
        self.network_domain_selector = self.module.params['network_domain']
        self.private_ipv4_base_address = self.module.params['private_ipv4_base_address']
        self.private_ipv4_prefix_size = self.module.params['private_ipv4_prefix_size']
        self.state = self.module.params['state']
        self.allow_expand = self.module.params['allow_expand']

        if self.wait and self.state != 'present':
            self.module.fail_json(
                msg='The wait parameter is only supported when state is "present".'
            )

    def state_present(self):
        """
        Ensure that the target VLAN is present.
        """

        network_domain = self._get_network_domain()

        vlan = self._get_vlan(network_domain)
        if not vlan:
            if self.module.check_mode:
                self.module.exit_json(
                    msg='VLAN "{0}" is absent from network domain "{1}" (should be present).'.format(
                        self.name, self.network_domain_selector
                    ),
                    changed=True
                )

            vlan = self._create_vlan(network_domain)
            self.module.exit_json(
                msg='Created VLAN "{0}" in network domain "{1}".'.format(
                    self.name, self.network_domain_selector
                ),
                vlan=vlan_to_dict(vlan),
                changed=True
            )
        else:
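            # The VLAN already exists: diff it against the module parameters
            # and decide between a no-op, an edit, and/or a network expand.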
            diff = VlanDiff(vlan, self.module.params)
            if not diff.has_changes():
                self.module.exit_json(
                    msg='VLAN "{0}" is present in network domain "{1}" (no changes detected).'.format(
                        self.name, self.network_domain_selector
                    ),
                    vlan=vlan_to_dict(vlan),
                    changed=False
                )

                return

            try:
                diff.ensure_legal_change()
            except InvalidVlanChangeError as invalid_vlan_change:
                self.module.fail_json(
                    msg='Unable to update VLAN "{0}" in network domain "{1}": {2}'.format(
                        self.name, self.network_domain_selector, invalid_vlan_change
                    )
                )

            if diff.needs_expand() and not self.allow_expand:
                self.module.fail_json(
                    msg='The configured private IPv4 network size ({0}-bit prefix) for '.format(
                        self.private_ipv4_prefix_size
                    ) + 'the VLAN differs from its current network size ({0}-bit prefix) '.format(
                        vlan.private_ipv4_range_size
                    ) + 'and needs to be expanded. Use allow_expand=true if this is what you want.'
                )

            if self.module.check_mode:
                self.module.exit_json(
                    msg='VLAN "{0}" is present in network domain "{1}" (changes detected).'.format(
                        self.name, self.network_domain_selector
                    ),
                    vlan=vlan_to_dict(vlan),
                    changed=True
                )

            if diff.needs_edit():
                vlan.name = self.name
                vlan.description = self.description

                self.driver.ex_update_vlan(vlan)

            if diff.needs_expand():
                vlan.private_ipv4_range_size = self.private_ipv4_prefix_size
                self.driver.ex_expand_vlan(vlan)

            self.module.exit_json(
                msg='Updated VLAN "{0}" in network domain "{1}".'.format(
                    self.name, self.network_domain_selector
                ),
                vlan=vlan_to_dict(vlan),
                changed=True
            )

    def state_readonly(self):
        """
        Read the target VLAN's state.
        """

        network_domain = self._get_network_domain()

        vlan = self._get_vlan(network_domain)
        if vlan:
            self.module.exit_json(
                vlan=vlan_to_dict(vlan),
                changed=False
            )
        else:
            self.module.fail_json(
                msg='VLAN "{0}" does not exist in network domain "{1}".'.format(
                    self.name, self.network_domain_selector
                )
            )

    def state_absent(self):
        """
        Ensure that the target VLAN is not present.
        """

        network_domain = self._get_network_domain()

        vlan = self._get_vlan(network_domain)
        if not vlan:
            self.module.exit_json(
                msg='VLAN "{0}" is absent from network domain "{1}".'.format(
                    self.name, self.network_domain_selector
                ),
                changed=False
            )

            return

        if self.module.check_mode:
            self.module.exit_json(
                msg='VLAN "{0}" is present in network domain "{1}" (should be absent).'.format(
                    self.name, self.network_domain_selector
                ),
                vlan=vlan_to_dict(vlan),
                changed=True
            )

        self._delete_vlan(vlan)

        self.module.exit_json(
            msg='Deleted VLAN "{0}" from network domain "{1}".'.format(
                self.name, self.network_domain_selector
            ),
            changed=True
        )

    def _get_vlan(self, network_domain):
        """
        Retrieve the target VLAN details from CloudControl.

        :param network_domain: The target network domain.
        :return: The VLAN, or None if the target VLAN was not found.
        :rtype: DimensionDataVlan
        """

        vlans = self.driver.ex_list_vlans(
            location=self.location,
            network_domain=network_domain
        )
        matching_vlans = [vlan for vlan in vlans if vlan.name == self.name]
        if matching_vlans:
            return matching_vlans[0]

        return None

    def _create_vlan(self, network_domain):
        vlan = self.driver.ex_create_vlan(
            network_domain,
            self.name,
            self.private_ipv4_base_address,
            self.description,
            self.private_ipv4_prefix_size
        )

        if self.wait:
            vlan = self._wait_for_vlan_state(vlan.id, 'NORMAL')

        return vlan

    def _delete_vlan(self, vlan):
        try:
            self.driver.ex_delete_vlan(vlan)

            # Not currently supported for deletes due to a bug in libcloud (module will error out if "wait" is specified when "state" is not "present").
            if self.wait:
                self._wait_for_vlan_state(vlan, 'NOT_FOUND')

        except DimensionDataAPIException as api_exception:
            self.module.fail_json(
                msg='Failed to delete VLAN "{0}" due to unexpected error from the CloudControl API: {1}'.format(
                    vlan.id, api_exception.msg
                )
            )

    def _wait_for_vlan_state(self, vlan, state_to_wait_for):
        network_domain = self._get_network_domain()

        wait_poll_interval = self.module.params['wait_poll_interval']
        wait_time = self.module.params['wait_time']

        # Bizarre bug in libcloud when checking status after delete; socket.error is too generic to catch in this context so for now we don't even try.

        try:
            return self.driver.connection.wait_for_state(
                state_to_wait_for,
                self.driver.ex_get_vlan,
                wait_poll_interval,
                wait_time,
                vlan
            )

        except DimensionDataAPIException as api_exception:
            if api_exception.code != 'RESOURCE_NOT_FOUND':
                raise

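            # The VLAN is gone, which is what we were waiting for: synthesise a
            # minimal DimensionDataVlan in the NOT_FOUND state so callers always
            # get a consistent return type.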
            return DimensionDataVlan(
                id=vlan.id,
                status='NOT_FOUND',
                name='',
                description='',
                private_ipv4_range_address='',
                private_ipv4_range_size=0,
                ipv4_gateway='',
                ipv6_range_address='',
                ipv6_range_size=0,
                ipv6_gateway='',
                location=self.location,
                network_domain=network_domain
            )

    def _get_network_domain(self):
        """
        Retrieve the target network domain from the Cloud Control API.

        :return: The network domain.
        """

        try:
            return self.get_network_domain(
                self.network_domain_selector, self.location
            )
        except UnknownNetworkError:
            self.module.fail_json(
                msg='Cannot find network domain "{0}" in datacenter "{1}".'.format(
                    self.network_domain_selector, self.location
                )
            )

            return None


class InvalidVlanChangeError(Exception):
    """
    Error raised when an illegal change to VLAN state is attempted.
    """

    pass


class VlanDiff(object):
    """
    Represents differences between VLAN information (from CloudControl) and module parameters.
    """

    def __init__(self, vlan, module_params):
        """

        :param vlan: The VLAN information from CloudControl.
        :type vlan: DimensionDataVlan
        :param module_params: The module parameters.
        :type module_params: dict
        """

        self.vlan = vlan
        self.module_params = module_params

        self.name_changed = module_params['name'] != vlan.name
        self.description_changed = module_params['description'] != vlan.description
        self.private_ipv4_base_address_changed = module_params['private_ipv4_base_address'] != vlan.private_ipv4_range_address
        self.private_ipv4_prefix_size_changed = module_params['private_ipv4_prefix_size'] != vlan.private_ipv4_range_size

        # Is configured prefix size greater than or less than the actual prefix size?
        private_ipv4_prefix_size_difference = module_params['private_ipv4_prefix_size'] - vlan.private_ipv4_range_size
        self.private_ipv4_prefix_size_increased = private_ipv4_prefix_size_difference > 0
        self.private_ipv4_prefix_size_decreased = private_ipv4_prefix_size_difference < 0

    def has_changes(self):
        """
        Does the VlanDiff represent any changes between the VLAN and module configuration?

        :return: True, if there are changes; otherwise, False.
        """

        return self.needs_edit() or self.needs_expand()

    def ensure_legal_change(self):
        """
        Ensure the change (if any) represented by the VlanDiff represents a legal change to VLAN state.

        - private_ipv4_base_address cannot be changed
        - private_ipv4_prefix_size must be greater than or equal to the VLAN's existing private_ipv4_range_size

        :raise InvalidVlanChangeError: The VlanDiff does not represent a legal change to VLAN state.
        """

        # Cannot change base address for private IPv4 network.
        if self.private_ipv4_base_address_changed:
            raise InvalidVlanChangeError('Cannot change the private IPV4 base address for an existing VLAN.')

        # Cannot shrink private IPv4 network (by increasing prefix size).
        if self.private_ipv4_prefix_size_increased:
            raise InvalidVlanChangeError('Cannot shrink the private IPV4 network for an existing VLAN (only expand is supported).')

    def needs_edit(self):
        """
        Is an Edit operation required to resolve the differences between the VLAN information and the module parameters?

        :return: True, if an Edit operation is required; otherwise, False.
        """

        return self.name_changed or self.description_changed

    def needs_expand(self):
        """
        Is an Expand operation required to resolve the differences between the VLAN information and the module parameters?

        The VLAN's network is expanded by reducing the size of its network prefix.

        :return: True, if an Expand operation is required; otherwise, False.
        """

        return self.private_ipv4_prefix_size_decreased


def vlan_to_dict(vlan):
    return {
        'id': vlan.id,
        'name': vlan.name,
        'description': vlan.description,
        'location': vlan.location.id,
        'private_ipv4_base_address': vlan.private_ipv4_range_address,
        'private_ipv4_prefix_size': vlan.private_ipv4_range_size,
        'private_ipv4_gateway_address': vlan.ipv4_gateway,
        'ipv6_base_address': vlan.ipv6_range_address,
        'ipv6_prefix_size': vlan.ipv6_range_size,
        'ipv6_gateway_address': vlan.ipv6_gateway,
        'status': vlan.status
    }


def main():
    module = DimensionDataVlanModule()

    if module.state == 'present':
        module.state_present()
    elif module.state == 'readonly':
        module.state_readonly()
    elif module.state == 'absent':
        module.state_absent()


if __name__ == '__main__':
    main()
@@ -0,0 +1,216 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2021, Christian Wollinger <cwollinger@web.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: discord
short_description: Send Discord messages
version_added: 3.1.0
description:
  - Sends a message to a Discord channel using the Discord webhook API.
author: Christian Wollinger (@cwollinger)
seealso:
  - name: API documentation
    description: Documentation for Discord API
    link: https://discord.com/developers/docs/resources/webhook#execute-webhook
options:
  webhook_id:
    description:
      - The webhook ID.
      - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})."
    required: true
    type: str
  webhook_token:
    description:
      - The webhook token.
      - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})."
    required: true
    type: str
  content:
    description:
      - Content of the message to the Discord channel.
      - At least one of I(content) and I(embeds) must be specified.
    type: str
  username:
    description:
      - Overrides the default username of the webhook.
    type: str
  avatar_url:
    description:
      - Overrides the default avatar of the webhook.
    type: str
  tts:
    description:
      - Set this to C(true) if this is a TTS (Text to Speech) message.
    type: bool
    default: false
  embeds:
    description:
      - Send messages as Embeds to the Discord channel.
      - Embeds can have a colored border, embedded images, text fields and more.
      - "Allowed parameters are described in the Discord Docs: U(https://discord.com/developers/docs/resources/channel#embed-object)"
      - At least one of I(content) and I(embeds) must be specified.
    type: list
    elements: dict
'''

EXAMPLES = """
- name: Send a message to the Discord channel
  community.general.discord:
    webhook_id: "00000"
    webhook_token: "XXXYYY"
    content: "This is a message from ansible"

- name: Send a message to the Discord channel with specific username and avatar
  community.general.discord:
    webhook_id: "00000"
    webhook_token: "XXXYYY"
    content: "This is a message from ansible"
    username: Ansible
    avatar_url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"

- name: Send an embedded message to the Discord channel
  community.general.discord:
    webhook_id: "00000"
    webhook_token: "XXXYYY"
    embeds:
      - title: "Embedded message"
        description: "This is an embedded message"
        footer:
          text: "Author: Ansible"
        image:
          url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"

- name: Send two embedded messages
  community.general.discord:
    webhook_id: "00000"
    webhook_token: "XXXYYY"
    embeds:
      - title: "First message"
        description: "This is my first embedded message"
        footer:
          text: "Author: Ansible"
        image:
          url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"
      - title: "Second message"
description: "This is my first second message"
        footer:
          text: "Author: Ansible"
          icon_url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"
        fields:
          - name: "Field 1"
            value: "Value of my first field"
          - name: "Field 2"
            value: "Value of my second field"
        timestamp: "{{ ansible_date_time.iso8601 }}"
"""

RETURN = """
http_code:
  description:
    - Response Code returned by Discord API.
  returned: always
  type: int
  sample: 204
"""

from ansible.module_utils.urls import fetch_url
from ansible.module_utils.basic import AnsibleModule


def discord_check_mode(module):

    webhook_id = module.params['webhook_id']
    webhook_token = module.params['webhook_token']

    headers = {
        'content-type': 'application/json'
    }

    url = "https://discord.com/api/webhooks/%s/%s" % (
        webhook_id, webhook_token)

    response, info = fetch_url(module, url, method='GET', headers=headers)
    return response, info


def discord_text_msg(module):

    webhook_id = module.params['webhook_id']
    webhook_token = module.params['webhook_token']
    content = module.params['content']
    user = module.params['username']
    avatar_url = module.params['avatar_url']
    tts = module.params['tts']
    embeds = module.params['embeds']

    headers = {
        'content-type': 'application/json'
    }

    url = "https://discord.com/api/webhooks/%s/%s" % (
        webhook_id, webhook_token)

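    # Build the body for Discord's "Execute Webhook" endpoint; optional fields
    # that were not set are sent as null, which the API appears to ignore.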
    payload = {
        'content': content,
        'username': user,
        'avatar_url': avatar_url,
        'tts': tts,
        'embeds': embeds,
    }

    payload = module.jsonify(payload)

    response, info = fetch_url(module, url, data=payload, headers=headers, method='POST')
    return response, info


def main():
    module = AnsibleModule(
        argument_spec=dict(
            webhook_id=dict(type='str', required=True),
            webhook_token=dict(type='str', required=True, no_log=True),
            content=dict(type='str'),
            username=dict(type='str'),
            avatar_url=dict(type='str'),
            tts=dict(type='bool', default=False),
            embeds=dict(type='list', elements='dict'),
        ),
        required_one_of=[['content', 'embeds']],
        supports_check_mode=True
    )

    result = dict(
        changed=False,
        http_code='',
    )

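    # In check mode, only issue a GET against the webhook URL to verify that it
    # exists and that the token is valid; no message is posted.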
    if module.check_mode:
        response, info = discord_check_mode(module)
        if info['status'] != 200:
            try:
                module.fail_json(http_code=info['status'], msg=info['msg'], response=module.from_json(info['body']), info=info)
            except Exception:
                module.fail_json(http_code=info['status'], msg=info['msg'], info=info)
        else:
            module.exit_json(msg=info['msg'], changed=False, http_code=info['status'], response=module.from_json(response.read()))
    else:
        response, info = discord_text_msg(module)
        if info['status'] != 204:
            try:
                module.fail_json(http_code=info['status'], msg=info['msg'], response=module.from_json(info['body']), info=info)
            except Exception:
                module.fail_json(http_code=info['status'], msg=info['msg'], info=info)
        else:
            module.exit_json(msg=info['msg'], changed=True, http_code=info['status'])


if __name__ == "__main__":
    main()
@@ -0,0 +1,411 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
# Copyright (c) 2013, Scott Anderson <scottanderson42@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: django_manage
short_description: Manages a Django application
description:
  - Manages a Django application using the C(manage.py) application frontend to C(django-admin). With the
    I(virtualenv) parameter, all management commands will be executed by the given C(virtualenv) installation.
options:
  command:
    description:
      - The name of the Django management command to run. The commands listed below are built in this module and have some basic parameter validation.
      - >
        C(cleanup) - clean up old data from the database (deprecated in Django 1.5). This parameter will be
        removed in community.general 9.0.0. Use C(clearsessions) instead.
      - C(collectstatic) - Collects the static files into C(STATIC_ROOT).
      - C(createcachetable) - Creates the cache tables for use with the database cache backend.
      - C(flush) - Removes all data from the database.
      - C(loaddata) - Searches for and loads the contents of the named I(fixtures) into the database.
      - C(migrate) - Synchronizes the database state with models and migrations.
      - >
        C(syncdb) - Synchronizes the database state with models and migrations (deprecated in Django 1.7).
        This parameter will be removed in community.general 9.0.0. Use C(migrate) instead.
      - C(test) - Runs tests for all installed apps.
      - >
        C(validate) - Validates all installed models (deprecated in Django 1.7). This parameter will be
        removed in community.general 9.0.0. Use C(check) instead.
      - Other commands can be entered, but will fail if they are unknown to Django. Other commands that may
        prompt for user input should be run with the C(--noinput) flag.
    type: str
    required: true
  project_path:
    description:
      - The path to the root of the Django application where C(manage.py) lives.
    type: path
    required: true
    aliases: [app_path, chdir]
  settings:
    description:
      - The Python path to the application's settings module, such as C(myapp.settings).
    type: path
    required: false
  pythonpath:
    description:
      - A directory to add to the Python path. Typically used to include the settings module if it is located
        external to the application directory.
      - This would be equivalent to adding I(pythonpath)'s value to the C(PYTHONPATH) environment variable.
    type: path
    required: false
    aliases: [python_path]
  virtualenv:
    description:
      - An optional path to a C(virtualenv) installation to use while running the manage application.
    type: path
    aliases: [virtual_env]
  apps:
    description:
      - A list of space-delimited apps to target. Used by the C(test) command.
    type: str
    required: false
  cache_table:
    description:
      - The name of the table used for database-backed caching. Used by the C(createcachetable) command.
    type: str
    required: false
  clear:
    description:
      - Clear the existing files before trying to copy or link the original file.
      - Used only with the C(collectstatic) command. The C(--noinput) argument will be added automatically.
    required: false
    default: false
    type: bool
  database:
    description:
      - The database to target. Used by the C(createcachetable), C(flush), C(loaddata), C(syncdb),
        and C(migrate) commands.
    type: str
    required: false
  failfast:
    description:
      - Fail the command immediately if a test fails. Used by the C(test) command.
    required: false
    default: false
    type: bool
    aliases: [fail_fast]
  fixtures:
    description:
      - A space-delimited list of fixture file names to load in the database. B(Required) by the C(loaddata) command.
    type: str
    required: false
  skip:
    description:
- Will skip over out-of-order missing migrations, you can only use this parameter with C(migrate) command.
|
||||
required: false
|
||||
type: bool
|
||||
merge:
|
||||
description:
|
||||
- Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this
|
||||
parameter with C(migrate) command.
|
||||
required: false
|
||||
type: bool
|
||||
link:
|
||||
description:
|
||||
- Will create links to the files instead of copying them, you can only use this parameter with
|
||||
C(collectstatic) command.
|
||||
required: false
|
||||
type: bool
|
||||
testrunner:
|
||||
description:
|
||||
- Controls the test runner class that is used to execute tests.
|
||||
- This parameter is passed as-is to C(manage.py).
|
||||
type: str
|
||||
required: false
|
||||
aliases: [test_runner]
|
||||
ack_venv_creation_deprecation:
|
||||
description:
|
||||
- >-
|
||||
When a I(virtualenv) is set but the virtual environment does not exist, the current behavior is
|
||||
to create a new virtual environment. That behavior is deprecated and if that case happens it will
|
||||
generate a deprecation warning. Set this flag to C(true) to suppress the deprecation warning.
|
||||
- Please note that you will receive no further warning about this being removed until the module
|
||||
will start failing in such cases from community.general 9.0.0 on.
|
||||
type: bool
|
||||
version_added: 5.8.0
|
||||
|
||||
notes:
|
||||
- >
|
||||
B(ATTENTION - DEPRECATION): Support for Django releases older than 4.1 will be removed in
|
||||
community.general version 9.0.0 (estimated to be released in May 2024).
|
||||
Please notice that Django 4.1 requires Python 3.8 or greater.
|
||||
- C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the I(virtualenv) parameter
|
||||
is specified. This requirement is deprecated and will be removed in community.general version 9.0.0.
|
||||
- This module will create a virtualenv if the I(virtualenv) parameter is specified and a virtual environment does not already
|
||||
exist at the given location. This behavior is deprecated and will be removed in community.general version 9.0.0.
|
||||
- The parameter I(virtualenv) will remain in use, but it will require the specified virtualenv to exist.
|
||||
The recommended way to create one in Ansible is by using M(ansible.builtin.pip).
|
||||
- This module assumes English error messages for the C(createcachetable) command to detect table existence,
|
||||
unfortunately.
|
||||
- To be able to use the C(migrate) command with django versions < 1.7, you must have C(south) installed and added
|
||||
as an app in your settings.
|
||||
- To be able to use the C(collectstatic) command, you must have enabled staticfiles in your settings.
|
||||
- Your C(manage.py) application must be executable (rwxr-xr-x), and must have a valid shebang,
|
||||
i.e. C(#!/usr/bin/env python), for invoking the appropriate Python interpreter.
|
||||
seealso:
|
||||
- name: django-admin and manage.py Reference
|
||||
description: Reference for C(django-admin) or C(manage.py) commands.
|
||||
link: https://docs.djangoproject.com/en/4.1/ref/django-admin/
|
||||
- name: Django Download page
|
||||
description: The page showing how to get Django and the timeline of supported releases.
|
||||
link: https://www.djangoproject.com/download/
|
||||
- name: What Python version can I use with Django?
|
||||
description: From the Django FAQ, the response to Python requirements for the framework.
|
||||
link: https://docs.djangoproject.com/en/dev/faq/install/#what-python-version-can-i-use-with-django
|
||||
requirements: [ "virtualenv", "django" ]
|
||||
author:
|
||||
- Alexei Znamensky (@russoz)
|
||||
- Scott Anderson (@tastychutney)
|
||||
'''
|
||||
|
||||
EXAMPLES = """
- name: Run cleanup on the application installed in django_dir
  community.general.django_manage:
    command: cleanup
    project_path: "{{ django_dir }}"

- name: Load the initial_data fixture into the application
  community.general.django_manage:
    command: loaddata
    project_path: "{{ django_dir }}"
    fixtures: "{{ initial_data }}"

- name: Run syncdb on the application
  community.general.django_manage:
    command: syncdb
    project_path: "{{ django_dir }}"
    settings: "{{ settings_app_name }}"
    pythonpath: "{{ settings_dir }}"
    virtualenv: "{{ virtualenv_dir }}"

- name: Run the SmokeTest test case from the main app. Useful for testing deploys
  community.general.django_manage:
    command: test
    project_path: "{{ django_dir }}"
    apps: main.SmokeTest

- name: Create an initial superuser
  community.general.django_manage:
    command: "createsuperuser --noinput --username=admin --email=admin@example.com"
    project_path: "{{ django_dir }}"
"""

import os
import sys
import shlex

from ansible.module_utils.basic import AnsibleModule


def _fail(module, cmd, out, err, **kwargs):
    msg = ''
    if out:
        msg += "stdout: %s" % (out, )
    if err:
        msg += "\n:stderr: %s" % (err, )
    module.fail_json(cmd=cmd, msg=msg, **kwargs)


def _ensure_virtualenv(module):

    venv_param = module.params['virtualenv']
    if venv_param is None:
        return

    vbin = os.path.join(venv_param, 'bin')
    activate = os.path.join(vbin, 'activate')

    if not os.path.exists(activate):
        # In version 9.0.0, if the venv is not found, it should fail_json() here.
        if not module.params['ack_venv_creation_deprecation']:
            module.deprecate(
                'The behavior of "creating the virtual environment when missing" is being '
                'deprecated and will be removed in community.general version 9.0.0. '
                'Set the module parameter `ack_venv_creation_deprecation: true` to '
                'prevent this message from showing up when creating a virtualenv.',
                version='9.0.0',
                collection_name='community.general',
            )

        virtualenv = module.get_bin_path('virtualenv', True)
        vcmd = [virtualenv, venv_param]
        rc, out_venv, err_venv = module.run_command(vcmd)
        if rc != 0:
            _fail(module, vcmd, out_venv, err_venv)

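    # Prepend the virtualenv's bin directory to PATH so that the manage.py
    # invocation later on picks up the virtualenv's Python and packages.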
    os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
    os.environ["VIRTUAL_ENV"] = venv_param


def createcachetable_check_changed(output):
    return "already exists" not in output


def flush_filter_output(line):
    return "Installed" in line and "Installed 0 object" not in line


def loaddata_filter_output(line):
    return "Installed" in line and "Installed 0 object" not in line


def syncdb_filter_output(line):
    return ("Creating table " in line) \
        or ("Installed" in line and "Installed 0 object" not in line)


def migrate_filter_output(line):
    return ("Migrating forwards " in line) \
        or ("Installed" in line and "Installed 0 object" not in line) \
        or ("Applying" in line)


def collectstatic_filter_output(line):
    return line and "0 static files" not in line


def main():
    command_allowed_param_map = dict(
        cleanup=(),
        createcachetable=('cache_table', 'database', ),
        flush=('database', ),
        loaddata=('database', 'fixtures', ),
        syncdb=('database', ),
        test=('failfast', 'testrunner', 'apps', ),
        validate=(),
        migrate=('apps', 'skip', 'merge', 'database',),
        collectstatic=('clear', 'link', ),
    )

    command_required_param_map = dict(
        loaddata=('fixtures', ),
    )

    # forces --noinput on every command that needs it
    noinput_commands = (
        'flush',
        'syncdb',
        'migrate',
        'test',
        'collectstatic',
    )

    # These params are allowed for certain commands only
    specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'testrunner')

    # These params are automatically added to the command if present
    general_params = ('settings', 'pythonpath', 'database',)
    specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
    end_of_command_params = ('apps', 'cache_table', 'fixtures')

    module = AnsibleModule(
        argument_spec=dict(
            command=dict(required=True, type='str'),
            project_path=dict(required=True, type='path', aliases=['app_path', 'chdir']),
            settings=dict(type='path'),
            pythonpath=dict(type='path', aliases=['python_path']),
            virtualenv=dict(type='path', aliases=['virtual_env']),

            apps=dict(),
            cache_table=dict(type='str'),
            clear=dict(default=False, type='bool'),
            database=dict(type='str'),
            failfast=dict(default=False, type='bool', aliases=['fail_fast']),
            fixtures=dict(type='str'),
            testrunner=dict(type='str', aliases=['test_runner']),
            skip=dict(type='bool'),
            merge=dict(type='bool'),
            link=dict(type='bool'),
            ack_venv_creation_deprecation=dict(type='bool'),
        ),
    )

    command_split = shlex.split(module.params['command'])
    command_bin = command_split[0]
    project_path = module.params['project_path']
    virtualenv = module.params['virtualenv']

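    # Commands listed in this mapping are deprecated and trigger a warning
    # pointing at their replacement; the KeyError path is the normal case for
    # all other commands.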
    try:
        _deprecation = dict(
            cleanup="clearsessions",
            syncdb="migrate",
            validate="check",
        )
        module.deprecate(
            'The command {0} has been deprecated as it is no longer supported in recent Django versions. '
            'Please use the command {1} instead, which provides similar capability.'.format(command_bin, _deprecation[command_bin]),
            version='9.0.0',
            collection_name='community.general'
        )
    except KeyError:
        pass

    for param in specific_params:
        value = module.params[param]
        if value and param not in command_allowed_param_map[command_bin]:
            module.fail_json(msg='%s param is incompatible with command=%s' % (param, command_bin))

    for param in command_required_param_map.get(command_bin, ()):
        if not module.params[param]:
            module.fail_json(msg='%s param is required for command=%s' % (param, command_bin))

    _ensure_virtualenv(module)

    run_cmd_args = ["./manage.py"] + command_split

    if command_bin in noinput_commands and '--noinput' not in command_split:
        run_cmd_args.append("--noinput")

    for param in general_params:
        if module.params[param]:
            run_cmd_args.append('--%s=%s' % (param, module.params[param]))

    for param in specific_boolean_params:
        if module.params[param]:
            run_cmd_args.append('--%s' % param)

    # these params always get tacked on the end of the command
    for param in end_of_command_params:
        if module.params[param]:
            if param in ('fixtures', 'apps'):
                run_cmd_args.extend(shlex.split(module.params[param]))
            else:
                run_cmd_args.append(module.params[param])

    rc, out, err = module.run_command(run_cmd_args, cwd=project_path)
    if rc != 0:
        if command_bin == 'createcachetable' and 'table' in err and 'already exists' in err:
            out = 'already exists.'
        else:
            if "Unknown command:" in err:
                _fail(module, run_cmd_args, err, "Unknown django command: %s" % command_bin)
            _fail(module, run_cmd_args, out, err, path=os.environ["PATH"], syspath=sys.path)

    changed = False

    lines = out.split('\n')
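    # Changed-state detection is convention based: an optional
    # "<command>_filter_output" line filter and/or "<command>_check_changed"
    # helper defined above is looked up by name for the built-in commands.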
    filt = globals().get(command_bin + "_filter_output", None)
    if filt:
        filtered_output = list(filter(filt, lines))
        if len(filtered_output):
            changed = True
    check_changed = globals().get("{0}_check_changed".format(command_bin), None)
    if check_changed:
        changed = check_changed(out)

    module.exit_json(changed=changed, out=out, cmd=run_cmd_args, app_path=project_path, project_path=project_path,
                     virtualenv=virtualenv, settings=module.params['settings'], pythonpath=module.params['pythonpath'])


if __name__ == '__main__':
    main()
@@ -0,0 +1,348 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2021, Roberto Moreda <moreda@allenta.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = r'''
---
module: dnf_versionlock
version_added: '4.0.0'
short_description: Locks package versions in C(dnf) based systems
description:
  - Locks package versions using the C(versionlock) plugin in C(dnf) based
    systems. This plugin takes a set of names and versions for packages and
    excludes all other versions of those packages, which allows you, for
    example, to protect packages from being updated by newer versions. The
    state of the plugin that reflects locking of packages is the C(locklist).
options:
  name:
    description:
      - Package name spec to add to, exclude from, or delete from the
        C(locklist), using the format expected by the C(dnf repoquery) command.
      - This parameter is mutually exclusive with I(state=clean).
    type: list
    required: false
    elements: str
    default: []
  raw:
    description:
      - Do not resolve package name specs to NEVRAs to find the specific
        version to lock to. Instead, the package name specs are used as they
        are. This enables locking to versions of the package that are not yet
        available.
    type: bool
    default: false
  state:
    description:
      - Whether to add (C(present) or C(excluded)) to or remove (C(absent) or
        C(clean)) from the C(locklist).
      - C(present) will add a package name spec to the C(locklist). If there is
        an installed package that matches, then only that version will be
        added. Otherwise, all available package versions will be added.
      - C(excluded) will add a package name spec as excluded to the
        C(locklist). It means that packages represented by the package name
        spec will be excluded from transaction operations. All available
        package versions will be added.
      - C(absent) will delete entries in the C(locklist) that match the
        package name spec.
      - C(clean) will delete all entries in the C(locklist). This option is
        mutually exclusive with C(name).
    choices: [ 'absent', 'clean', 'excluded', 'present' ]
    type: str
    default: present
notes:
  - The logic of the C(versionlock) plugin for corner cases can be
    confusing, so please take into account that this module will do its best to
    give a C(check_mode) prediction on what is going to happen. In case of
    doubt, check the documentation of the plugin.
  - Sometimes the module may predict changes in C(check_mode) that will not
    actually happen, because C(versionlock) concludes that there is already an
    entry in the C(locklist) that matches.
  - In an ideal world, the C(versionlock) plugin would have a dry-run option to
    know for sure what is going to happen. So far we have to work with a best
    guess as close as possible to the behaviour inferred from its code.
  - For most cases where you want to lock and unlock specific versions of a
    package, this works fairly well.
  - Supports C(check_mode).
requirements:
  - dnf
  - dnf-plugin-versionlock
author:
  - Roberto Moreda (@moreda) <moreda@allenta.com>
'''

EXAMPLES = r'''
- name: Prevent installed nginx from being updated
  community.general.dnf_versionlock:
    name: nginx
    state: present

- name: Prevent multiple packages from being updated
  community.general.dnf_versionlock:
    name:
      - nginx
      - haproxy
    state: present

- name: Remove lock from nginx to be updated again
  community.general.dnf_versionlock:
    name: nginx
    state: absent

- name: Exclude bind 32:9.11 from installs or updates
  community.general.dnf_versionlock:
    name: bind-32:9.11*
    state: excluded

- name: Keep bash package in major version 4
  community.general.dnf_versionlock:
    name: bash-0:4.*
    raw: true
    state: present

- name: Delete all entries in the locklist of versionlock
  community.general.dnf_versionlock:
    state: clean
'''

RETURN = r'''
locklist_pre:
  description: Locklist before module execution.
  returned: success
  type: list
  elements: str
  sample: [ 'bash-0:4.4.20-1.el8_4.*', '!bind-32:9.11.26-4.el8_4.*' ]
locklist_post:
  description: Locklist after module execution.
  returned: success and (not check mode or state is clean)
  type: list
  elements: str
  sample: [ 'bash-0:4.4.20-1.el8_4.*' ]
specs_toadd:
  description: Package name specs meant to be added by versionlock.
  returned: success
  type: list
  elements: str
  sample: [ 'bash' ]
specs_todelete:
  description: Package name specs meant to be deleted by versionlock.
  returned: success
  type: list
  elements: str
  sample: [ 'bind' ]
'''

from ansible.module_utils.basic import AnsibleModule
import fnmatch
import os
import re

DNF_BIN = "/usr/bin/dnf"
VERSIONLOCK_CONF = "/etc/dnf/plugins/versionlock.conf"
# NEVRA regex.
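# A NEVRA has the form name-epoch:version-release.arch, for example
# "bash-0:4.4.20-1.el8_4.x86_64".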
NEVRA_RE = re.compile(r"^(?P<name>.+)-(?P<epoch>\d+):(?P<version>.+)-"
                      r"(?P<release>.+)\.(?P<arch>.+)$")


def do_versionlock(module, command, patterns=None, raw=False):
    patterns = [] if not patterns else patterns
    raw_parameter = ["--raw"] if raw else []
    # Call dnf versionlock using just one full NEVRA package-name-spec at a
    # time, because multiple package-name-specs and globs are not well
    # supported.
    #
    # This is a workaround for two alleged bugs in the dnf versionlock plugin:
    # * Multiple package-name-spec arguments don't lock correctly
    #   (https://bugzilla.redhat.com/show_bug.cgi?id=2013324).
    # * Locking a version of a not installed package disallows locking other
    #   versions later (https://bugzilla.redhat.com/show_bug.cgi?id=2013332)
    #
    # NOTE: This is suboptimal in terms of performance if there are more than a
    # few package-name-spec patterns to lock, because there is a command
    # execution per each. This will improve by changing the strategy once the
    # mentioned alleged bugs in the dnf versionlock plugin are fixed.
    if patterns:
        outs = []
        for p in patterns:
            rc, out, err = module.run_command(
                [DNF_BIN, "-q", "versionlock", command] + raw_parameter + [p],
                check_rc=True)
            outs.append(out)
        out = "\n".join(outs)
    else:
        rc, out, err = module.run_command(
            [DNF_BIN, "-q", "versionlock", command], check_rc=True)
    return out


# This is equivalent to the _match function of the versionlock plugin.
def match(entry, pattern):
    entry = entry.lstrip('!')
    if entry == pattern:
        return True
    m = NEVRA_RE.match(entry)
    if not m:
        return False
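    # Try every sub-form of the NEVRA that can be used as a pattern target
    # (name, name.arch, name-version, and so on), mirroring the forms the
    # versionlock plugin accepts.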
    for name in (
            '%s' % m["name"],
            '%s.%s' % (m["name"], m["arch"]),
            '%s-%s' % (m["name"], m["version"]),
            '%s-%s-%s' % (m["name"], m["version"], m["release"]),
            '%s-%s:%s' % (m["name"], m["epoch"], m["version"]),
            '%s-%s-%s.%s' % (m["name"], m["version"], m["release"], m["arch"]),
            '%s-%s:%s-%s' % (m["name"], m["epoch"], m["version"], m["release"]),
            '%s:%s-%s-%s.%s' % (m["epoch"], m["name"], m["version"], m["release"],
                                m["arch"]),
            '%s-%s:%s-%s.%s' % (m["name"], m["epoch"], m["version"], m["release"],
                                m["arch"])
    ):
        if fnmatch.fnmatch(name, pattern):
            return True
    return False


def get_packages(module, patterns, only_installed=False):
    packages_available_map_name_evrs = {}
    rc, out, err = module.run_command(
        [DNF_BIN, "-q", "repoquery"] +
        (["--installed"] if only_installed else []) +
        patterns,
        check_rc=True)

    for p in out.split():
        # Extract the NEVRA pattern.
        m = NEVRA_RE.match(p)
        if not m:
            module.fail_json(
                msg="failed to parse nevra for %s" % p,
                rc=rc, out=out, err=err)

        evr = "%s:%s-%s" % (m["epoch"],
                            m["version"],
                            m["release"])

        packages_available_map_name_evrs.setdefault(m["name"], set())
        packages_available_map_name_evrs[m["name"]].add(evr)
    return packages_available_map_name_evrs


def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type="list", elements="str", default=[]),
            raw=dict(type="bool", default=False),
            state=dict(type="str", default="present",
                       choices=["present", "absent", "excluded", "clean"]),
        ),
        supports_check_mode=True,
    )

    patterns = module.params["name"]
    raw = module.params["raw"]
    state = module.params["state"]
    changed = False
    msg = ""

    # Check module pre-requisites.
    if not os.path.exists(DNF_BIN):
        module.fail_json(msg="%s was not found" % DNF_BIN)
    if not os.path.exists(VERSIONLOCK_CONF):
        module.fail_json(msg="plugin versionlock is required")

    # Check incompatible options.
    if state == "clean" and patterns:
        module.fail_json(msg="clean state is incompatible with a name list")
    if state != "clean" and not patterns:
        module.fail_json(msg="name list is required for %s state" % state)

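    # The plugin's "list" subcommand prints one entry per line, for example
    # "bash-0:4.4.20-1.el8_4.*"; entries prefixed with "!" are exclusions,
    # which is why a "!" is prepended below when state is "excluded".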
    locklist_pre = do_versionlock(module, "list").split()

    specs_toadd = []
    specs_todelete = []

    if state in ["present", "excluded"]:

        if raw:
            # Add raw patterns as specs to add.
            for p in patterns:
                if ((p if state == "present" else "!" + p)
                        not in locklist_pre):
                    specs_toadd.append(p)
        else:
            # Get available packages that match the patterns.
            packages_map_name_evrs = get_packages(
                module,
                patterns)

            # Get installed packages that match the patterns.
            packages_installed_map_name_evrs = get_packages(
                module,
                patterns,
                only_installed=True)

            # Obtain the list of package specs that require an entry in the
            # locklist. This list is composed by:
            #   a) the non-installed packages list with all available
            #      versions
            #   b) the installed packages list
            packages_map_name_evrs.update(packages_installed_map_name_evrs)
            for name in packages_map_name_evrs:
                for evr in packages_map_name_evrs[name]:
                    locklist_entry = "%s-%s.*" % (name, evr)

                    if (locklist_entry if state == "present"
                            else "!%s" % locklist_entry) not in locklist_pre:
                        specs_toadd.append(locklist_entry)

        if specs_toadd and not module.check_mode:
            cmd = "add" if state == "present" else "exclude"
            msg = do_versionlock(module, cmd, patterns=specs_toadd, raw=raw)

    elif state == "absent":

        if raw:
            # Add raw patterns as specs to delete.
            for p in patterns:
                if p in locklist_pre:
                    specs_todelete.append(p)

        else:
            # Get patterns that match some line in the locklist.
            for p in patterns:
                for e in locklist_pre:
                    if match(e, p):
                        specs_todelete.append(p)

        if specs_todelete and not module.check_mode:
            msg = do_versionlock(
                module, "delete", patterns=specs_todelete, raw=raw)

    elif state == "clean":
        specs_todelete = locklist_pre

        if specs_todelete and not module.check_mode:
            msg = do_versionlock(module, "clear")

    if specs_toadd or specs_todelete:
        changed = True

    response = {
        "changed": changed,
        "msg": msg,
        "locklist_pre": locklist_pre,
        "specs_toadd": specs_toadd,
        "specs_todelete": specs_todelete
    }
    if not module.check_mode:
        response["locklist_post"] = do_versionlock(module, "list").split()
    else:
        if state == "clean":
            response["locklist_post"] = []

    module.exit_json(**response)


if __name__ == "__main__":
    main()
@@ -0,0 +1,427 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright Ansible Project
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: dnsimple
short_description: Interface with dnsimple.com (a DNS hosting service)
description:
  - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)."
options:
  account_email:
    description:
      - Account email. If omitted, the environment variables C(DNSIMPLE_EMAIL) and C(DNSIMPLE_API_TOKEN) will be looked for.
      - "If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)."
      - "C(.dnsimple) config files are only supported in dnsimple-python<2.0.0"
    type: str
  account_api_token:
    description:
      - Account API token. See I(account_email) for more information.
    type: str
  domain:
    description:
      - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple.
      - If omitted, a list of domains will be returned.
      - If domain is present but the domain doesn't exist, it will be created.
    type: str
  record:
    description:
      - Record to add. If blank, a record for the domain will be created. Supports the wildcard (*).
    type: str
  record_ids:
    description:
      - List of records to ensure they either exist or do not exist.
    type: list
    elements: str
  type:
    description:
      - The type of DNS record to create.
    choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL', 'CAA' ]
    type: str
  ttl:
    description:
      - The TTL to give the new record in seconds.
    default: 3600
    type: int
  value:
    description:
      - Record value.
      - Must be specified when trying to ensure a record exists.
    type: str
  priority:
    description:
      - Record priority.
    type: int
  state:
    description:
      - Whether the record should exist or not.
    choices: [ 'present', 'absent' ]
    default: present
    type: str
  solo:
    description:
      - Whether the record should be the only one for that record type and record name.
      - Only use when C(state) is set to C(present) on a record.
    type: 'bool'
    default: false
  sandbox:
    description:
      - Use the DNSimple sandbox environment.
      - Requires a dedicated account in the dnsimple sandbox environment.
      - Check U(https://developer.dnsimple.com/sandbox/) for more information.
    type: 'bool'
    default: false
    version_added: 3.5.0
requirements:
  - "dnsimple >= 2.0.0"
author: "Alex Coomans (@drcapulet)"
'''

EXAMPLES = '''
- name: Authenticate using email and API token and fetch all domains
  community.general.dnsimple:
    account_email: test@example.com
    account_api_token: dummyapitoken
  delegate_to: localhost

- name: Delete a domain
  community.general.dnsimple:
    domain: my.com
    state: absent
  delegate_to: localhost

- name: Create a test.my.com A record to point to 127.0.0.1
  community.general.dnsimple:
    domain: my.com
    record: test
    type: A
    value: 127.0.0.1
  delegate_to: localhost
  register: record

- name: Delete record using record_ids
  community.general.dnsimple:
    domain: my.com
    record_ids: '{{ record["id"] }}'
    state: absent
  delegate_to: localhost

- name: Create a my.com CNAME record to example.com
  community.general.dnsimple:
    domain: my.com
    record: ''
    type: CNAME
    value: example.com
    state: present
  delegate_to: localhost

- name: Change TTL value for a record
  community.general.dnsimple:
    domain: my.com
    record: ''
    type: CNAME
    value: example.com
    ttl: 600
    state: present
  delegate_to: localhost

- name: Delete the record
  community.general.dnsimple:
    domain: my.com
    record: ''
    type: CNAME
    value: example.com
    state: absent
  delegate_to: localhost
'''

RETURN = r"""# """

import traceback
import re

from ansible_collections.community.general.plugins.module_utils.version import LooseVersion


class DNSimpleV2():
    """class which uses dnsimple-python >= 2"""

    def __init__(self, account_email, account_api_token, sandbox, module):
        """init"""
        self.module = module
        self.account_email = account_email
        self.account_api_token = account_api_token
        self.sandbox = sandbox
        self.pagination_per_page = 30
        self.dnsimple_client()
        self.dnsimple_account()

    def dnsimple_client(self):
        """creates a dnsimple client object"""
        if self.account_email and self.account_api_token:
            client = Client(sandbox=self.sandbox, email=self.account_email, access_token=self.account_api_token)
        else:
            msg = "Option account_email or account_api_token not provided. " \
                  "Dnsimple authentication with a .dnsimple config file is not " \
                  "supported with dnsimple-python>=2.0.0"
            raise DNSimpleException(msg)
        client.identity.whoami()
        self.client = client

    def dnsimple_account(self):
        """select a dnsimple account. If a user token is used for authentication,
        this user must only have access to a single account"""
        account = self.client.identity.whoami().data.account
        # user supplied a user token instead of account api token
        if not account:
            accounts = Accounts(self.client).list_accounts().data
            if len(accounts) != 1:
                msg = "The provided dnsimple token is a user token with multiple accounts. " \
                      "Use an account token or a user token with access to a single account. " \
                      "See https://support.dnsimple.com/articles/api-access-token/"
                raise DNSimpleException(msg)
            account = accounts[0]
        self.account = account

    def get_all_domains(self):
        """returns a list of all domains"""
        domain_list = self._get_paginated_result(self.client.domains.list_domains, account_id=self.account.id)
        return [d.__dict__ for d in domain_list]

    def get_domain(self, domain):
        """returns a single domain by name or id"""
        try:
            dr = self.client.domains.get_domain(self.account.id, domain).data.__dict__
        except DNSimpleException as e:
            exception_string = str(e.message)
            if re.match(r"^Domain .+ not found$", exception_string):
                dr = None
            else:
                raise
        return dr

    def create_domain(self, domain):
        """create a single domain"""
        return self.client.domains.create_domain(self.account.id, domain).data.__dict__

    def delete_domain(self, domain):
        """delete a single domain"""
        self.client.domains.delete_domain(self.account.id, domain)

    def get_records(self, zone, dnsimple_filter=None):
        """return dns resource records which match a specified filter"""
        records_list = self._get_paginated_result(self.client.zones.list_records,
                                                  account_id=self.account.id,
                                                  zone=zone, filter=dnsimple_filter)
        return [d.__dict__ for d in records_list]

    def delete_record(self, domain, rid):
        """delete a single dns resource record"""
        self.client.zones.delete_record(self.account.id, domain, rid)

    def update_record(self, domain, rid, ttl=None, priority=None):
        """update a single dns resource record"""
        zr = ZoneRecordUpdateInput(ttl=ttl, priority=priority)
        result = self.client.zones.update_record(self.account.id, str(domain), str(rid), zr).data.__dict__
        return result

    def create_record(self, domain, name, record_type, content, ttl=None, priority=None):
        """create a single dns resource record"""
        zr = ZoneRecordInput(name=name, type=record_type, content=content, ttl=ttl, priority=priority)
        return self.client.zones.create_record(self.account.id, str(domain), zr).data.__dict__

    def _get_paginated_result(self, operation, **options):
        """return all results of a paginated api response"""
        records_pagination = operation(per_page=self.pagination_per_page, **options).pagination
        result_list = []
        for page in range(1, records_pagination.total_pages + 1):
            page_data = operation(per_page=self.pagination_per_page, page=page, **options).data
            result_list.extend(page_data)
        return result_list


DNSIMPLE_IMP_ERR = []
HAS_DNSIMPLE = False
try:
    # try to import dnsimple >= 2.0.0
    from dnsimple import Client, DNSimpleException
    from dnsimple.service import Accounts
    from dnsimple.version import version as dnsimple_version
    from dnsimple.struct.zone_record import ZoneRecordUpdateInput, ZoneRecordInput
    HAS_DNSIMPLE = True
except ImportError:
    DNSIMPLE_IMP_ERR.append(traceback.format_exc())

from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback


def main():
    module = AnsibleModule(
        argument_spec=dict(
            account_email=dict(type='str', fallback=(env_fallback, ['DNSIMPLE_EMAIL'])),
            account_api_token=dict(type='str',
                                   no_log=True,
                                   fallback=(env_fallback, ['DNSIMPLE_API_TOKEN'])),
            domain=dict(type='str'),
            record=dict(type='str'),
            record_ids=dict(type='list', elements='str'),
            type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF',
                                           'URL', 'TXT', 'NS', 'SRV', 'NAPTR',
                                           'PTR', 'AAAA', 'SSHFP', 'HINFO',
                                           'POOL', 'CAA']),
            ttl=dict(type='int', default=3600),
            value=dict(type='str'),
            priority=dict(type='int'),
            state=dict(type='str', choices=['present', 'absent'], default='present'),
            solo=dict(type='bool', default=False),
            sandbox=dict(type='bool', default=False),
        ),
        required_together=[
            ['record', 'value']
        ],
        supports_check_mode=True,
    )

    if not HAS_DNSIMPLE:
        module.fail_json(msg=missing_required_lib('dnsimple'), exception=DNSIMPLE_IMP_ERR[0])

    account_email = module.params.get('account_email')
    account_api_token = module.params.get('account_api_token')
    domain = module.params.get('domain')
    record = module.params.get('record')
    record_ids = module.params.get('record_ids')
    record_type = module.params.get('type')
    ttl = module.params.get('ttl')
    value = module.params.get('value')
    priority = module.params.get('priority')
    state = module.params.get('state')
    is_solo = module.params.get('solo')
    sandbox = module.params.get('sandbox')

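    # LooseVersion(...).version[0] is the leading integer of the release
    # string, for example 2 for dnsimple-python "2.1.2".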
    DNSIMPLE_MAJOR_VERSION = LooseVersion(dnsimple_version).version[0]

    try:
        if DNSIMPLE_MAJOR_VERSION < 2:
            module.fail_json(
                msg='Support for python-dnsimple < 2 has been removed in community.general 5.0.0. Update python-dnsimple to version >= 2.0.0.')
        ds = DNSimpleV2(account_email, account_api_token, sandbox, module)
        # Let's figure out what operation we want to do
        # No domain, return a list
        if not domain:
            all_domains = ds.get_all_domains()
            module.exit_json(changed=False, result=all_domains)

        # Domain & No record
        if record is None and not record_ids:
            if domain.isdigit():
                typed_domain = int(domain)
            else:
                typed_domain = str(domain)
            dr = ds.get_domain(typed_domain)
            # domain does not exist
            if state == 'present':
                if dr:
                    module.exit_json(changed=False, result=dr)
                else:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    else:
                        response = ds.create_domain(domain)
                        module.exit_json(changed=True, result=response)
            # state is absent
            else:
                if dr:
                    if not module.check_mode:
                        ds.delete_domain(domain)
                    module.exit_json(changed=True)
                else:
                    module.exit_json(changed=False)

        # need the not none check since record could be an empty string
        if record is not None:
            if not record_type:
                module.fail_json(msg="Missing the record type")
            if not value:
                module.fail_json(msg="Missing the record value")

            records_list = ds.get_records(domain, dnsimple_filter={'name': record})
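            # A record only counts as "the same" when name, type and content
            # all match; a TTL or priority difference on such a record is
            # handled as an update further below.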
            rr = next((r for r in records_list if r['name'] == record and r['type'] == record_type and r['content'] == value), None)
            if state == 'present':
                changed = False
                if is_solo:
                    # delete any records that have the same name and record type
                    same_type = [r['id'] for r in records_list if r['name'] == record and r['type'] == record_type]
                    if rr:
                        same_type = [rid for rid in same_type if rid != rr['id']]
                    if same_type:
                        if not module.check_mode:
                            for rid in same_type:
                                ds.delete_record(domain, rid)
                        changed = True
                if rr:
                    # check if we need to update
                    if rr['ttl'] != ttl or rr['priority'] != priority:
                        if module.check_mode:
                            module.exit_json(changed=True)
                        else:
                            response = ds.update_record(domain, rr['id'], ttl, priority)
                            module.exit_json(changed=True, result=response)
                    else:
                        module.exit_json(changed=changed, result=rr)
                else:
                    # create it
                    if module.check_mode:
                        module.exit_json(changed=True)
                    else:
                        response = ds.create_record(domain, record, record_type, value, ttl, priority)
                        module.exit_json(changed=True, result=response)
            # state is absent
            else:
                if rr:
                    if not module.check_mode:
                        ds.delete_record(domain, rr['id'])
                    module.exit_json(changed=True)
                else:
                    module.exit_json(changed=False)

        # Make sure these record_ids either all exist or none
        if record_ids:
            current_records = ds.get_records(domain, dnsimple_filter=None)
            current_record_ids = [str(d['id']) for d in current_records]
            wanted_record_ids = [str(r) for r in record_ids]
            if state == 'present':
                difference = list(set(wanted_record_ids) - set(current_record_ids))
                if difference:
                    module.fail_json(msg="Missing the following records: %s" % difference)
                else:
                    module.exit_json(changed=False)
            # state is absent
            else:
                difference = list(set(wanted_record_ids) & set(current_record_ids))
                if difference:
                    if not module.check_mode:
                        for rid in difference:
                            ds.delete_record(domain, rid)
                    module.exit_json(changed=True)
                else:
                    module.exit_json(changed=False)

    except DNSimpleException as e:
        if DNSIMPLE_MAJOR_VERSION > 1:
            module.fail_json(msg="DNSimple exception: %s" % e.message)
        else:
            module.fail_json(msg="DNSimple exception: %s" % str(e.args[0]['message']))
    module.fail_json(msg="Could not determine the operation to perform from the given parameters")


if __name__ == '__main__':
    main()
@@ -0,0 +1,339 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright Edward Hilgendorf, <edward@hilgendorf.me>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


DOCUMENTATION = r'''
---
module: dnsimple_info

short_description: Pull basic info from DNSimple API

version_added: "4.2.0"

description: Retrieve existing records and domains from DNSimple API.

extends_documentation_fragment:
  - community.general.attributes
  - community.general.attributes.info_module

options:
  name:
    description:
      - The domain name to retrieve info from.
      - Will return all associated records for this domain if specified.
      - If not specified, will return all domains associated with the account ID.
    type: str

  account_id:
    description: The account ID to query.
    required: true
    type: str

  api_key:
    description: The API key to use.
    required: true
    type: str

  record:
    description:
      - The record to find.
      - If specified, only this record will be returned instead of all records.
    required: false
    type: str

  sandbox:
    description: Whether or not to use the sandbox environment.
    required: false
    default: false
    type: bool

author:
  - Edward Hilgendorf (@edhilgendorf)
'''

EXAMPLES = r'''
- name: Get all domains from an account
  community.general.dnsimple_info:
    account_id: "1234"
    api_key: "1234"

- name: Get all records from a domain
  community.general.dnsimple_info:
    name: "example.com"
    account_id: "1234"
    api_key: "1234"

- name: Get all info from a matching record
  community.general.dnsimple_info:
    name: "example.com"
    record: "subdomain"
    account_id: "1234"
    api_key: "1234"
'''

RETURN = r'''
dnsimple_domain_info:
  description: Returns a list of dictionaries of all domains associated with the supplied account ID.
  type: list
  elements: dict
  returned: success when I(name) is not specified
  sample:
    - account_id: 1234
      created_at: '2021-10-16T21:25:42Z'
      id: 123456
      last_transferred_at:
      name: example.com
      reverse: false
      secondary: false
      updated_at: '2021-11-10T20:22:50Z'
  contains:
    account_id:
      description: The account ID.
      type: int
    created_at:
      description: When the domain entry was created.
      type: str
    id:
      description: ID of the entry.
      type: int
    last_transferred_at:
      description: Date the domain was transferred, or empty if not.
      type: str
    name:
      description: Name of the record.
      type: str
    reverse:
      description: Whether or not it is a reverse zone record.
      type: bool
    updated_at:
      description: When the domain entry was updated.
      type: str

dnsimple_records_info:
  description: Returns a list of dictionaries with all records for the domain supplied.
  type: list
  elements: dict
  returned: success when I(name) is specified, but I(record) is not
  sample:
    - content: ns1.dnsimple.com admin.dnsimple.com
      created_at: '2021-10-16T19:07:34Z'
      id: 12345
      name: 'catheadbiscuit'
      parent_id: null
      priority: null
      regions:
        - global
      system_record: true
      ttl: 3600
      type: SOA
      updated_at: '2021-11-15T23:55:51Z'
      zone_id: example.com
  contains:
    content:
      description: Content of the returned record.
      type: str
    created_at:
      description: When the domain entry was created.
      type: str
    id:
      description: ID of the entry.
      type: int
    name:
      description: Name of the record.
      type: str
    parent_id:
      description: Parent record or null.
      type: int
    priority:
      description: Priority setting of the record.
      type: str
    regions:
      description: List of regions where the record is available.
      type: list
    system_record:
      description: Whether or not it is a system record.
      type: bool
    ttl:
      description: Record TTL.
      type: int
    type:
      description: Record type.
      type: str
    updated_at:
      description: When the domain entry was updated.
      type: str
    zone_id:
      description: ID of the zone that the record is associated with.
      type: str
dnsimple_record_info:
  description: Returns a list of dictionaries that match the record supplied.
  returned: success when I(name) and I(record) are specified
  type: list
  elements: dict
  sample:
    - content: 1.2.3.4
      created_at: '2021-11-15T23:55:51Z'
      id: 123456
      name: catheadbiscuit
      parent_id: null
      priority: null
      regions:
        - global
      system_record: false
      ttl: 3600
      type: A
      updated_at: '2021-11-15T23:55:51Z'
      zone_id: example.com
  contains:
    content:
      description: Content of the returned record.
      type: str
    created_at:
      description: When the domain entry was created.
      type: str
    id:
      description: ID of the entry.
      type: int
    name:
      description: Name of the record.
      type: str
    parent_id:
      description: Parent record or null.
      type: int
    priority:
      description: Priority setting of the record.
      type: str
    regions:
      description: List of regions where the record is available.
      type: list
    system_record:
      description: Whether or not it is a system record.
      type: bool
    ttl:
      description: Record TTL.
      type: int
    type:
      description: Record type.
      type: str
    updated_at:
      description: When the domain entry was updated.
      type: str
    zone_id:
      description: ID of the zone that the record is associated with.
      type: str
'''

import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import missing_required_lib

try:
    from requests import Request, Session
except ImportError:
    HAS_REQUESTS = False
    REQUESTS_IMPORT_ERROR = traceback.format_exc()
else:
    HAS_REQUESTS = True
    REQUESTS_IMPORT_ERROR = None


def build_url(account, key, is_sandbox):
    headers = {'Accept': 'application/json',
               'Authorization': 'Bearer ' + key}
    url = 'https://api{sandbox}.dnsimple.com/'.format(
        sandbox=".sandbox" if is_sandbox else "") + 'v2/' + account
    req = Request(url=url, headers=headers)
    prepped_request = req.prepare()
    return prepped_request


def iterate_data(module, request_object):
    base_url = request_object.url
    response = Session().send(request_object)
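    # DNSimple v2 list responses carry a "pagination" envelope whose
    # total_pages says how many further GETs are needed. The '&page='
    # concatenation below relies on the URL already containing a query
    # string, which holds for all callers in this module.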
    if 'pagination' in response.json():
        data = response.json()["data"]
        pages = response.json()["pagination"]["total_pages"]
        if int(pages) > 1:
            for page in range(2, int(pages) + 1):
                request_object.url = base_url + '&page=' + str(page)
                new_results = Session().send(request_object)
                data = data + new_results.json()["data"]
        return data
    else:
        module.fail_json('API Call failed, check ID, key and sandbox values')


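# The three helpers below differ only in the query they attach to the
# prepared request: one record of a zone, all records of a zone, or all
# zones of the account. Their results become the dnsimple_record_info,
# dnsimple_records_info and dnsimple_domain_info return values respectively.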
def record_info(dnsimple_mod, req_obj):
    req_obj.url, req_obj.method = req_obj.url + '/zones/' + dnsimple_mod.params["name"] + '/records?name=' + dnsimple_mod.params["record"], 'GET'
    return iterate_data(dnsimple_mod, req_obj)


def domain_info(dnsimple_mod, req_obj):
    req_obj.url, req_obj.method = req_obj.url + '/zones/' + dnsimple_mod.params["name"] + '/records?per_page=100', 'GET'
    return iterate_data(dnsimple_mod, req_obj)


def account_info(dnsimple_mod, req_obj):
    req_obj.url, req_obj.method = req_obj.url + '/zones/?per_page=100', 'GET'
    return iterate_data(dnsimple_mod, req_obj)


def main():
    # define available arguments/parameters a user can pass to the module
    fields = {
        "account_id": {"required": True, "type": "str"},
        "api_key": {"required": True, "type": "str", "no_log": True},
        "name": {"required": False, "type": "str"},
        "record": {"required": False, "type": "str"},
        "sandbox": {"required": False, "type": "bool", "default": False}
    }

    result = {
        'changed': False
    }

    module = AnsibleModule(
        argument_spec=fields,
        supports_check_mode=True
    )

    params = module.params

    # Bail out before build_url(), which needs the requests library, and use
    # fail_json() so a missing dependency is reported as a failure.
    if not HAS_REQUESTS:
        module.fail_json(
            msg=missing_required_lib('requests'),
            exception=REQUESTS_IMPORT_ERROR)

    req = build_url(params['account_id'],
                    params['api_key'],
                    params['sandbox'])

    # At minimum we need account and key
    if params['account_id'] and params['api_key']:
        # If we have a record return info on that record
        if params['name'] and params['record']:
            result['dnsimple_record_info'] = record_info(module, req)
            module.exit_json(**result)

        # If we have the account only and domain, return records for the domain
        elif params['name']:
            result['dnsimple_records_info'] = domain_info(module, req)
            module.exit_json(**result)

        # If we have the account only, return domains
        else:
            result['dnsimple_domain_info'] = account_info(module, req)
            module.exit_json(**result)
    else:
        module.fail_json(msg="Need at least account_id and api_key")


if __name__ == '__main__':
    main()
@@ -0,0 +1,717 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: dnsmadeeasy
short_description: Interface with dnsmadeeasy.com (a DNS hosting service)
description:
  - >
    Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or
    monitor/account support yet. See U(https://www.dnsmadeeasy.com/integration/restapi/)
options:
  account_key:
    description:
      - Account API Key.
    required: true
    type: str

  account_secret:
    description:
      - Account Secret Key.
    required: true
    type: str

  domain:
    description:
      - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster
        resolution.
    required: true
    type: str

  sandbox:
    description:
      - Decides if the sandbox API should be used. Otherwise (default) the production API of DNS Made Easy is used.
    type: bool
    default: false

  record_name:
    description:
      - Record name to get/create/delete/update. If record_name is not specified, all records for the domain will be returned in "result" regardless
        of the state argument.
    type: str

  record_type:
    description:
      - Record type.
    choices: [ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ]
    type: str

  record_value:
    description:
      - >
        Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>,
        SRV: <priority> <weight> <port> <target name>, TXT: <text value>
      - >
        If record_value is not specified, no changes will be made and the record will be returned in 'result'
        (in other words, this module can be used to fetch a record's current id, type, and ttl).
    type: str

  record_ttl:
    description:
      - Record's "Time to live". Number of seconds the record remains cached in DNS servers.
    default: 1800
    type: int

  state:
    description:
      - Whether the record should exist or not.
    required: true
    choices: [ 'present', 'absent' ]
    type: str

  validate_certs:
    description:
      - If C(false), SSL certificates will not be validated. This should only be used
        on personally controlled sites using self-signed certificates.
    type: bool
    default: true

  monitor:
    description:
      - If C(true), add or change the monitor. This is applicable only for A records.
    type: bool
    default: false

  systemDescription:
    description:
      - Description used by the monitor.
    default: ''
    type: str

  maxEmails:
    description:
      - Number of emails sent to the contact list by the monitor.
    default: 1
    type: int

  protocol:
    description:
      - Protocol used by the monitor.
    default: 'HTTP'
    choices: ['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']
    type: str

  port:
    description:
      - Port used by the monitor.
    default: 80
    type: int

  sensitivity:
    description:
      - Number of checks the monitor performs before a failover occurs, where Low = 8, Medium = 5, and High = 3.
    default: 'Medium'
    choices: ['Low', 'Medium', 'High']
    type: str

  contactList:
    description:
      - Name or id of the contact list that the monitor will notify.
      - The default C('') means the Account Owner.
    type: str

  httpFqdn:
    description:
      - The fully qualified domain name used by the monitor.
    type: str

  httpFile:
    description:
      - The file at the Fqdn that the monitor queries for HTTP or HTTPS.
    type: str

  httpQueryString:
    description:
      - The string in the httpFile that the monitor queries for HTTP or HTTPS.
    type: str

  failover:
    description:
      - If C(true), add or change the failover. This is applicable only for A records.
    type: bool
    default: false

  autoFailover:
    description:
      - If true, fallback to the primary IP address is manual after a failover.
      - If false, fallback to the primary IP address is automatic after a failover.
    type: bool
    default: false

  ip1:
    description:
      - Primary IP address for the failover.
      - Required if adding or changing the monitor or failover.
    type: str

  ip2:
    description:
      - Secondary IP address for the failover.
      - Required if adding or changing the failover.
    type: str

  ip3:
    description:
      - Tertiary IP address for the failover.
    type: str

  ip4:
    description:
      - Quaternary IP address for the failover.
    type: str

  ip5:
    description:
      - Quinary IP address for the failover.
    type: str

notes:
  - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few
    seconds of actual time by using NTP.
  - This module returns record(s) and monitor(s) in the "result" element when 'state' is set to 'present'.
    These values can be registered and used in your playbooks.
  - Only A records can have a monitor or failover.
  - To add failover, the 'failover', 'autoFailover', 'port', 'protocol', 'ip1', and 'ip2' options are required.
  - To add monitor, the 'monitor', 'port', 'protocol', 'maxEmails', 'systemDescription', and 'ip1' options are required.
  - The monitor and the failover will share 'port', 'protocol', and 'ip1' options.

requirements: [ hashlib, hmac ]
author: "Brice Burgess (@briceburg)"
'''

EXAMPLES = '''
- name: Fetch my.com domain records
  community.general.dnsmadeeasy:
    account_key: key
    account_secret: secret
    domain: my.com
    state: present
  register: response

- name: Create a record
  community.general.dnsmadeeasy:
    account_key: key
    account_secret: secret
    domain: my.com
    state: present
    record_name: test
    record_type: A
    record_value: 127.0.0.1

- name: Update the previously created record
  community.general.dnsmadeeasy:
    account_key: key
    account_secret: secret
    domain: my.com
    state: present
    record_name: test
    record_value: 192.0.2.23

- name: Fetch a specific record
  community.general.dnsmadeeasy:
    account_key: key
    account_secret: secret
    domain: my.com
    state: present
    record_name: test
  register: response

- name: Delete a record
  community.general.dnsmadeeasy:
    account_key: key
    account_secret: secret
    domain: my.com
    record_type: A
    state: absent
    record_name: test

- name: Add a failover
  community.general.dnsmadeeasy:
    account_key: key
    account_secret: secret
    domain: my.com
    state: present
    record_name: test
    record_type: A
    record_value: 127.0.0.1
    failover: true
    ip1: 127.0.0.2
    ip2: 127.0.0.3

- name: Add a failover with five IP addresses
  community.general.dnsmadeeasy:
    account_key: key
    account_secret: secret
    domain: my.com
    state: present
    record_name: test
    record_type: A
    record_value: 127.0.0.1
    failover: true
    ip1: 127.0.0.2
    ip2: 127.0.0.3
    ip3: 127.0.0.4
    ip4: 127.0.0.5
    ip5: 127.0.0.6

- name: Add a monitor
  community.general.dnsmadeeasy:
    account_key: key
    account_secret: secret
    domain: my.com
    state: present
    record_name: test
    record_type: A
    record_value: 127.0.0.1
    monitor: true
    ip1: 127.0.0.2
    protocol: HTTP  # default
    port: 80  # default
    maxEmails: 1
    systemDescription: Monitor Test A record
    contactList: my contact list

- name: Add a monitor with http options
  community.general.dnsmadeeasy:
    account_key: key
    account_secret: secret
    domain: my.com
    state: present
    record_name: test
    record_type: A
    record_value: 127.0.0.1
    monitor: true
    ip1: 127.0.0.2
    protocol: HTTP  # default
    port: 80  # default
    maxEmails: 1
    systemDescription: Monitor Test A record
    contactList: 1174  # contact list id
    httpFqdn: http://my.com
    httpFile: example
    httpQueryString: some string

- name: Add a monitor and a failover
  community.general.dnsmadeeasy:
    account_key: key
    account_secret: secret
    domain: my.com
    state: present
    record_name: test
    record_type: A
    record_value: 127.0.0.1
    failover: true
    ip1: 127.0.0.2
    ip2: 127.0.0.3
    monitor: true
    protocol: HTTPS
    port: 443
    maxEmails: 1
    systemDescription: monitoring my.com status
    contactList: emergencycontacts

- name: Remove a failover
  community.general.dnsmadeeasy:
    account_key: key
    account_secret: secret
    domain: my.com
    state: present
    record_name: test
    record_type: A
    record_value: 127.0.0.1
    failover: false

- name: Remove a monitor
  community.general.dnsmadeeasy:
    account_key: key
    account_secret: secret
    domain: my.com
    state: present
    record_name: test
    record_type: A
    record_value: 127.0.0.1
    monitor: false
'''

# ============================================
# DNSMadeEasy module specific support methods.
#

import json
import hashlib
import hmac
import locale
from time import strftime, gmtime

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.six import string_types


class DME2(object):

    def __init__(self, apikey, secret, domain, sandbox, module):
        self.module = module

        self.api = apikey
        self.secret = secret

        if sandbox:
            self.baseurl = 'https://api.sandbox.dnsmadeeasy.com/V2.0/'
            self.module.warn(warning="Sandbox is enabled. All actions are made against the URL %s" % self.baseurl)
        else:
            self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/'

        self.domain = str(domain)
        self.domain_map = None       # ["domain_name"] => ID
        self.record_map = None       # ["record_name"] => ID
        self.records = None          # ["record_ID"] => <record>
        self.all_records = None
        self.contactList_map = None  # ["contactList_name"] => ID

        # Lookup the domain ID if passed as a domain name vs. ID
        if not self.domain.isdigit():
            self.domain = self.getDomainByName(self.domain)['id']

        self.record_url = 'dns/managed/' + str(self.domain) + '/records'
        self.monitor_url = 'monitor'
        self.contactList_url = 'contactList'

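    # DNS Made Easy request signing: each request carries the API key, the
    # current GMT date, and an HMAC-SHA1 digest of that date keyed with the
    # account secret, all sent as x-dnsme-* headers.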
    def _headers(self):
        currTime = self._get_date()
        hashstring = self._create_hash(currTime)
        headers = {'x-dnsme-apiKey': self.api,
                   'x-dnsme-hmac': hashstring,
                   'x-dnsme-requestDate': currTime,
                   'content-type': 'application/json'}
        return headers

    def _get_date(self):
        locale.setlocale(locale.LC_TIME, 'C')
        return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())

    def _create_hash(self, rightnow):
        return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest()

    def query(self, resource, method, data=None):
        url = self.baseurl + resource
        if data and not isinstance(data, string_types):
            data = urlencode(data)

        response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers())
        if info['status'] not in (200, 201, 204):
            self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))

        try:
            return json.load(response)
        except Exception:
            return {}

    def getDomain(self, domain_id):
        if not self.domain_map:
            self._instMap('domain')

        return self.domains.get(domain_id, False)

    def getDomainByName(self, domain_name):
        if not self.domain_map:
            self._instMap('domain')

        return self.getDomain(self.domain_map.get(domain_name, 0))

    def getDomains(self):
        return self.query('dns/managed', 'GET')['data']

    def getRecord(self, record_id):
        if not self.record_map:
            self._instMap('record')

        return self.records.get(record_id, False)

    # Try to find a single record matching this one.
    # How we do this depends on the type of record. For instance, there
    # can be several MX records for a single record_name while there can
    # only be a single CNAME for a particular record_name. Note also that
    # there can be several records with different types for a single name.
    def getMatchingRecord(self, record_name, record_type, record_value):
        # Get all the records if not already cached
        if not self.all_records:
            self.all_records = self.getRecords()

        if record_type in ["CNAME", "ANAME", "HTTPRED", "PTR"]:
            for result in self.all_records:
                if result['name'] == record_name and result['type'] == record_type:
                    return result
            return False
        elif record_type in ["A", "AAAA", "MX", "NS", "TXT", "SRV"]:
            for result in self.all_records:
                if record_type == "MX":
                    value = record_value.split(" ")[1]
                # Note that TXT records are surrounded by quotes in the API response.
                elif record_type == "TXT":
                    value = '"{0}"'.format(record_value)
                elif record_type == "SRV":
                    value = record_value.split(" ")[3]
                else:
                    value = record_value
                if result['name'] == record_name and result['type'] == record_type and result['value'] == value:
                    return result
            return False
        else:
            raise Exception('record_type not yet supported')

    def getRecords(self):
        return self.query(self.record_url, 'GET')['data']

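    # Builds name->id and id->object maps for a resource type by calling the
    # matching getter dynamically: 'domain' -> getDomains(), 'record' ->
    # getRecords(), 'contactList' -> getContactlists() (str.title() lowercases
    # the inner 'L', hence that getter's unusual casing).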
    def _instMap(self, type):
        # @TODO cache this call so it's executed only once per ansible execution
        map = {}
        results = {}

        # iterate over e.g. self.getDomains() || self.getRecords()
        for result in getattr(self, 'get' + type.title() + 's')():

            map[result['name']] = result['id']
            results[result['id']] = result

        # e.g. self.domain_map || self.record_map
        setattr(self, type + '_map', map)
        setattr(self, type + 's', results)  # e.g. self.domains || self.records

    def prepareRecord(self, data):
        return json.dumps(data, separators=(',', ':'))

    def createRecord(self, data):
        # @TODO update the cache w/ resultant record + id when implemented
        return self.query(self.record_url, 'POST', data)

    def updateRecord(self, record_id, data):
        # @TODO update the cache w/ resultant record + id when implemented
        return self.query(self.record_url + '/' + str(record_id), 'PUT', data)

    def deleteRecord(self, record_id):
        # @TODO remove record from the cache when implemented
        return self.query(self.record_url + '/' + str(record_id), 'DELETE')

    def getMonitor(self, record_id):
        return self.query(self.monitor_url + '/' + str(record_id), 'GET')

    def updateMonitor(self, record_id, data):
        return self.query(self.monitor_url + '/' + str(record_id), 'PUT', data)

    def prepareMonitor(self, data):
        return json.dumps(data, separators=(',', ':'))

    def getContactList(self, contact_list_id):
        if not self.contactList_map:
            self._instMap('contactList')

        return self.contactLists.get(contact_list_id, False)

    def getContactlists(self):
        return self.query(self.contactList_url, 'GET')['data']

    def getContactListByName(self, name):
        if not self.contactList_map:
            self._instMap('contactList')

        return self.getContactList(self.contactList_map.get(name, 0))


# ===========================================
# Module execution.
#


def main():

    module = AnsibleModule(
        argument_spec=dict(
            account_key=dict(required=True, no_log=True),
            account_secret=dict(required=True, no_log=True),
            domain=dict(required=True),
            sandbox=dict(default=False, type='bool'),
            state=dict(required=True, choices=['present', 'absent']),
            record_name=dict(required=False),
            record_type=dict(required=False, choices=[
                'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']),
            record_value=dict(required=False),
            record_ttl=dict(required=False, default=1800, type='int'),
            monitor=dict(default=False, type='bool'),
            systemDescription=dict(default=''),
            maxEmails=dict(default=1, type='int'),
            protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']),
            port=dict(default=80, type='int'),
            sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']),
            contactList=dict(default=None),
            httpFqdn=dict(required=False),
            httpFile=dict(required=False),
            httpQueryString=dict(required=False),
            failover=dict(default=False, type='bool'),
            autoFailover=dict(default=False, type='bool'),
            ip1=dict(required=False),
            ip2=dict(required=False),
            ip3=dict(required=False),
            ip4=dict(required=False),
            ip5=dict(required=False),
            validate_certs=dict(default=True, type='bool'),
        ),
        required_together=[
            ['record_value', 'record_ttl', 'record_type']
        ],
        required_if=[
            ['failover', True, ['autoFailover', 'port', 'protocol', 'ip1', 'ip2']],
            ['monitor', True, ['port', 'protocol', 'maxEmails', 'systemDescription', 'ip1']]
        ]
    )

    protocols = dict(TCP=1, UDP=2, HTTP=3, DNS=4, SMTP=5, HTTPS=6)
    sensitivities = dict(Low=8, Medium=5, High=3)

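    # Instantiating DME2 may already hit the API: a domain given by name is
    # resolved to its numeric ID in the constructor.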
    DME = DME2(module.params["account_key"], module.params[
        "account_secret"], module.params["domain"], module.params["sandbox"], module)
    state = module.params["state"]
    record_name = module.params["record_name"]
    record_type = module.params["record_type"]
    record_value = module.params["record_value"]

    # Follow Keyword Controlled Behavior
    if record_name is None:
        domain_records = DME.getRecords()
        if not domain_records:
            module.fail_json(
                msg="The requested domain name is not accessible with this api_key; try using its ID if known.")
        module.exit_json(changed=False, result=domain_records)

    # Fetch existing record + Build new one
    current_record = DME.getMatchingRecord(record_name, record_type, record_value)
    new_record = {'name': record_name}
    for i in ["record_value", "record_type", "record_ttl"]:
        if module.params[i] is not None:
            new_record[i[len("record_"):]] = module.params[i]
    # Special handling for mx record
    if new_record["type"] == "MX":
        new_record["mxLevel"] = new_record["value"].split(" ")[0]
        new_record["value"] = new_record["value"].split(" ")[1]

    # Special handling for SRV records
    if new_record["type"] == "SRV":
        new_record["priority"] = new_record["value"].split(" ")[0]
        new_record["weight"] = new_record["value"].split(" ")[1]
        new_record["port"] = new_record["value"].split(" ")[2]
        new_record["value"] = new_record["value"].split(" ")[3]

    # Fetch existing monitor if the A record indicates it should exist and build the new monitor
    current_monitor = dict()
    new_monitor = dict()
    if current_record and current_record['type'] == 'A' and current_record.get('monitor'):
        current_monitor = DME.getMonitor(current_record['id'])

    # Build the new monitor
    for i in ['monitor', 'systemDescription', 'protocol', 'port', 'sensitivity', 'maxEmails',
              'contactList', 'httpFqdn', 'httpFile', 'httpQueryString',
              'failover', 'autoFailover', 'ip1', 'ip2', 'ip3', 'ip4', 'ip5']:
        if module.params[i] is not None:
            if i == 'protocol':
                # The API requires protocol to be a numeric in the range 1-6
                new_monitor['protocolId'] = protocols[module.params[i]]
            elif i == 'sensitivity':
                # The API requires sensitivity to be a numeric of 8, 5, or 3
                new_monitor[i] = sensitivities[module.params[i]]
            elif i == 'contactList':
                # The module accepts either the name or the id of the contact list
                contact_list_id = module.params[i]
                if not contact_list_id.isdigit() and contact_list_id != '':
                    contact_list = DME.getContactListByName(contact_list_id)
                    if not contact_list:
                        module.fail_json(msg="Contact list {0} does not exist".format(contact_list_id))
                    contact_list_id = contact_list.get('id', '')
                new_monitor['contactListId'] = contact_list_id
            else:
                # The module option names match the API field names
                new_monitor[i] = module.params[i]

    # Compare new record against existing one
    record_changed = False
    if current_record:
        for i in new_record:
            # Remove leading and trailing quote character from values because TXT records
            # are surrounded by quotes.
            if str(current_record[i]).strip('"') != str(new_record[i]):
                record_changed = True
        new_record['id'] = str(current_record['id'])

    monitor_changed = False
    if current_monitor:
        for i in new_monitor:
            if str(current_monitor.get(i)) != str(new_monitor[i]):
                monitor_changed = True

    # Follow Keyword Controlled Behavior
    if state == 'present':
        # return the record if no value is specified
        if "value" not in new_record:
            if not current_record:
                module.fail_json(
                    msg="A record with name '%s' does not exist for domain '%s'." % (record_name, module.params['domain']))
            module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))

        # create record and monitor as the record does not exist
        if not current_record:
            record = DME.createRecord(DME.prepareRecord(new_record))
            if new_monitor.get('monitor') and record_type == "A":
                monitor = DME.updateMonitor(record['id'], DME.prepareMonitor(new_monitor))
                module.exit_json(changed=True, result=dict(record=record, monitor=monitor))
            else:
                module.exit_json(changed=True, result=dict(record=record, monitor=current_monitor))

        # update the record
        updated = False
        if record_changed:
            DME.updateRecord(current_record['id'], DME.prepareRecord(new_record))
            updated = True
        if monitor_changed:
            DME.updateMonitor(current_monitor['recordId'], DME.prepareMonitor(new_monitor))
            updated = True
        if updated:
            module.exit_json(changed=True, result=dict(record=new_record, monitor=new_monitor))

        # return the record (no changes)
        module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))

    elif state == 'absent':
        changed = False
        # delete the record (and the monitor/failover) if it exists
        if current_record:
            DME.deleteRecord(current_record['id'])
            module.exit_json(changed=True)

        # record does not exist, return w/o change.
        module.exit_json(changed=changed)

    else:
        module.fail_json(
            msg="'%s' is an unknown value for the state argument" % state)


if __name__ == '__main__':
    main()
@@ -0,0 +1,364 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2017-2020, Yann Amar <quidame@poivron.org>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = r'''
---
module: dpkg_divert
short_description: Override a debian package's version of a file
version_added: '0.2.0'
author:
  - quidame (@quidame)
description:
  - A diversion is for C(dpkg) the knowledge that only a given package
    (or the local administrator) is allowed to install a file at a given
    location. Other packages shipping their own version of this file will
    be forced to I(divert) it, i.e. to install it at another location. It
    allows one to keep changes in a file provided by a debian package by
    preventing its overwrite at package upgrade.
  - This module manages diversions of debian packages files using the
    C(dpkg-divert) commandline tool. It can either create or remove a
    diversion for a given file, but also update an existing diversion
    to modify its I(holder) and/or its I(divert) location.
options:
  path:
    description:
      - The original and absolute path of the file to be diverted or
        undiverted. This path is unique, i.e. it is not possible to get
        two diversions for the same I(path).
    required: true
    type: path
  state:
    description:
      - When I(state=absent), remove the diversion of the specified
        I(path); when I(state=present), create the diversion if it does
        not exist, or update its package I(holder) or I(divert) location,
        if it already exists.
    type: str
    default: present
    choices: [absent, present]
  holder:
    description:
      - The name of the package whose copy of file is not diverted, also
        known as the diversion holder or the package the diversion belongs
        to.
      - The actual package does not have to be installed or even to exist
        for its name to be valid. If not specified, the diversion is held
        by 'LOCAL', that is reserved by/for dpkg for local diversions.
      - This parameter is ignored when I(state=absent).
    type: str
  divert:
    description:
      - The location where the versions of file will be diverted.
      - Default is to add suffix C(.distrib) to the file path.
      - This parameter is ignored when I(state=absent).
    type: path
  rename:
    description:
      - Actually move the file aside (when I(state=present)) or back (when
        I(state=absent)), but only when changing the state of the diversion.
        This parameter has no effect when attempting to add a diversion that
        already exists or when removing a nonexistent one.
      - Unless I(force=true), renaming fails if the destination file already
        exists (this lock being a dpkg-divert feature, and bypassing it being
        a module feature).
    type: bool
    default: false
  force:
    description:
      - When I(rename=true) and I(force=true), renaming is performed even if
        the target of the renaming exists, i.e. the existing contents of the
        file at this location will be lost.
      - This parameter is ignored when I(rename=false).
    type: bool
    default: false
notes:
  - This module supports I(check_mode) and I(diff).
requirements:
  - dpkg-divert >= 1.15.0 (Debian family)
'''

EXAMPLES = r'''
- name: Divert /usr/bin/busybox to /usr/bin/busybox.distrib and keep file in place
  community.general.dpkg_divert:
    path: /usr/bin/busybox

- name: Divert /usr/bin/busybox by package 'branding'
  community.general.dpkg_divert:
    path: /usr/bin/busybox
    holder: branding

- name: Divert and rename busybox to busybox.dpkg-divert
  community.general.dpkg_divert:
    path: /usr/bin/busybox
    divert: /usr/bin/busybox.dpkg-divert
    rename: true

- name: Remove the busybox diversion and move the diverted file back
  community.general.dpkg_divert:
    path: /usr/bin/busybox
    state: absent
    rename: true
    force: true
'''

RETURN = r'''
commands:
  description: The dpkg-divert commands ran internally by the module.
  type: list
  returned: on_success
  elements: str
  sample: "/usr/bin/dpkg-divert --no-rename --remove /etc/foobarrc"
messages:
  description: The dpkg-divert relevant messages (stdout or stderr).
  type: list
  returned: on_success
  elements: str
  sample: "Removing 'local diversion of /etc/foobarrc to /etc/foobarrc.distrib'"
diversion:
  description: The status of the diversion after task execution.
  type: dict
  returned: always
  contains:
    divert:
      description: The location of the diverted file.
      type: str
    holder:
      description: The package holding the diversion.
      type: str
    path:
      description: The path of the file to divert/undivert.
      type: str
    state:
      description: The state of the diversion.
      type: str
  sample:
    {
      "divert": "/etc/foobarrc.distrib",
      "holder": "LOCAL",
      "path": "/etc/foobarrc",
      "state": "present"
    }
'''


import re
import os

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes, to_native

from ansible_collections.community.general.plugins.module_utils.version import LooseVersion


def diversion_state(module, command, path):
    diversion = dict(path=path, state='absent', divert=None, holder=None)
    rc, out, err = module.run_command([command, '--listpackage', path], check_rc=True)
    if out:
        diversion['state'] = 'present'
        diversion['holder'] = out.rstrip()
        rc, out, err = module.run_command([command, '--truename', path], check_rc=True)
        diversion['divert'] = out.rstrip()
    return diversion


def main():
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(required=True, type='path'),
            state=dict(required=False, type='str', default='present', choices=['absent', 'present']),
            holder=dict(required=False, type='str'),
            divert=dict(required=False, type='path'),
            rename=dict(required=False, type='bool', default=False),
            force=dict(required=False, type='bool', default=False),
        ),
        supports_check_mode=True,
    )

    path = module.params['path']
    state = module.params['state']
    holder = module.params['holder']
    divert = module.params['divert']
    rename = module.params['rename']
    force = module.params['force']

    diversion_wanted = dict(path=path, state=state)
    changed = False

    DPKG_DIVERT = module.get_bin_path('dpkg-divert', required=True)
    MAINCOMMAND = [DPKG_DIVERT]

    # Option --listpackage is needed and comes with 1.15.0
    rc, stdout, stderr = module.run_command([DPKG_DIVERT, '--version'], check_rc=True)
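    # The first line of `dpkg-divert --version` contains a single dotted
    # version token; extract it (exactly one match is expected).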
    [current_version] = [x for x in stdout.splitlines()[0].split() if re.match('^[0-9]+[.][0-9]', x)]
    if LooseVersion(current_version) < LooseVersion("1.15.0"):
        module.fail_json(msg="Unsupported dpkg version (<1.15.0).")
    no_rename_is_supported = (LooseVersion(current_version) >= LooseVersion("1.19.1"))

    b_path = to_bytes(path, errors='surrogate_or_strict')
    path_exists = os.path.exists(b_path)
    # Used for things not doable with a single dpkg-divert command (as forced
    # renaming of files, and diversion's 'holder' or 'divert' updates).
    target_exists = False
    truename_exists = False

    diversion_before = diversion_state(module, DPKG_DIVERT, path)
    if diversion_before['state'] == 'present':
        b_divert = to_bytes(diversion_before['divert'], errors='surrogate_or_strict')
        truename_exists = os.path.exists(b_divert)

    # Append options as requested in the task parameters, but ignore some of
    # them when removing the diversion.
    if rename:
        MAINCOMMAND.append('--rename')
    elif no_rename_is_supported:
        MAINCOMMAND.append('--no-rename')

    if state == 'present':
        if holder and holder != 'LOCAL':
            MAINCOMMAND.extend(['--package', holder])
            diversion_wanted['holder'] = holder
        else:
            MAINCOMMAND.append('--local')
            diversion_wanted['holder'] = 'LOCAL'

        if divert:
            MAINCOMMAND.extend(['--divert', divert])
            target = divert
        else:
            target = '%s.distrib' % path

        MAINCOMMAND.extend(['--add', path])
        diversion_wanted['divert'] = target
        b_target = to_bytes(target, errors='surrogate_or_strict')
        target_exists = os.path.exists(b_target)

    else:
        MAINCOMMAND.extend(['--remove', path])
        diversion_wanted['divert'] = None
        diversion_wanted['holder'] = None

    # Start to populate the returned objects.
    diversion = diversion_before.copy()
    maincommand = ' '.join(MAINCOMMAND)
    commands = [maincommand]

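    # In check mode, or when nothing would change, run dpkg-divert with
    # --test so the diversion database is left untouched while the requested
    # operation is still validated.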
    if module.check_mode or diversion_wanted == diversion_before:
        MAINCOMMAND.insert(1, '--test')
    diversion_after = diversion_wanted

    # Just try and see
    rc, stdout, stderr = module.run_command(MAINCOMMAND)

    if rc == 0:
        messages = [stdout.rstrip()]

    # else... cases of failure with dpkg-divert are:
    # - The diversion does not belong to the same package (or LOCAL)
    # - The divert filename is not the same (e.g. path.distrib != path.divert)
    # - The renaming is forbidden by dpkg-divert (i.e. both the file and the
    #   diverted file exist)

    elif state != diversion_before['state']:
        # There should be no case with 'divert' and 'holder' when creating the
        # diversion from none, and they're ignored when removing the diversion.
        # So this is all about renaming...
        if rename and path_exists and (
                (state == 'absent' and truename_exists) or
                (state == 'present' and target_exists)):
            if not force:
                msg = "Set 'force' param to True to force renaming of files."
                module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
                                 stderr=stderr, stdout=stdout, diversion=diversion)
        else:
            msg = "Unexpected error while changing state of the diversion."
            module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
                             stderr=stderr, stdout=stdout, diversion=diversion)

        to_remove = path
        if state == 'present':
            to_remove = target

        if not module.check_mode:
            try:
                b_remove = to_bytes(to_remove, errors='surrogate_or_strict')
                os.unlink(b_remove)
            except OSError as e:
                msg = 'Failed to remove %s: %s' % (to_remove, to_native(e))
                module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
                                 stderr=stderr, stdout=stdout, diversion=diversion)
            rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True)

        messages = [stdout.rstrip()]

    # The situation is that we want to modify the settings (holder or divert)
    # of an existing diversion. dpkg-divert does not handle this, and we have
    # to remove the existing diversion first, and then set a new one.
    else:
        RMDIVERSION = [DPKG_DIVERT, '--remove', path]
        if no_rename_is_supported:
            RMDIVERSION.insert(1, '--no-rename')
        rmdiversion = ' '.join(RMDIVERSION)

        if module.check_mode:
            RMDIVERSION.insert(1, '--test')

        if rename:
            MAINCOMMAND.remove('--rename')
            if no_rename_is_supported:
                MAINCOMMAND.insert(1, '--no-rename')
            maincommand = ' '.join(MAINCOMMAND)

        commands = [rmdiversion, maincommand]
        rc, rmdout, rmderr = module.run_command(RMDIVERSION, check_rc=True)

        if module.check_mode:
            messages = [rmdout.rstrip(), 'Running in check mode']
        else:
            rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True)
            messages = [rmdout.rstrip(), stdout.rstrip()]

            # Avoid if possible to orphan files (i.e. to dereference them in diversion
            # database but let them in place), but do not make renaming issues fatal.
            # BTW, this module is not about state of files involved in the diversion.
            old = diversion_before['divert']
            new = diversion_wanted['divert']
            if new != old:
                b_old = to_bytes(old, errors='surrogate_or_strict')
                b_new = to_bytes(new, errors='surrogate_or_strict')
                if os.path.exists(b_old) and not os.path.exists(b_new):
                    try:
                        os.rename(b_old, b_new)
                    except OSError:
                        pass

    if not module.check_mode:
        diversion_after = diversion_state(module, DPKG_DIVERT, path)

    diversion = diversion_after.copy()
    diff = dict()
    if module._diff:
        diff['before'] = diversion_before
        diff['after'] = diversion_after

    if diversion_after != diversion_before:
        changed = True

    if diversion_after == diversion_wanted:
        module.exit_json(changed=changed, diversion=diversion,
                         commands=commands, messages=messages, diff=diff)
    else:
        msg = "Unexpected error: see stdout and stderr for details."
        module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
                         stderr=stderr, stdout=stdout, diversion=diversion)


if __name__ == '__main__':
    main()
@@ -0,0 +1,199 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2012, Matt Wright <matt@nobien.net>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: easy_install
short_description: Installs Python libraries
description:
  - Installs Python libraries, optionally in a I(virtualenv)
options:
  name:
    type: str
    description:
      - A Python library name
    required: true
  virtualenv:
    type: str
    description:
      - An optional I(virtualenv) directory path to install into. If the
        I(virtualenv) does not exist, it is created automatically
  virtualenv_site_packages:
    description:
      - Whether the virtual environment will inherit packages from the
        global site-packages directory. Note that if this setting is
        changed on an already existing virtual environment it will not
        have any effect, the environment must be deleted and newly
        created.
    type: bool
    default: false
  virtualenv_command:
    type: str
    description:
      - The command to create the virtual environment with. For example
        C(pyvenv), C(virtualenv), C(virtualenv2).
    default: virtualenv
  executable:
    type: str
    description:
      - The explicit executable or a pathname to the executable to be used to
        run easy_install for a specific version of Python installed in the
        system. For example C(easy_install-3.3), if there are both Python 2.7
        and 3.3 installations in the system and you want to run easy_install
        for the Python 3.3 installation.
    default: easy_install
  state:
    type: str
    description:
      - The desired state of the library. C(latest) ensures that the latest version is installed.
    choices: [present, latest]
    default: present
notes:
  - Please note that the C(easy_install) module can only install Python
    libraries. Thus this module is not able to remove libraries. It is
    generally recommended to use the M(ansible.builtin.pip) module which you can first install
    using M(community.general.easy_install).
  - Also note that I(virtualenv) must be installed on the remote host if the
    C(virtualenv) parameter is specified.
requirements: [ "virtualenv" ]
author: "Matt Wright (@mattupstate)"
'''

EXAMPLES = '''
- name: Install or update pip
  community.general.easy_install:
    name: pip
    state: latest

- name: Install Bottle into the specified virtualenv
  community.general.easy_install:
    name: bottle
    virtualenv: /webapps/myapp/venv
'''

import os
import os.path
import tempfile
from ansible.module_utils.basic import AnsibleModule


def install_package(module, name, easy_install, executable_arguments):
    cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name)
    rc, out, err = module.run_command(cmd)
    return rc, out, err

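# Heuristic: a 'Downloading' line in the easy_install --dry-run output is
# taken as evidence that the package is not yet installed locally.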
def _is_package_installed(module, name, easy_install, executable_arguments):
    # Copy and add to the arguments
    executable_arguments = executable_arguments[:]
    executable_arguments.append('--dry-run')
    rc, out, err = install_package(module, name, easy_install, executable_arguments)
    if rc:
        module.fail_json(msg=err)
    return 'Downloading' not in out


def _get_easy_install(module, env=None, executable=None):
    candidate_easy_inst_basenames = ['easy_install']
    easy_install = None
    if executable is not None:
        if os.path.isabs(executable):
            easy_install = executable
        else:
            candidate_easy_inst_basenames.insert(0, executable)
    if easy_install is None:
        if env is None:
            opt_dirs = []
        else:
            # Try easy_install with the virtualenv directory first.
            opt_dirs = ['%s/bin' % env]
        for basename in candidate_easy_inst_basenames:
            easy_install = module.get_bin_path(basename, False, opt_dirs)
            if easy_install is not None:
                break
        # easy_install should have been found by now. The final call to
        # get_bin_path will trigger fail_json.
        if easy_install is None:
            basename = candidate_easy_inst_basenames[0]
            easy_install = module.get_bin_path(basename, True, opt_dirs)
    return easy_install


def main():
    arg_spec = dict(
        name=dict(required=True),
        state=dict(required=False,
                   default='present',
                   choices=['present', 'latest'],
                   type='str'),
        virtualenv=dict(default=None, required=False),
        virtualenv_site_packages=dict(default=False, type='bool'),
        virtualenv_command=dict(default='virtualenv', required=False),
        executable=dict(default='easy_install', required=False),
    )

    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)

    name = module.params['name']
    env = module.params['virtualenv']
    executable = module.params['executable']
    site_packages = module.params['virtualenv_site_packages']
    virtualenv_command = module.params['virtualenv_command']
    executable_arguments = []
    if module.params['state'] == 'latest':
        executable_arguments.append('--upgrade')

    rc = 0
    err = ''
    out = ''

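    # Create the virtualenv on demand; its bin/ directory is then preferred
    # when locating the easy_install executable below.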
    if env:
        virtualenv = module.get_bin_path(virtualenv_command, True)

        if not os.path.exists(os.path.join(env, 'bin', 'activate')):
            if module.check_mode:
                module.exit_json(changed=True)
            command = '%s %s' % (virtualenv, env)
            if site_packages:
                command += ' --system-site-packages'
            cwd = tempfile.gettempdir()
            rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd)

            rc += rc_venv
            out += out_venv
            err += err_venv

    easy_install = _get_easy_install(module, env, executable)

    cmd = None
    changed = False
    installed = _is_package_installed(module, name, easy_install, executable_arguments)

    if not installed:
        if module.check_mode:
            module.exit_json(changed=True)
        rc_easy_inst, out_easy_inst, err_easy_inst = install_package(module, name, easy_install, executable_arguments)

        rc += rc_easy_inst
        out += out_easy_inst
        err += err_easy_inst

        changed = True

    if rc != 0:
        module.fail_json(msg=err, cmd=cmd)

    module.exit_json(changed=changed, binary=easy_install,
                     name=name, virtualenv=env)


if __name__ == '__main__':
    main()
@@ -0,0 +1,188 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013, Peter Sprygada <sprygada@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: ejabberd_user
author: "Peter Sprygada (@privateip)"
short_description: Manages users for ejabberd servers
requirements:
  - ejabberd with mod_admin_extra
description:
  - This module provides user management for ejabberd servers
options:
  username:
    type: str
    description:
      - the name of the user to manage
    required: true
  host:
    type: str
    description:
      - the ejabberd host associated with this username
    required: true
  password:
    type: str
    description:
      - the password to assign to the username
    required: false
  logging:
    description:
      - enables or disables the local syslog facility for this module
    required: false
    default: false
    type: bool
  state:
    type: str
    description:
      - describe the desired state of the user to be managed
    required: false
    default: 'present'
    choices: [ 'present', 'absent' ]
notes:
  - Password parameter is required for state == present only
  - Passwords must be stored in clear text for this release
  - The ejabberd configuration file must include mod_admin_extra as a module.
'''
EXAMPLES = '''
# Example playbook entries using the ejabberd_user module to manage users state.

- name: Create a user if it does not exist
  community.general.ejabberd_user:
    username: test
    host: server
    password: password

- name: Delete a user if it exists
  community.general.ejabberd_user:
    username: test
    host: server
    state: absent
'''

import syslog

from ansible.module_utils.basic import AnsibleModule


class EjabberdUser(object):
    """ This object represents a user resource for an ejabberd server. The
    object manages user creation and deletion using ejabberdctl. The following
    commands are currently supported:
        * ejabberdctl register
        * ejabberdctl unregister
    """

    def __init__(self, module):
        self.module = module
        self.logging = module.params.get('logging')
        self.state = module.params.get('state')
        self.host = module.params.get('host')
        self.user = module.params.get('username')
        self.pwd = module.params.get('password')

    @property
    def changed(self):
        """ This method will check the current user and see if the password has
        changed. It returns True if the stored credentials do not match the
        supplied ones, and False if they do.
        """
        rc, out, err = self.run_command('check_password', [self.user, self.host, self.pwd])
        return bool(rc)

    @property
    def exists(self):
        """ This method will check to see if the supplied username exists for
        the host specified. If the user exists True is returned, otherwise
        False is returned.
        """
        rc, out, err = self.run_command('check_account', [self.user, self.host])
        return not bool(rc)

    def log(self, entry):
        """ This method will log information to the local syslog facility """
        if self.logging:
            syslog.openlog('ansible-%s' % self.module._name)
            syslog.syslog(syslog.LOG_NOTICE, entry)

    def run_command(self, cmd, options):
        """ This method will run the specified ejabberdctl command and return
        the result using the Ansible common module
        """
        cmd = [self.module.get_bin_path('ejabberdctl', required=True), cmd] + options
        self.log('command: %s' % " ".join(cmd))
        return self.module.run_command(cmd)

    def update(self):
        """ The update method will update the credentials for the user provided
        """
        return self.run_command('change_password', [self.user, self.host, self.pwd])

    def create(self):
        """ The create method will create a new user on the host with the
        password provided
        """
        return self.run_command('register', [self.user, self.host, self.pwd])

    def delete(self):
        """ The delete method will delete the user from the host
        """
        return self.run_command('unregister', [self.user, self.host])


def main():
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(required=True, type='str'),
            username=dict(required=True, type='str'),
            password=dict(type='str', no_log=True),
            state=dict(default='present', choices=['present', 'absent']),
            logging=dict(default=False, type='bool')  # deprecate in favour of c.g.syslogger?
        ),
        required_if=[
            ('state', 'present', ['password']),
        ],
        supports_check_mode=True,
    )

    obj = EjabberdUser(module)

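    # rc stays None when no ejabberdctl command was run; this is how the
    # final 'changed' result is derived below.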
    rc = None
    result = dict(changed=False)

    if obj.state == 'absent':
        if obj.exists:
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = obj.delete()
            if rc != 0:
                module.fail_json(msg=err, rc=rc)

    elif obj.state == 'present':
        if not obj.exists:
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = obj.create()
        elif obj.changed:
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = obj.update()
        if rc is not None and rc != 0:
            module.fail_json(msg=err, rc=rc)

    if rc is None:
        result['changed'] = False
    else:
        result['changed'] = True

    module.exit_json(**result)


if __name__ == '__main__':
    main()
@@ -0,0 +1,302 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Mathew Davies <thepixeldeveloper@googlemail.com>
# Copyright (c) 2017, Sam Doran <sdoran@redhat.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: elasticsearch_plugin
short_description: Manage Elasticsearch plugins
description:
  - Manages Elasticsearch plugins.
author:
  - Mathew Davies (@ThePixelDeveloper)
  - Sam Doran (@samdoran)
options:
  name:
    description:
      - Name of the plugin to install.
    required: true
    type: str
  state:
    description:
      - Desired state of a plugin.
    choices: ["present", "absent"]
    default: present
    type: str
  src:
    description:
      - Optionally set the source location to retrieve the plugin from. This can be a file://
        URL to install from a local file, or a remote URL. If this is not set, the plugin
        location is just based on the name.
      - The name parameter must match the descriptor in the plugin ZIP specified.
      - Is only used if the state would change, which is solely checked based on the name
        parameter. If, for example, the plugin is already installed, changing this has no
        effect.
      - For ES 1.x use url.
    required: false
    type: str
  url:
    description:
      - Set exact URL to download the plugin from (Only works for ES 1.x).
      - For ES 2.x and higher, use src.
    required: false
    type: str
  timeout:
    description:
      - "Timeout setting: 30s, 1m, 1h..."
      - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch > 5.0.
    default: 1m
    type: str
  force:
    description:
      - "Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console detection fails."
    default: false
    type: bool
  plugin_bin:
    description:
      - Location of the plugin binary. If this file is not found, the default plugin binaries will be used.
      - The default changed in Ansible 2.4 to None.
    type: path
  plugin_dir:
    description:
      - Your configured plugin directory specified in Elasticsearch.
    default: /usr/share/elasticsearch/plugins/
    type: path
  proxy_host:
    description:
      - Proxy host to use during plugin installation.
    type: str
  proxy_port:
    description:
      - Proxy port to use during plugin installation.
    type: str
  version:
    description:
      - Version of the plugin to be installed.
        If plugin exists with previous version, it will NOT be updated.
    type: str
'''

EXAMPLES = '''
- name: Install Elasticsearch Head plugin in Elasticsearch 2.x
  community.general.elasticsearch_plugin:
    name: mobz/elasticsearch-head
    state: present

- name: Install a specific version of Elasticsearch Head in Elasticsearch 2.x
  community.general.elasticsearch_plugin:
    name: mobz/elasticsearch-head
    version: 2.0.0

- name: Uninstall Elasticsearch head plugin in Elasticsearch 2.x
  community.general.elasticsearch_plugin:
    name: mobz/elasticsearch-head
    state: absent

- name: Install a specific plugin in Elasticsearch >= 5.0
  community.general.elasticsearch_plugin:
    name: analysis-icu
    state: present

- name: Install the ingest-geoip plugin with a forced installation
  community.general.elasticsearch_plugin:
    name: ingest-geoip
    state: present
    force: true
'''

import os

from ansible.module_utils.basic import AnsibleModule


PACKAGE_STATE_MAP = dict(
    present="install",
    absent="remove"
)

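# Known plugin tool locations: 'elasticsearch-plugin' (ES 5.x and later) and
# the legacy 'plugin' script (ES 2.x and older).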
PLUGIN_BIN_PATHS = tuple([
    '/usr/share/elasticsearch/bin/elasticsearch-plugin',
    '/usr/share/elasticsearch/bin/plugin'
])


def parse_plugin_repo(string):
    elements = string.split("/")

    # We first consider the simplest form: pluginname
    repo = elements[0]

    # We consider the form: username/pluginname
    if len(elements) > 1:
        repo = elements[1]

    # remove elasticsearch- prefix
    # remove es- prefix
    for string in ("elasticsearch-", "es-"):
        if repo.startswith(string):
            return repo[len(string):]

    return repo


def is_plugin_present(plugin_name, plugin_dir):
    return os.path.isdir(os.path.join(plugin_dir, plugin_name))


def parse_error(string):
    reason = "ERROR: "
    try:
        return string[string.index(reason) + len(reason):].strip()
    except ValueError:
        return string


def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_host, proxy_port, timeout, force):
    cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"]]
    is_old_command = (os.path.basename(plugin_bin) == 'plugin')

    # Timeout and version are only valid for plugin, not elasticsearch-plugin
    if is_old_command:
        if timeout:
            cmd_args.append("--timeout %s" % timeout)

        if version:
            plugin_name = plugin_name + '/' + version
            cmd_args[2] = plugin_name

    if proxy_host and proxy_port:
        cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))

    # Legacy ES 1.x
    if url:
        cmd_args.append("--url %s" % url)

    if force:
        cmd_args.append("--batch")
    if src:
        cmd_args.append(src)
    else:
        cmd_args.append(plugin_name)

cmd = " ".join(cmd_args)
|
||||
|
||||
if module.check_mode:
|
||||
rc, out, err = 0, "check mode", ""
|
||||
else:
|
||||
rc, out, err = module.run_command(cmd)
|
||||
|
||||
if rc != 0:
|
||||
reason = parse_error(out)
|
||||
module.fail_json(msg="Installing plugin '%s' failed: %s" % (plugin_name, reason), err=err)
|
||||
|
||||
return True, cmd, out, err
|
||||
|
||||
|
||||
def remove_plugin(module, plugin_bin, plugin_name):
|
||||
cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)]
|
||||
|
||||
cmd = " ".join(cmd_args)
|
||||
|
||||
if module.check_mode:
|
||||
rc, out, err = 0, "check mode", ""
|
||||
else:
|
||||
rc, out, err = module.run_command(cmd)
|
||||
|
||||
if rc != 0:
|
||||
reason = parse_error(out)
|
||||
module.fail_json(msg="Removing plugin '%s' failed: %s" % (plugin_name, reason), err=err)
|
||||
|
||||
return True, cmd, out, err
|
||||
|
||||
|
||||
def get_plugin_bin(module, plugin_bin=None):
|
||||
# Use the plugin_bin that was supplied first before trying other options
|
||||
valid_plugin_bin = None
|
||||
if plugin_bin and os.path.isfile(plugin_bin):
|
||||
valid_plugin_bin = plugin_bin
|
||||
|
||||
else:
|
||||
# Add the plugin_bin passed into the module to the top of the list of paths to test,
|
||||
# testing for that binary name first before falling back to the default paths.
|
||||
bin_paths = list(PLUGIN_BIN_PATHS)
|
||||
if plugin_bin and plugin_bin not in bin_paths:
|
||||
bin_paths.insert(0, plugin_bin)
|
||||
|
||||
# Get separate lists of dirs and binary names from the full paths to the
|
||||
# plugin binaries.
|
||||
plugin_dirs = list(set([os.path.dirname(x) for x in bin_paths]))
|
||||
plugin_bins = list(set([os.path.basename(x) for x in bin_paths]))
|
||||
|
||||
# Check for the binary names in the default system paths as well as the path
|
||||
# specified in the module arguments.
|
||||
for bin_file in plugin_bins:
|
||||
valid_plugin_bin = module.get_bin_path(bin_file, opt_dirs=plugin_dirs)
|
||||
if valid_plugin_bin:
|
||||
break
|
||||
|
||||
if not valid_plugin_bin:
|
||||
module.fail_json(msg='%s does not exist and no other valid plugin installers were found. Make sure Elasticsearch is installed.' % plugin_bin)
|
||||
|
||||
return valid_plugin_bin
|
||||
|
||||
|
||||
def main():
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
name=dict(required=True),
|
||||
state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())),
|
||||
src=dict(default=None),
|
||||
url=dict(default=None),
|
||||
timeout=dict(default="1m"),
|
||||
force=dict(type='bool', default=False),
|
||||
plugin_bin=dict(type="path"),
|
||||
plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"),
|
||||
proxy_host=dict(default=None),
|
||||
proxy_port=dict(default=None),
|
||||
version=dict(default=None)
|
||||
),
|
||||
mutually_exclusive=[("src", "url")],
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
name = module.params["name"]
|
||||
state = module.params["state"]
|
||||
url = module.params["url"]
|
||||
src = module.params["src"]
|
||||
timeout = module.params["timeout"]
|
||||
force = module.params["force"]
|
||||
plugin_bin = module.params["plugin_bin"]
|
||||
plugin_dir = module.params["plugin_dir"]
|
||||
proxy_host = module.params["proxy_host"]
|
||||
proxy_port = module.params["proxy_port"]
|
||||
version = module.params["version"]
|
||||
|
||||
# Search provided path and system paths for valid binary
|
||||
plugin_bin = get_plugin_bin(module, plugin_bin)
|
||||
|
||||
repo = parse_plugin_repo(name)
|
||||
present = is_plugin_present(repo, plugin_dir)
|
||||
|
||||
# skip if the state is correct
|
||||
if (present and state == "present") or (state == "absent" and not present):
|
||||
module.exit_json(changed=False, name=name, state=state)
|
||||
|
||||
if state == "present":
|
||||
changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, src, url, proxy_host, proxy_port, timeout, force)
|
||||
|
||||
elif state == "absent":
|
||||
changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
|
||||
|
||||
module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,175 @@
|
||||
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, Luca 'remix_tj' Lorenzetto <lorenzetto.luca@gmail.com>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
#

from __future__ import (absolute_import, division, print_function)

__metaclass__ = type


DOCUMENTATION = '''
---
module: emc_vnx_sg_member

short_description: Manage storage group member on EMC VNX


description:
    - "This module manages the members of an existing storage group."

extends_documentation_fragment:
    - community.general.emc.emc_vnx


options:
    name:
        description:
            - Name of the Storage group to manage.
        required: true
        type: str
    lunid:
        description:
            - LUN id to be added.
        required: true
        type: int
    state:
        description:
            - Indicates the desired lunid state.
            - C(present) ensures specified lunid is present in the Storage Group.
            - C(absent) ensures specified lunid is absent from the Storage Group.
        default: present
        choices: [ "present", "absent" ]
        type: str


author:
    - Luca 'remix_tj' Lorenzetto (@remixtj)
'''

EXAMPLES = '''
- name: Add lun to storage group
  community.general.emc_vnx_sg_member:
    name: sg01
    sp_address: sp1a.fqdn
    sp_user: sysadmin
    sp_password: sysadmin
    lunid: 100
    state: present

- name: Remove lun from storage group
  community.general.emc_vnx_sg_member:
    name: sg01
    sp_address: sp1a.fqdn
    sp_user: sysadmin
    sp_password: sysadmin
    lunid: 100
    state: absent
'''

RETURN = '''
hluid:
    description: LUNID that hosts attached to the storage group will see.
    type: int
    returned: success
'''

import traceback

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.general.plugins.module_utils.storage.emc.emc_vnx import emc_vnx_argument_spec

LIB_IMP_ERR = None
try:
    from storops import VNXSystem
    from storops.exception import VNXCredentialError, VNXStorageGroupError, \
        VNXAluAlreadyAttachedError, VNXAttachAluError, VNXDetachAluNotFoundError
    HAS_LIB = True
except Exception:
    LIB_IMP_ERR = traceback.format_exc()
    HAS_LIB = False


def run_module():
    module_args = dict(
        name=dict(type='str', required=True),
        lunid=dict(type='int', required=True),
        state=dict(default='present', choices=['present', 'absent']),
    )

    module_args.update(emc_vnx_argument_spec)

    result = dict(
        changed=False,
        hluid=None
    )

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )

    if not HAS_LIB:
        module.fail_json(msg=missing_required_lib('storops >= 0.5.10'),
                         exception=LIB_IMP_ERR)

    sp_user = module.params['sp_user']
    sp_address = module.params['sp_address']
    sp_password = module.params['sp_password']
    alu = module.params['lunid']
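    # In VNX terms, 'lunid' is presumably the ALU (array LUN id), while the
    # HLU (host LUN id) returned as 'hluid' is what attached hosts will see.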

    # if the user is working with this module in only check mode we do not
    # want to make any changes to the environment, just report the current
    # state with no modifications
    if module.check_mode:
        module.exit_json(**result)

    try:
        vnx = VNXSystem(sp_address, sp_user, sp_password)
        sg = vnx.get_sg(module.params['name'])
        if sg.existed:
            if module.params['state'] == 'present':
                if not sg.has_alu(alu):
                    try:
                        result['hluid'] = sg.attach_alu(alu)
                        result['changed'] = True
                    except VNXAluAlreadyAttachedError:
                        result['hluid'] = sg.get_hlu(alu)
                    except (VNXAttachAluError, VNXStorageGroupError) as e:
                        module.fail_json(msg='Error attaching {0}: '
                                             '{1} '.format(alu, to_native(e)),
                                         **result)
                else:
                    result['hluid'] = sg.get_hlu(alu)
            if module.params['state'] == 'absent' and sg.has_alu(alu):
                try:
                    sg.detach_alu(alu)
                    result['changed'] = True
                except VNXDetachAluNotFoundError:
                    # being not attached when using absent is OK
                    pass
                except VNXStorageGroupError as e:
                    module.fail_json(msg='Error detaching alu {0}: '
                                         '{1} '.format(alu, to_native(e)),
                                     **result)
        else:
            module.fail_json(msg='No such storage group named '
                                 '{0}'.format(module.params['name']),
                             **result)
    except VNXCredentialError as e:
        module.fail_json(msg='{0}'.format(to_native(e)), **result)

    module.exit_json(**result)


def main():
    run_module()


if __name__ == '__main__':
    main()
@@ -0,0 +1,254 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, Jean-Philippe Evrard <jean-philippe@evrard.me>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: etcd3
short_description: Set or delete key value pairs from an etcd3 cluster
requirements:
    - etcd3
description:
    - Sets or deletes values in an etcd3 cluster using its v3 API.
    - Needs the python C(etcd3) library to work.
options:
    key:
        type: str
        description:
            - The key where the information is stored in the cluster.
        required: true
    value:
        type: str
        description:
            - The information stored.
        required: true
    host:
        type: str
        description:
            - The IP address of the cluster.
        default: 'localhost'
    port:
        type: int
        description:
            - The port number used to connect to the cluster.
        default: 2379
    state:
        type: str
        description:
            - The state of the value for the key.
            - Can be C(present) or C(absent).
        required: true
        choices: [ present, absent ]
    user:
        type: str
        description:
            - The etcd user to authenticate with.
    password:
        type: str
        description:
            - The password to use for authentication.
            - Required if I(user) is defined.
    ca_cert:
        type: path
        description:
            - The Certificate Authority to use to verify the etcd host.
            - Required if I(client_cert) and I(client_key) are defined.
    client_cert:
        type: path
        description:
            - PEM formatted certificate chain file to be used for SSL client authentication.
            - Required if I(client_key) is defined.
    client_key:
        type: path
        description:
            - PEM formatted file that contains your private key to be used for SSL client authentication.
            - Required if I(client_cert) is defined.
    timeout:
        type: int
        description:
            - The socket level timeout in seconds.
author:
    - Jean-Philippe Evrard (@evrardjp)
    - Victor Fauth (@vfauth)
'''

EXAMPLES = """
- name: Store a value "baz3" under the key "foo" for a cluster located at "http://localhost:2379"
  community.general.etcd3:
    key: "foo"
    value: "baz3"
    host: "localhost"
    port: 2379
    state: "present"

- name: Authenticate using user/password combination with a timeout of 10 seconds
  community.general.etcd3:
    key: "foo"
    value: "baz3"
    state: "present"
    user: "someone"
    password: "password123"
    timeout: 10

- name: Authenticate using TLS certificates
  community.general.etcd3:
    key: "foo"
    value: "baz3"
    state: "present"
    ca_cert: "/etc/ssl/certs/CA_CERT.pem"
    client_cert: "/etc/ssl/certs/cert.crt"
    client_key: "/etc/ssl/private/key.pem"
"""

RETURN = '''
key:
    description: The key that was queried.
    returned: always
    type: str
old_value:
    description: The previous value in the cluster.
    returned: always
    type: str
'''

import traceback

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native


try:
    import etcd3
    HAS_ETCD = True
    ETCD_IMP_ERR = None
except ImportError:
    ETCD_IMP_ERR = traceback.format_exc()
    HAS_ETCD = False


def run_module():
    # define the available arguments/parameters that a user can pass to
    # the module
    module_args = dict(
        key=dict(type='str', required=True, no_log=False),
        value=dict(type='str', required=True),
        host=dict(type='str', default='localhost'),
        port=dict(type='int', default=2379),
        state=dict(type='str', required=True, choices=['present', 'absent']),
        user=dict(type='str'),
        password=dict(type='str', no_log=True),
        ca_cert=dict(type='path'),
        client_cert=dict(type='path'),
        client_key=dict(type='path'),
        timeout=dict(type='int'),
    )

    # seed the result dict: 'changed' reports whether this module effectively
    # modified the target, and any other keys carry data back to the caller
    # for consumption, for example, in a subsequent task
    result = dict(
        changed=False,
    )

    # the AnsibleModule object is our abstraction for working with Ansible:
    # it carries the args/params passed to the execution and declares whether
    # the module supports check mode
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True,
        required_together=[['client_cert', 'client_key'], ['user', 'password']],
    )

    # It is possible to set `ca_cert` to verify the server identity without
    # setting `client_cert` or `client_key` to authenticate the client,
    # so required_together is enough.
    # Due to `required_together=[['client_cert', 'client_key']]`, checking the
    # presence of either `client_cert` or `client_key` is enough.
    if module.params['ca_cert'] is None and module.params['client_cert'] is not None:
        module.fail_json(msg="The 'ca_cert' parameter must be defined when 'client_cert' and 'client_key' are present.")

    result['key'] = module.params.get('key')
    module.params['cert_cert'] = module.params.pop('client_cert')
    module.params['cert_key'] = module.params.pop('client_key')
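    # The python 'etcd3' client expects these keyword arguments under the names
    # cert_cert/cert_key, hence the rename from the module's client_cert/client_key.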

    if not HAS_ETCD:
        module.fail_json(msg=missing_required_lib('etcd3'), exception=ETCD_IMP_ERR)

    allowed_keys = ['host', 'port', 'ca_cert', 'cert_cert', 'cert_key',
                    'timeout', 'user', 'password']
    # TODO(evrardjp): Move this back to a dict comprehension when python 2.7 is
    # the minimum supported version
    # client_params = {key: value for key, value in module.params.items() if key in allowed_keys}
    client_params = dict()
    for key, value in module.params.items():
        if key in allowed_keys:
            client_params[key] = value
    try:
        etcd = etcd3.client(**client_params)
    except Exception as exp:
        module.fail_json(msg='Cannot connect to etcd cluster: %s' % (to_native(exp)),
                         exception=traceback.format_exc())
    try:
        cluster_value = etcd.get(module.params['key'])
    except Exception as exp:
        module.fail_json(msg='Cannot reach data: %s' % (to_native(exp)),
                         exception=traceback.format_exc())

    # Make the cluster_value[0] a string for string comparisons
    result['old_value'] = to_native(cluster_value[0])
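    # Note: etcd.get() returns a (value, metadata) tuple, and the value is None
    # when the key does not exist; the state handling below relies on that.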

    if module.params['state'] == 'absent':
        if cluster_value[0] is not None:
            if module.check_mode:
                result['changed'] = True
            else:
                try:
                    etcd.delete(module.params['key'])
                except Exception as exp:
                    module.fail_json(msg='Cannot delete %s: %s' % (module.params['key'], to_native(exp)),
                                     exception=traceback.format_exc())
                else:
                    result['changed'] = True
    elif module.params['state'] == 'present':
        if result['old_value'] != module.params['value']:
            if module.check_mode:
                result['changed'] = True
            else:
                try:
                    etcd.put(module.params['key'], module.params['value'])
                except Exception as exp:
                    module.fail_json(msg='Cannot add or edit key %s: %s' % (module.params['key'], to_native(exp)),
                                     exception=traceback.format_exc())
                else:
                    result['changed'] = True
    else:
        module.fail_json(msg="State not recognized")

    # in the event of a successful module execution, simply call
    # AnsibleModule.exit_json(), passing in the key/value results
    module.exit_json(**result)


def main():
    run_module()


if __name__ == '__main__':
    main()
@@ -0,0 +1,73 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: facter
short_description: Runs the discovery program I(facter) on the remote system
description:
    - Runs the C(facter) discovery program
      (U(https://github.com/puppetlabs/facter)) on the remote system, returning
      JSON data that can be useful for inventory purposes.
options:
    arguments:
        description:
            - Specifies arguments for facter.
        type: list
        elements: str
requirements:
    - facter
    - ruby-json
author:
    - Ansible Core Team
    - Michael DeHaan
'''

EXAMPLES = '''
# Example command-line invocation
# ansible www.example.net -m facter

- name: Execute facter with no arguments
  community.general.facter:

- name: Execute facter with arguments
  community.general.facter:
    arguments:
      - -p
      - system_uptime
      - timezone
      - is_virtual
'''

import json

from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(
        argument_spec=dict(
            arguments=dict(required=False, type='list', elements='str')
        )
    )

    facter_path = module.get_bin_path(
        'facter',
        opt_dirs=['/opt/puppetlabs/bin'])

    cmd = [facter_path, "--json"]
    if module.params['arguments']:
        cmd += module.params['arguments']
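    # e.g. cmd == [facter_path, '--json', '-p', 'system_uptime', 'timezone']
    # for the second task in EXAMPLES above (illustrative)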

    rc, out, err = module.run_command(cmd, check_rc=True)
    module.exit_json(**json.loads(out))


if __name__ == '__main__':
    main()
@@ -0,0 +1,488 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2021, quidame <quidame@poivron.org>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = r'''
---
module: filesize

short_description: Create a file with a given size, or resize it if it exists

description:
  - This module is a simple wrapper around C(dd) to create, extend or truncate
    a file, given its size. It can be used to manage swap files (that require
    contiguous blocks) or alternatively, huge sparse files.

author:
  - quidame (@quidame)

version_added: "3.0.0"

options:
  path:
    description:
      - Path of the regular file to create or resize.
    type: path
    required: true
  size:
    description:
      - Requested size of the file.
      - The value is a number (either C(int) or C(float)) optionally followed
        by a multiplicative suffix, that can be one of C(B) (bytes), C(KB) or
        C(kB) (= 1000B), C(MB) or C(mB) (= 1000kB), C(GB) or C(gB) (= 1000MB),
        and so on for C(T), C(P), C(E), C(Z) and C(Y); or alternatively one of
        C(K), C(k) or C(KiB) (= 1024B); C(M), C(m) or C(MiB) (= 1024KiB);
        C(G), C(g) or C(GiB) (= 1024MiB); and so on.
      - If the multiplicative suffix is not provided, the value is treated as
        an integer number of blocks of I(blocksize) bytes each (float values
        are rounded to the closest integer).
      - When the I(size) value is equal to the current file size, nothing is done.
      - When the I(size) value is bigger than the current file size, bytes from
        I(source) (if I(sparse) is not C(true)) are appended to the file
        without truncating it, in other words, without modifying the existing
        bytes of the file.
      - When the I(size) value is smaller than the current file size, it is
        truncated to the requested value without modifying bytes before this
        value.
      - That means that a file of any arbitrary size can be grown to any other
        arbitrary size, and then resized down to its initial size without
        modifying its initial content.
    type: raw
    required: true
  blocksize:
    description:
      - Size of blocks, in bytes if not followed by a multiplicative suffix.
      - The numeric value (before the unit) C(MUST) be an integer (or a C(float)
        if it equals an integer).
      - If not set, the size of blocks is guessed from the OS and commonly
        results in C(512) or C(4096) bytes; this value is used internally by
        the module, and as the block unit when I(size) has no unit.
    type: raw
  source:
    description:
      - Device or file that provides input data to provision the file.
      - This parameter is ignored when I(sparse=true).
    type: path
    default: /dev/zero
  force:
    description:
      - Whether or not to overwrite the file if it exists, in other words, to
        truncate it from 0. When C(true), the module is not idempotent, that
        means it always reports I(changed=true).
      - I(force=true) and I(sparse=true) are mutually exclusive.
    type: bool
    default: false
  sparse:
    description:
      - Whether or not the file to create should be a sparse file.
      - This option is effective only on newly created files, or when growing a
        file, only for the bytes to append.
      - This option is not supported on OSes or filesystems not supporting sparse files.
      - I(force=true) and I(sparse=true) are mutually exclusive.
    type: bool
    default: false
  unsafe_writes:
    description:
      - This option is silently ignored. This module always modifies file
        size in-place.

notes:
  - This module supports C(check_mode) and C(diff).

requirements:
  - dd (Data Duplicator) in PATH

extends_documentation_fragment:
  - ansible.builtin.files

seealso:
  - name: dd(1) manpage for Linux
    description: Manual page of the GNU/Linux's dd implementation (from GNU coreutils).
    link: https://man7.org/linux/man-pages/man1/dd.1.html

  - name: dd(1) manpage for IBM AIX
    description: Manual page of the IBM AIX's dd implementation.
    link: https://www.ibm.com/support/knowledgecenter/ssw_aix_72/d_commands/dd.html

  - name: dd(1) manpage for Mac OSX
    description: Manual page of the Mac OSX's dd implementation.
    link: https://www.unix.com/man-page/osx/1/dd/

  - name: dd(1M) manpage for Solaris
    description: Manual page of the Oracle Solaris's dd implementation.
    link: https://docs.oracle.com/cd/E36784_01/html/E36871/dd-1m.html

  - name: dd(1) manpage for FreeBSD
    description: Manual page of the FreeBSD's dd implementation.
    link: https://www.freebsd.org/cgi/man.cgi?dd(1)

  - name: dd(1) manpage for OpenBSD
    description: Manual page of the OpenBSD's dd implementation.
    link: https://man.openbsd.org/dd

  - name: dd(1) manpage for NetBSD
    description: Manual page of the NetBSD's dd implementation.
    link: https://man.netbsd.org/dd.1

  - name: busybox(1) manpage for Linux
    description: Manual page of the GNU/Linux's busybox, that provides its own dd implementation.
    link: https://www.unix.com/man-page/linux/1/busybox
'''

EXAMPLES = r'''
- name: Create a file of 1G filled with null bytes
  community.general.filesize:
    path: /var/bigfile
    size: 1G

- name: Extend the file to 2G (2*1024^3)
  community.general.filesize:
    path: /var/bigfile
    size: 2G

- name: Reduce the file to 2GB (2*1000^3)
  community.general.filesize:
    path: /var/bigfile
    size: 2GB

- name: Fill a file with random bytes for backing a LUKS device
  community.general.filesize:
    path: ~/diskimage.luks
    size: 512.0 MiB
    source: /dev/urandom

- name: Take a backup of MBR boot code into a file, overwriting it if it exists
  community.general.filesize:
    path: /media/sdb1/mbr.bin
    size: 440B
    source: /dev/sda
    force: true

- name: Create/resize a sparse file of/to 8TB
  community.general.filesize:
    path: /var/local/sparsefile
    size: 8TB
    sparse: true

- name: Create a file with specific size and attributes, to be used as swap space
  community.general.filesize:
    path: /var/swapfile
    size: 2G
    blocksize: 512B
    mode: u=rw,go=
    owner: root
    group: root
'''

RETURN = r'''
cmd:
  description: Command executed to create or resize the file.
  type: str
  returned: when changed or failed
  sample: /usr/bin/dd if=/dev/zero of=/var/swapfile bs=1048576 seek=3072 count=1024

filesize:
  description: Dictionary of sizes related to the file.
  type: dict
  returned: always
  contains:
    blocks:
      description: Number of blocks in the file.
      type: int
      sample: 500
    blocksize:
      description: Size of the blocks in bytes.
      type: int
      sample: 1024
    bytes:
      description: Size of the file, in bytes, as the product of C(blocks) and C(blocksize).
      type: int
      sample: 512000
    iec:
      description: Size of the file, in human-readable format, following IEC standard.
      type: str
      sample: 500.0 KiB
    si:
      description: Size of the file, in human-readable format, following SI standard.
      type: str
      sample: 512.0 kB

size_diff:
  description: Difference (positive or negative) between old size and new size, in bytes.
  type: int
  sample: -1234567890
  returned: always

path:
  description: Realpath of the file if it is a symlink, otherwise the same as the module's I(path) parameter.
  type: str
  sample: /var/swap0
  returned: always
'''


import re
import os
import math

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native


# These are the multiplicative suffixes understood (or returned) by dd and
# others (ls, df, lvresize, lsblk...).
SIZE_UNITS = dict(
    B=1,
    kB=1000**1, KB=1000**1, KiB=1024**1, K=1024**1, k=1024**1,
    MB=1000**2, mB=1000**2, MiB=1024**2, M=1024**2, m=1024**2,
    GB=1000**3, gB=1000**3, GiB=1024**3, G=1024**3, g=1024**3,
    TB=1000**4, tB=1000**4, TiB=1024**4, T=1024**4, t=1024**4,
    PB=1000**5, pB=1000**5, PiB=1024**5, P=1024**5, p=1024**5,
    EB=1000**6, eB=1000**6, EiB=1024**6, E=1024**6, e=1024**6,
    ZB=1000**7, zB=1000**7, ZiB=1024**7, Z=1024**7, z=1024**7,
    YB=1000**8, yB=1000**8, YiB=1024**8, Y=1024**8, y=1024**8,
)
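
# For instance, SIZE_UNITS['MB'] == 1000**2 (SI) while SIZE_UNITS['MiB'] and the
# single-letter SIZE_UNITS['M'] == 1024**2 (IEC), mirroring dd's own suffixes.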


def bytes_to_human(size, iec=False):
    """Return human-readable size (with SI or IEC suffix) from bytes. This is
    only to populate the returned result of the module, not to handle the
    file itself (we only rely on bytes for that).
    """
    unit = 'B'
    for (u, v) in SIZE_UNITS.items():
        if size < v:
            continue
        if iec:
            if 'i' not in u or size / v >= 1024:
                continue
        else:
            if v % 5 or size / v >= 1000:
                continue
        unit = u

    hsize = round(size / SIZE_UNITS[unit], 2)
    if unit == 'B':
        hsize = int(hsize)

    unit = re.sub(r'^(.)', lambda m: m.expand(r'\1').upper(), unit)
    if unit == 'KB':
        unit = 'kB'

    return '%s %s' % (str(hsize), unit)
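
# Illustrative outputs, derived from the table above (they also match the
# 'si' and 'iec' samples in RETURN):
#   bytes_to_human(512000)            -> '512.0 kB'
#   bytes_to_human(512000, iec=True)  -> '500.0 KiB'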


def smart_blocksize(size, unit, product, bsize):
    """Ensure the total size can be written as blocks*blocksize, with blocks
    and blocksize being integers.
    """
    if not product % bsize:
        return bsize

    # Basically, for a file of 8kB (=8000B), a system block size of 4096 bytes
    # is not usable. The smallest integer number of kB to work with 512B blocks
    # is 64, the next ones being 128, 192, 256, and so on.

    unit_size = SIZE_UNITS[unit]

    if size == int(size):
        if unit_size > SIZE_UNITS['MiB']:
            if unit_size % 5:
                return SIZE_UNITS['MiB']
            return SIZE_UNITS['MB']
        return unit_size

    if unit == 'B':
        raise AssertionError("byte is the smallest unit and requires an integer value")

    if 0 < product < bsize:
        return product

    for bsz in (1024, 1000, 512, 256, 128, 100, 64, 32, 16, 10, 8, 4, 2):
        if not product % bsz:
            return bsz
    return 1
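
# For example (per the docstring case above), smart_blocksize(8, 'kB', 8000, 4096)
# falls back to 1000, so that 8000 bytes can be written as 8 blocks of 1000 bytes.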


def split_size_unit(string, isint=False):
    """Split a string between the size value (int or float) and the unit.
    Support optional space(s) between the numeric value and the unit.
    """
    unit = re.sub(r'(\d|\.)', r'', string).strip()
    value = float(re.sub(r'%s' % unit, r'', string).strip())
    if isint and unit in ('B', ''):
        if int(value) != value:
            raise AssertionError("invalid blocksize value: bytes require an integer value")

    if not unit:
        unit = None
        product = int(round(value))
    else:
        if unit not in SIZE_UNITS.keys():
            raise AssertionError("invalid size unit (%s): unit must be one of %s, or none." %
                                 (unit, ', '.join(sorted(SIZE_UNITS, key=SIZE_UNITS.get))))
        product = int(round(value * SIZE_UNITS[unit]))
    return value, unit, product
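
# Illustrative results:
#   split_size_unit('2G')         -> (2.0, 'G', 2147483648)
#   split_size_unit('512', True)  -> (512.0, None, 512)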


def size_string(value):
    """Convert a raw value to a string, but only if it is an integer, a float
    or a string itself.
    """
    if not isinstance(value, (int, float, str)):
        raise AssertionError("invalid value type (%s): size must be integer, float or string" % type(value))
    return str(value)


def size_spec(args):
    """Return a dictionary with size specifications, especially the size in
    bytes (after rounding it to an integer number of blocks).
    """
    blocksize_in_bytes = split_size_unit(args['blocksize'], True)[2]
    if blocksize_in_bytes == 0:
        raise AssertionError("block size cannot be equal to zero")

    size_value, size_unit, size_result = split_size_unit(args['size'])
    if not size_unit:
        blocks = int(math.ceil(size_value))
    else:
        blocksize_in_bytes = smart_blocksize(size_value, size_unit, size_result, blocksize_in_bytes)
        blocks = int(math.ceil(size_result / blocksize_in_bytes))

    args['size_diff'] = round_bytes = int(blocks * blocksize_in_bytes)
    args['size_spec'] = dict(blocks=blocks, blocksize=blocksize_in_bytes, bytes=round_bytes,
                             iec=bytes_to_human(round_bytes, True),
                             si=bytes_to_human(round_bytes))
    return args['size_spec']


def current_size(args):
    """Return the size of the file at the given location if it exists, or None."""
    path = args['path']
    if os.path.exists(path):
        if not os.path.isfile(path):
            raise AssertionError("%s exists but is not a regular file" % path)
        args['file_size'] = os.stat(path).st_size
    else:
        args['file_size'] = None
    return args['file_size']


def complete_dd_cmdline(args, dd_cmd):
    """Compute dd options to grow or truncate a file."""
    if args['file_size'] == args['size_spec']['bytes'] and not args['force']:
        # Nothing to do.
        return list()

    bs = args['size_spec']['blocksize']

    # For sparse files (create, truncate, grow): write count=0 block.
    if args['sparse']:
        seek = args['size_spec']['blocks']
    elif args['force'] or not os.path.exists(args['path']):  # Create file
        seek = 0
    elif args['size_diff'] < 0:  # Truncate file
        seek = args['size_spec']['blocks']
    elif args['size_diff'] % bs:  # Grow file
        seek = int(args['file_size'] / bs) + 1
    else:
        seek = int(args['file_size'] / bs)

    count = args['size_spec']['blocks'] - seek
    dd_cmd += ['bs=%s' % str(bs), 'seek=%s' % str(seek), 'count=%s' % str(count)]

    return dd_cmd
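
# The resulting command line matches the RETURN sample above, e.g.:
#   dd if=/dev/zero of=/var/swapfile bs=1048576 seek=3072 count=1024
# i.e. seek past the existing 3072 blocks of 1 MiB and append 1024 more.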


def main():
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='path', required=True),
            size=dict(type='raw', required=True),
            blocksize=dict(type='raw'),
            source=dict(type='path', default='/dev/zero'),
            sparse=dict(type='bool', default=False),
            force=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
    )
    args = dict(**module.params)
    diff = dict(before=dict(), after=dict())

    if args['sparse'] and args['force']:
        module.fail_json(msg='parameters values are mutually exclusive: force=true|sparse=true')
    if not os.path.exists(os.path.dirname(args['path'])):
        module.fail_json(msg='parent directory of the file must exist prior to run this module')
    if not args['blocksize']:
        args['blocksize'] = str(os.statvfs(os.path.dirname(args['path'])).f_frsize)

    try:
        args['size'] = size_string(args['size'])
        args['blocksize'] = size_string(args['blocksize'])
        initial_filesize = current_size(args)
        size_descriptors = size_spec(args)
    except AssertionError as err:
        module.fail_json(msg=to_native(err))

    expected_filesize = size_descriptors['bytes']
    if initial_filesize:
        args['size_diff'] = expected_filesize - initial_filesize
    diff['after']['size'] = expected_filesize
    diff['before']['size'] = initial_filesize

    result = dict(
        changed=args['force'],
        size_diff=args['size_diff'],
        path=args['path'],
        filesize=size_descriptors)

    dd_bin = module.get_bin_path('dd', True)
    dd_cmd = [dd_bin, 'if=%s' % args['source'], 'of=%s' % args['path']]

    if expected_filesize != initial_filesize or args['force']:
        result['cmd'] = ' '.join(complete_dd_cmdline(args, dd_cmd))
        if module.check_mode:
            result['changed'] = True
        else:
            result['rc'], dummy, result['stderr'] = module.run_command(dd_cmd)

            diff['after']['size'] = result_filesize = result['size_diff'] = current_size(args)
            if initial_filesize:
                result['size_diff'] = result_filesize - initial_filesize
            if not args['force']:
                result['changed'] = result_filesize != initial_filesize

            if result['rc']:
                msg = "dd error while creating file %s with size %s from source %s: see stderr for details" % (
                    args['path'], args['size'], args['source'])
                module.fail_json(msg=msg, **result)
            if result_filesize != expected_filesize:
                msg = "module error while creating file %s with size %s from source %s: file is %s bytes long" % (
                    args['path'], args['size'], args['source'], result_filesize)
                module.fail_json(msg=msg, **result)

    # dd follows symlinks, and so does this module, while the file module doesn't.
    # If we call it, this is to manage the file's mode, owner and so on, not the
    # symlink's ones.
    file_params = dict(**module.params)
    if os.path.islink(args['path']):
        file_params['path'] = result['path'] = os.path.realpath(args['path'])

    if args['file_size'] is not None:
        file_args = module.load_file_common_arguments(file_params)
        result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff)
    result['diff'] = diff

    module.exit_json(**result)


if __name__ == '__main__':
    main()
@@ -0,0 +1,600 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2021, quidame <quidame@poivron.org>
# Copyright (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
author:
    - Alexander Bulimov (@abulimov)
    - quidame (@quidame)
module: filesystem
short_description: Makes a filesystem
description:
    - This module creates a filesystem.
options:
    state:
        description:
            - If I(state=present), the filesystem is created if it doesn't already
              exist, that is the default behaviour if I(state) is omitted.
            - If I(state=absent), filesystem signatures on I(dev) are wiped if it
              contains a filesystem (as known by C(blkid)).
            - When I(state=absent), all other options but I(dev) are ignored, and the
              module doesn't fail if the device I(dev) doesn't actually exist.
        type: str
        choices: [ present, absent ]
        default: present
        version_added: 1.3.0
    fstype:
        choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap, ufs ]
        description:
            - Filesystem type to be created. This option is required with
              I(state=present) (or if I(state) is omitted).
            - ufs support has been added in community.general 3.4.0.
        type: str
        aliases: [type]
    dev:
        description:
            - Target path to a block device (Linux) or character device (FreeBSD) or
              regular file (both).
            - When setting Linux-specific filesystem types on FreeBSD, this module
              only works when applying to regular files, aka disk images.
            - Currently C(lvm) (Linux-only) and C(ufs) (FreeBSD-only) don't support
              a regular file as their target I(dev).
            - Support for character devices on FreeBSD has been added in community.general 3.4.0.
        type: path
        required: true
        aliases: [device]
    force:
        description:
            - If C(true), allows creating a new filesystem on a device that already has a filesystem.
        type: bool
        default: false
    resizefs:
        description:
            - If C(true), if the block device and filesystem size differ, grow the filesystem into the space.
            - Supported for C(btrfs), C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(ufs) and C(vfat) filesystems.
              Attempts to resize other filesystem types will fail.
            - XFS will only grow if mounted. Currently, the module is based on commands
              from the C(util-linux) package to perform operations, so resizing of XFS is
              not supported on FreeBSD systems.
            - vFAT will likely fail if C(fatresize < 1.04).
        type: bool
        default: false
    opts:
        description:
            - List of options to be passed to the C(mkfs) command.
        type: str
requirements:
    - Uses specific tools related to the I(fstype) for creating or resizing a
      filesystem (from packages e2fsprogs, xfsprogs, dosfstools, and so on).
    - Uses generic tools mostly related to the Operating System (Linux or
      FreeBSD) or available on both, as C(blkid).
    - On FreeBSD, either C(util-linux) or C(e2fsprogs) package is required.
notes:
    - Potential filesystems on I(dev) are checked using C(blkid). In case C(blkid)
      is unable to detect a filesystem (and in case C(fstyp) on FreeBSD is also
      unable to detect a filesystem), this filesystem is overwritten even if
      I(force) is C(false).
    - On FreeBSD systems, both C(e2fsprogs) and C(util-linux) packages provide
      a C(blkid) command that is compatible with this module. However, these
      packages conflict with each other, and only the C(util-linux) package
      provides the command required to not fail when I(state=absent).
    - This module supports I(check_mode).
seealso:
    - module: community.general.filesize
    - module: ansible.posix.mount
'''

EXAMPLES = '''
- name: Create a ext2 filesystem on /dev/sdb1
  community.general.filesystem:
    fstype: ext2
    dev: /dev/sdb1

- name: Create a ext4 filesystem on /dev/sdb1 and check disk blocks
  community.general.filesystem:
    fstype: ext4
    dev: /dev/sdb1
    opts: -cc

- name: Blank filesystem signature on /dev/sdb1
  community.general.filesystem:
    dev: /dev/sdb1
    state: absent

- name: Create a filesystem on top of a regular file
  community.general.filesystem:
    dev: /path/to/disk.img
    fstype: vfat
'''

import os
import platform
import re
import stat

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native

from ansible_collections.community.general.plugins.module_utils.version import LooseVersion


class Device(object):
    def __init__(self, module, path):
        self.module = module
        self.path = path

    def size(self):
        """ Return size in bytes of device. Returns int """
        statinfo = os.stat(self.path)
        if stat.S_ISBLK(statinfo.st_mode):
            blockdev_cmd = self.module.get_bin_path("blockdev", required=True)
            dummy, out, dummy = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True)
            devsize_in_bytes = int(out)
        elif stat.S_ISCHR(statinfo.st_mode) and platform.system() == 'FreeBSD':
            diskinfo_cmd = self.module.get_bin_path("diskinfo", required=True)
            dummy, out, dummy = self.module.run_command([diskinfo_cmd, self.path], check_rc=True)
            devsize_in_bytes = int(out.split()[2])
        elif os.path.isfile(self.path):
            devsize_in_bytes = os.path.getsize(self.path)
        else:
            self.module.fail_json(changed=False, msg="Target device not supported: %s" % self)

        return devsize_in_bytes

    def get_mountpoint(self):
        """Return (first) mountpoint of device. Returns None when not mounted."""
        cmd_findmnt = self.module.get_bin_path("findmnt", required=True)

        # find mountpoint
        rc, mountpoint, dummy = self.module.run_command([cmd_findmnt, "--mtab", "--noheadings", "--output",
                                                         "TARGET", "--source", self.path], check_rc=False)
        if rc != 0:
            mountpoint = None
        else:
            mountpoint = mountpoint.split('\n')[0]

        return mountpoint

    def __str__(self):
        return self.path


class Filesystem(object):

    MKFS = None
    MKFS_FORCE_FLAGS = []
    INFO = None
    GROW = None
    GROW_MAX_SPACE_FLAGS = []
    GROW_MOUNTPOINT_ONLY = False

    LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}

    def __init__(self, module):
        self.module = module

    @property
    def fstype(self):
        return type(self).__name__

    def get_fs_size(self, dev):
        """Return size in bytes of filesystem on device (integer).
        Should query the info with a per-fstype command that can access the
        device whether it is mounted or not, and parse the command output.
        Parser must ensure to return an integer, or raise a ValueError.
        """
        raise NotImplementedError()

    def create(self, opts, dev):
        if self.module.check_mode:
            return

        mkfs = self.module.get_bin_path(self.MKFS, required=True)
        cmd = [mkfs] + self.MKFS_FORCE_FLAGS + opts + [str(dev)]
        self.module.run_command(cmd, check_rc=True)

    def wipefs(self, dev):
        if self.module.check_mode:
            return

        # wipefs comes with the util-linux package (as 'blockdev' & 'findmnt'
        # above), which is ported to FreeBSD. Using dd as a portable fallback
        # is not doable here because it would need get_mountpoint() (to prevent
        # corruption of a mounted filesystem), and 'findmnt' is not available
        # on FreeBSD, even in the util-linux port for this OS.
        wipefs = self.module.get_bin_path('wipefs', required=True)
        cmd = [wipefs, "--all", str(dev)]
        self.module.run_command(cmd, check_rc=True)

    def grow_cmd(self, target):
        """Build and return the resizefs commandline as list."""
        cmdline = [self.module.get_bin_path(self.GROW, required=True)]
        cmdline += self.GROW_MAX_SPACE_FLAGS + [target]
        return cmdline

    def grow(self, dev):
        """Get dev and fs size and compare. Returns stdout of used command."""
        devsize_in_bytes = dev.size()

        try:
            fssize_in_bytes = self.get_fs_size(dev)
        except NotImplementedError:
            self.module.fail_json(msg="module does not support resizing %s filesystem yet" % self.fstype)
        except ValueError as err:
            self.module.warn("unable to process %s output '%s'" % (self.INFO, to_native(err)))
            self.module.fail_json(msg="unable to process %s output for %s" % (self.INFO, dev))

        if not fssize_in_bytes < devsize_in_bytes:
            self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev))
        elif self.module.check_mode:
            self.module.exit_json(changed=True, msg="resizing filesystem %s on device %s" % (self.fstype, dev))

        if self.GROW_MOUNTPOINT_ONLY:
            mountpoint = dev.get_mountpoint()
            if not mountpoint:
                self.module.fail_json(msg="%s needs to be mounted for %s operations" % (dev, self.fstype))
            grow_target = mountpoint
        else:
            grow_target = str(dev)

        dummy, out, dummy = self.module.run_command(self.grow_cmd(grow_target), check_rc=True)
        return out


class Ext(Filesystem):
|
||||
MKFS_FORCE_FLAGS = ['-F']
|
||||
INFO = 'tune2fs'
|
||||
GROW = 'resize2fs'
|
||||
|
||||
def get_fs_size(self, dev):
|
||||
"""Get Block count and Block size and return their product."""
|
||||
cmd = self.module.get_bin_path(self.INFO, required=True)
|
||||
dummy, out, dummy = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
|
||||
|
||||
block_count = block_size = None
|
||||
for line in out.splitlines():
|
||||
if 'Block count:' in line:
|
||||
block_count = int(line.split(':')[1].strip())
|
||||
elif 'Block size:' in line:
|
||||
block_size = int(line.split(':')[1].strip())
|
||||
if None not in (block_size, block_count):
|
||||
break
|
||||
else:
|
||||
raise ValueError(repr(out))
|
||||
|
||||
return block_size * block_count
|
||||
|
||||
|
||||
class Ext2(Ext):
|
||||
MKFS = 'mkfs.ext2'
|
||||
|
||||
|
||||
class Ext3(Ext):
|
||||
MKFS = 'mkfs.ext3'
|
||||
|
||||
|
||||
class Ext4(Ext):
|
||||
MKFS = 'mkfs.ext4'
|
||||
|
||||
|
||||
class XFS(Filesystem):
|
||||
MKFS = 'mkfs.xfs'
|
||||
MKFS_FORCE_FLAGS = ['-f']
|
||||
INFO = 'xfs_info'
|
||||
GROW = 'xfs_growfs'
|
||||
GROW_MOUNTPOINT_ONLY = True
|
||||
|
||||
def get_fs_size(self, dev):
|
||||
"""Get bsize and blocks and return their product."""
|
||||
cmdline = [self.module.get_bin_path(self.INFO, required=True)]
|
||||
|
||||
# Depending on the versions, xfs_info is able to get info from the
|
||||
# device, whenever it is mounted or not, or only if unmounted, or
|
||||
# only if mounted, or not at all. For any version until now, it is
|
||||
# able to query info from the mountpoint. So try it first, and use
|
||||
# device as the last resort: it may or may not work.
|
||||
mountpoint = dev.get_mountpoint()
|
||||
if mountpoint:
|
||||
cmdline += [mountpoint]
|
||||
else:
|
||||
cmdline += [str(dev)]
|
||||
dummy, out, dummy = self.module.run_command(cmdline, check_rc=True, environ_update=self.LANG_ENV)
|
||||
|
||||
block_size = block_count = None
|
||||
for line in out.splitlines():
|
||||
col = line.split('=')
|
||||
if col[0].strip() == 'data':
|
||||
if col[1].strip() == 'bsize':
|
||||
block_size = int(col[2].split()[0])
|
||||
if col[2].split()[1] == 'blocks':
|
||||
block_count = int(col[3].split(',')[0])
|
||||
if None not in (block_size, block_count):
|
||||
break
|
||||
else:
|
||||
raise ValueError(repr(out))
|
||||
|
||||
return block_size * block_count
|
||||
|
||||
|
||||
class Reiserfs(Filesystem):
|
||||
MKFS = 'mkfs.reiserfs'
|
||||
MKFS_FORCE_FLAGS = ['-q']
|
||||
|
||||
|
||||
class Btrfs(Filesystem):
|
||||
MKFS = 'mkfs.btrfs'
|
||||
INFO = 'btrfs'
|
||||
GROW = 'btrfs'
|
||||
GROW_MAX_SPACE_FLAGS = ['filesystem', 'resize', 'max']
|
||||
GROW_MOUNTPOINT_ONLY = True
|
||||
|
||||
def __init__(self, module):
|
||||
super(Btrfs, self).__init__(module)
|
||||
mkfs = self.module.get_bin_path(self.MKFS, required=True)
|
||||
dummy, stdout, stderr = self.module.run_command([mkfs, '--version'], check_rc=True)
|
||||
match = re.search(r" v([0-9.]+)", stdout)
|
||||
if not match:
|
||||
# v0.20-rc1 use stderr
|
||||
match = re.search(r" v([0-9.]+)", stderr)
|
||||
if match:
|
||||
# v0.20-rc1 doesn't have --force parameter added in following version v3.12
|
||||
if LooseVersion(match.group(1)) >= LooseVersion('3.12'):
|
||||
self.MKFS_FORCE_FLAGS = ['-f']
|
||||
else:
|
||||
# assume version is greater or equal to 3.12
|
||||
self.MKFS_FORCE_FLAGS = ['-f']
|
||||
self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' % (stdout, stderr))
|
||||
|
||||
def get_fs_size(self, dev):
|
||||
"""Return size in bytes of filesystem on device (integer)."""
|
||||
mountpoint = dev.get_mountpoint()
|
||||
if not mountpoint:
|
||||
self.module.fail_json(msg="%s needs to be mounted for %s operations" % (dev, self.fstype))
|
||||
|
||||
dummy, stdout, dummy = self.module.run_command([self.module.get_bin_path(self.INFO),
|
||||
'filesystem', 'usage', '-b', mountpoint], check_rc=True)
|
||||
for line in stdout.splitlines():
|
||||
if "Device size" in line:
|
||||
return int(line.split()[-1])
|
||||
raise ValueError(repr(stdout))
|
||||
|
||||
|
||||
class Ocfs2(Filesystem):
|
||||
MKFS = 'mkfs.ocfs2'
|
||||
MKFS_FORCE_FLAGS = ['-Fx']
|
||||
|
||||
|
||||
class F2fs(Filesystem):
|
||||
MKFS = 'mkfs.f2fs'
|
||||
INFO = 'dump.f2fs'
|
||||
GROW = 'resize.f2fs'
|
||||
|
||||
def __init__(self, module):
|
||||
super(F2fs, self).__init__(module)
|
||||
mkfs = self.module.get_bin_path(self.MKFS, required=True)
|
||||
dummy, out, dummy = self.module.run_command([mkfs, os.devnull], check_rc=False, environ_update=self.LANG_ENV)
|
||||
# Looking for " F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)"
|
||||
# mkfs.f2fs displays version since v1.2.0
|
||||
match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out)
|
||||
if match is not None:
|
||||
# Since 1.9.0, mkfs.f2fs check overwrite before make filesystem
|
||||
# before that version -f switch wasn't used
|
||||
            if LooseVersion(match.group(1)) >= LooseVersion('1.9.0'):
                self.MKFS_FORCE_FLAGS = ['-f']

    def get_fs_size(self, dev):
        """Get sector size and total FS sectors and return their product."""
        cmd = self.module.get_bin_path(self.INFO, required=True)
        dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV)
        sector_size = sector_count = None
        for line in out.splitlines():
            if 'Info: sector size = ' in line:
                # expected: 'Info: sector size = 512'
                sector_size = int(line.split()[4])
            elif 'Info: total FS sectors = ' in line:
                # expected: 'Info: total FS sectors = 102400 (50 MB)'
                sector_count = int(line.split()[5])
            if None not in (sector_size, sector_count):
                break
        else:
            raise ValueError(repr(out))

        return sector_size * sector_count


class VFAT(Filesystem):
    INFO = 'fatresize'
    GROW = 'fatresize'
    GROW_MAX_SPACE_FLAGS = ['-s', 'max']

    def __init__(self, module):
        super(VFAT, self).__init__(module)
        if platform.system() == 'FreeBSD':
            self.MKFS = 'newfs_msdos'
        else:
            self.MKFS = 'mkfs.vfat'

    def get_fs_size(self, dev):
        """Get and return size of filesystem, in bytes."""
        cmd = self.module.get_bin_path(self.INFO, required=True)
        dummy, out, dummy = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
        fssize = None
        for line in out.splitlines()[1:]:
            parts = line.split(':', 1)
            if len(parts) < 2:
                continue
            param, value = parts
            if param.strip() in ('Size', 'Cur size'):
                fssize = int(value.strip())
                break
        else:
            raise ValueError(repr(out))

        return fssize


class LVM(Filesystem):
    MKFS = 'pvcreate'
    MKFS_FORCE_FLAGS = ['-f']
    INFO = 'pvs'
    GROW = 'pvresize'

    def get_fs_size(self, dev):
        """Get and return PV size, in bytes."""
        cmd = self.module.get_bin_path(self.INFO, required=True)
        dummy, size, dummy = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True)
        pv_size = int(size)
        return pv_size


class Swap(Filesystem):
    MKFS = 'mkswap'
    MKFS_FORCE_FLAGS = ['-f']


class UFS(Filesystem):
    MKFS = 'newfs'
    INFO = 'dumpfs'
    GROW = 'growfs'
    GROW_MAX_SPACE_FLAGS = ['-y']

    def get_fs_size(self, dev):
        """Get providersize and fragment size and return their product."""
        cmd = self.module.get_bin_path(self.INFO, required=True)
        dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV)

        fragmentsize = providersize = None
        for line in out.splitlines():
            if line.startswith('fsize'):
                fragmentsize = int(line.split()[1])
            elif 'providersize' in line:
                providersize = int(line.split()[-1])
            if None not in (fragmentsize, providersize):
                break
        else:
            raise ValueError(repr(out))

        return fragmentsize * providersize

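# Mapping from filesystem type names, as reported by blkid/fstyp, to their handler
# classes; 'LVM2_member' is blkid's TYPE value for an LVM physical volume.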
FILESYSTEMS = {
    'ext2': Ext2,
    'ext3': Ext3,
    'ext4': Ext4,
    'ext4dev': Ext4,
    'f2fs': F2fs,
    'reiserfs': Reiserfs,
    'xfs': XFS,
    'btrfs': Btrfs,
    'vfat': VFAT,
    'ocfs2': Ocfs2,
    'LVM2_member': LVM,
    'swap': Swap,
    'ufs': UFS,
}


def main():
    friendly_names = {
        'lvm': 'LVM2_member',
    }

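    # Build the user-facing fstype choices: swap raw blkid names (e.g. 'LVM2_member')
    # for their friendly aliases (e.g. 'lvm').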
    fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(friendly_names.keys())

    # There is no "single command" to manipulate filesystems, so we map them all out and their options
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            fstype=dict(type='str', aliases=['type'], choices=list(fstypes)),
            dev=dict(type='path', required=True, aliases=['device']),
            opts=dict(type='str'),
            force=dict(type='bool', default=False),
            resizefs=dict(type='bool', default=False),
        ),
        required_if=[
            ('state', 'present', ['fstype'])
        ],
        supports_check_mode=True,
    )

    state = module.params['state']
    dev = module.params['dev']
    fstype = module.params['fstype']
    opts = module.params['opts']
    force = module.params['force']
    resizefs = module.params['resizefs']

    mkfs_opts = []
    if opts is not None:
        mkfs_opts = opts.split()

    changed = False

    if not os.path.exists(dev):
        msg = "Device %s not found." % dev
        if state == "present":
            module.fail_json(msg=msg)
        else:
            module.exit_json(msg=msg)

    dev = Device(module, dev)

    # If blkid/fstyp cannot identify an existing filesystem, the device is treated
    # as empty, so an existing (but unrecognized) filesystem could be overwritten
    # even if force is not enabled.
    cmd = module.get_bin_path('blkid', required=True)
    rc, raw_fs, err = module.run_command([cmd, '-c', os.devnull, '-o', 'value', '-s', 'TYPE', str(dev)])
    fs = raw_fs.strip()
    if not fs and platform.system() == 'FreeBSD':
        cmd = module.get_bin_path('fstyp', required=True)
        rc, raw_fs, err = module.run_command([cmd, str(dev)])
        fs = raw_fs.strip()

    if state == "present":
        if fstype in friendly_names:
            fstype = friendly_names[fstype]

        try:
            klass = FILESYSTEMS[fstype]
        except KeyError:
            module.fail_json(changed=False, msg="module does not support this filesystem (%s) yet." % fstype)

        filesystem = klass(module)

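        # Compare handler classes rather than raw type names so aliases that share
        # an implementation (for example ext4 and ext4dev) count as the same filesystem.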
        same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype]
        if same_fs and not resizefs and not force:
            module.exit_json(changed=False)
        elif same_fs and resizefs:
            if not filesystem.GROW:
                module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype)

            out = filesystem.grow(dev)

            module.exit_json(changed=True, msg=out)
        elif fs and not force:
            module.fail_json(msg="'%s' is already used as %s, use force=true to overwrite" % (dev, fs), rc=rc, err=err)

        # create fs
        filesystem.create(mkfs_opts, dev)
        changed = True

    elif fs:
        # wipe fs signatures
        filesystem = Filesystem(module)
        filesystem.wipefs(dev)
        changed = True

    module.exit_json(changed=changed)


if __name__ == '__main__':
    main()
@@ -0,0 +1,343 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2017 John Kwiatkoski (@JayKayy) <jkwiat40@gmail.com>
# Copyright (c) 2018 Alexander Bethke (@oolongbrothers) <oolongbrothers@gmx.net>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = r'''
---
module: flatpak
short_description: Manage flatpaks
description:
  - Allows users to add or remove flatpaks.
  - See the M(community.general.flatpak_remote) module for managing flatpak remotes.
author:
  - John Kwiatkoski (@JayKayy)
  - Alexander Bethke (@oolongbrothers)
requirements:
  - flatpak
options:
  executable:
    description:
      - The path to the C(flatpak) executable to use.
      - By default, this module looks for the C(flatpak) executable on the path.
    type: path
    default: flatpak
  method:
    description:
      - The installation method to use.
      - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system)
        or only for the current C(user).
    type: str
    choices: [ system, user ]
    default: system
  name:
    description:
      - The name of the flatpak to manage. To operate on several packages this
        can accept a list of packages.
      - When used with I(state=present), I(name) can be specified as a URL to a
        C(flatpakref) file or the unique reverse DNS name that identifies a flatpak.
      - Both C(https://) and C(http://) URLs are supported.
      - When supplying a reverse DNS name, you can use the I(remote) option to specify on what remote
        to look for the flatpak. An example for a reverse DNS name is C(org.gnome.gedit).
      - When used with I(state=absent), it is recommended to specify the name in the reverse DNS
        format.
      - When supplying a URL with I(state=absent), the module will try to match the
        installed flatpak based on the name of the flatpakref to remove it. However, there is no
        guarantee that the names of the flatpakref file and the reverse DNS name of the installed
        flatpak do match.
    type: list
    elements: str
    required: true
  no_dependencies:
    description:
      - Whether to omit installing runtime dependencies.
      - This parameter is primarily implemented for integration testing this module.
        There might however be some use cases where you would want to have this, like when you are
        packaging your own flatpaks.
    type: bool
    default: false
    version_added: 3.2.0
  remote:
    description:
      - The flatpak remote (repository) to install the flatpak from.
      - By default, C(flathub) is assumed, but you do need to add the flathub flatpak_remote before
        you can use this.
      - See the M(community.general.flatpak_remote) module for managing flatpak remotes.
    type: str
    default: flathub
  state:
    description:
      - Indicates the desired package state.
    choices: [ absent, present ]
    type: str
    default: present
'''

EXAMPLES = r'''
- name: Install the spotify flatpak
  community.general.flatpak:
    name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref
    state: present

- name: Install the gedit flatpak package without dependencies (not recommended)
  community.general.flatpak:
    name: https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref
    state: present
    no_dependencies: true

- name: Install the gedit package from flathub for current user
  community.general.flatpak:
    name: org.gnome.gedit
    state: present
    method: user

- name: Install the Gnome Calendar flatpak from the gnome remote system-wide
  community.general.flatpak:
    name: org.gnome.Calendar
    state: present
    remote: gnome

- name: Install multiple packages
  community.general.flatpak:
    name:
      - org.gimp.GIMP
      - org.inkscape.Inkscape
      - org.mozilla.firefox

- name: Remove the gedit flatpak
  community.general.flatpak:
    name: org.gnome.gedit
    state: absent

- name: Remove multiple packages
  community.general.flatpak:
    name:
      - org.gimp.GIMP
      - org.inkscape.Inkscape
      - org.mozilla.firefox
    state: absent
'''

RETURN = r'''
command:
  description: The exact flatpak command that was executed
  returned: When a flatpak command has been executed
  type: str
  sample: "/usr/bin/flatpak install --user --noninteractive flathub org.gnome.Calculator"
msg:
  description: Module error message
  returned: failure
  type: str
  sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
rc:
  description: Return code from flatpak binary
  returned: When a flatpak command has been executed
  type: int
  sample: 0
stderr:
  description: Error output from flatpak binary
  returned: When a flatpak command has been executed
  type: str
  sample: "error: Error searching remote flathub: Can't find ref org.gnome.KDE"
stdout:
  description: Output from flatpak binary
  returned: When a flatpak command has been executed
  type: str
  sample: "org.gnome.Calendar/x86_64/stable\tcurrent\norg.gnome.gitg/x86_64/stable\tcurrent\n"
'''

from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.module_utils.basic import AnsibleModule

from ansible_collections.community.general.plugins.module_utils.version import LooseVersion

OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE = "Unknown option --columns=application"


def install_flat(module, binary, remote, names, method, no_dependencies):
    """Add new flatpaks."""
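    # flatpakref URLs are installed directly, while reverse-DNS IDs are installed
    # from the configured remote, so the two kinds of names are handled separately.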
    global result  # pylint: disable=global-variable-not-assigned
    uri_names = []
    id_names = []
    for name in names:
        if name.startswith('http://') or name.startswith('https://'):
            uri_names.append(name)
        else:
            id_names.append(name)
    base_command = [binary, "install", "--{0}".format(method)]
    flatpak_version = _flatpak_version(module, binary)
    if LooseVersion(flatpak_version) < LooseVersion('1.1.3'):
        base_command += ["-y"]
    else:
        base_command += ["--noninteractive"]
    if no_dependencies:
        base_command += ["--no-deps"]
    if uri_names:
        command = base_command + uri_names
        _flatpak_command(module, module.check_mode, command)
    if id_names:
        command = base_command + [remote] + id_names
        _flatpak_command(module, module.check_mode, command)
    result['changed'] = True


def uninstall_flat(module, binary, names, method):
    """Remove existing flatpaks."""
    global result  # pylint: disable=global-variable-not-assigned
    installed_flat_names = [
        _match_installed_flat_name(module, binary, name, method)
        for name in names
    ]
    command = [binary, "uninstall"]
    flatpak_version = _flatpak_version(module, binary)
    if LooseVersion(flatpak_version) < LooseVersion('1.1.3'):
        command += ["-y"]
    else:
        command += ["--noninteractive"]
    command += ["--{0}".format(method)] + installed_flat_names
    _flatpak_command(module, module.check_mode, command)
    result['changed'] = True


def flatpak_exists(module, binary, names, method):
    """Check if the flatpaks are installed."""
    command = [binary, "list", "--{0}".format(method), "--app"]
    output = _flatpak_command(module, False, command)
    installed = []
    not_installed = []
    for name in names:
        parsed_name = _parse_flatpak_name(name).lower()
        if parsed_name in output.lower():
            installed.append(name)
        else:
            not_installed.append(name)
    return installed, not_installed


def _match_installed_flat_name(module, binary, name, method):
    # This is a difficult function, since if the user supplies a flatpakref url,
    # we have to rely on a naming convention:
    # The flatpakref file name needs to match the flatpak name
    global result  # pylint: disable=global-variable-not-assigned
    parsed_name = _parse_flatpak_name(name)
    # Try running flatpak list with columns feature
    command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"]
    _flatpak_command(module, False, command, ignore_failure=True)
    if result['rc'] != 0 and OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE in result['stderr']:
        # Probably flatpak before 1.2
        matched_flatpak_name = \
            _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method)
    else:
        # Probably flatpak >= 1.2
        matched_flatpak_name = \
            _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method)

    if matched_flatpak_name:
        return matched_flatpak_name
    else:
        result['msg'] = "Flatpak removal failed: Could not match any installed flatpaks to " +\
            "the name `{0}`. ".format(_parse_flatpak_name(name)) +\
            "If you used a URL, try using the reverse DNS name of the flatpak"
        module.fail_json(**result)


def _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method):
    global result  # pylint: disable=global-variable-not-assigned
    command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"]
    output = _flatpak_command(module, False, command)
    for row in output.split('\n'):
        if parsed_name.lower() == row.lower():
            return row


def _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method):
    global result  # pylint: disable=global-variable-not-assigned
    command = [binary, "list", "--{0}".format(method), "--app"]
    output = _flatpak_command(module, False, command)
    for row in output.split('\n'):
        if parsed_name.lower() in row.lower():
            return row.split()[0]

def _parse_flatpak_name(name):
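    # Derive the flatpak name from a flatpakref URL by taking the last path
    # component and stripping its extension, e.g. '.../spotify.flatpakref' -> 'spotify'.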
    if name.startswith('http://') or name.startswith('https://'):
        file_name = urlparse(name).path.split('/')[-1]
        file_name_without_extension = file_name.split('.')[0:-1]
        common_name = ".".join(file_name_without_extension)
    else:
        common_name = name
    return common_name


def _flatpak_version(module, binary):
    global result  # pylint: disable=global-variable-not-assigned
    command = [binary, "--version"]
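    # 'flatpak --version' prints something like 'Flatpak 1.12.7'; the second
    # whitespace-separated token is the version number.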
    output = _flatpak_command(module, False, command)
    version_number = output.split()[1]
    return version_number


def _flatpak_command(module, noop, command, ignore_failure=False):
    global result  # pylint: disable=global-variable-not-assigned
    result['command'] = ' '.join(command)
    if noop:
        result['rc'] = 0
        return ""

    result['rc'], result['stdout'], result['stderr'] = module.run_command(
        command, check_rc=not ignore_failure
    )
    return result['stdout']


def main():
    # This module supports check mode
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='list', elements='str', required=True),
            remote=dict(type='str', default='flathub'),
            method=dict(type='str', default='system',
                        choices=['user', 'system']),
            state=dict(type='str', default='present',
                       choices=['absent', 'present']),
            no_dependencies=dict(type='bool', default=False),
            executable=dict(type='path', default='flatpak')
        ),
        supports_check_mode=True,
    )

    name = module.params['name']
    state = module.params['state']
    remote = module.params['remote']
    no_dependencies = module.params['no_dependencies']
    method = module.params['method']
    executable = module.params['executable']
    binary = module.get_bin_path(executable, None)

    global result
    result = dict(
        changed=False
    )

    # If the binary was not found, fail the operation
    if not binary:
        module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result)

    installed, not_installed = flatpak_exists(module, binary, name, method)
    if state == 'present' and not_installed:
        install_flat(module, binary, remote, not_installed, method, no_dependencies)
    elif state == 'absent' and installed:
        uninstall_flat(module, binary, installed, method)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
@@ -0,0 +1,214 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2017 John Kwiatkoski (@JayKayy) <jkwiat40@gmail.com>
# Copyright (c) 2018 Alexander Bethke (@oolongbrothers) <oolongbrothers@gmx.net>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = r'''
---
module: flatpak_remote
short_description: Manage flatpak repository remotes
description:
  - Allows users to add or remove flatpak remotes.
  - The flatpak remotes concept is comparable to what is called repositories in other packaging
    formats.
  - Currently, remote addition is only supported via I(flatpakrepo) file URLs.
  - Existing remotes will not be updated.
  - See the M(community.general.flatpak) module for managing flatpaks.
author:
  - John Kwiatkoski (@JayKayy)
  - Alexander Bethke (@oolongbrothers)
requirements:
  - flatpak
options:
  executable:
    description:
      - The path to the C(flatpak) executable to use.
      - By default, this module looks for the C(flatpak) executable on the path.
    type: str
    default: flatpak
  flatpakrepo_url:
    description:
      - The URL to the I(flatpakrepo) file representing the repository remote to add.
      - When used with I(state=present), the flatpak remote specified under the I(flatpakrepo_url)
        is added using the specified installation C(method).
      - When used with I(state=absent), this is not required.
      - Required when I(state=present).
    type: str
  method:
    description:
      - The installation method to use.
      - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system)
        or only for the current C(user).
    type: str
    choices: [ system, user ]
    default: system
  name:
    description:
      - The desired name for the flatpak remote to be registered under on the managed host.
      - When used with I(state=present), the remote will be added to the managed host under
        the specified I(name).
      - When used with I(state=absent) the remote with that name will be removed.
    type: str
    required: true
  state:
    description:
      - Indicates the desired package state.
    type: str
    choices: [ absent, present ]
    default: present
'''

EXAMPLES = r'''
- name: Add the Gnome flatpak remote to the system installation
  community.general.flatpak_remote:
    name: gnome
    state: present
    flatpakrepo_url: https://sdk.gnome.org/gnome-apps.flatpakrepo

- name: Add the flathub flatpak repository remote to the user installation
  community.general.flatpak_remote:
    name: flathub
    state: present
    flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo
    method: user

- name: Remove the Gnome flatpak remote from the user installation
  community.general.flatpak_remote:
    name: gnome
    state: absent
    method: user

- name: Remove the flathub remote from the system installation
  community.general.flatpak_remote:
    name: flathub
    state: absent
'''

RETURN = r'''
command:
  description: The exact flatpak command that was executed
  returned: When a flatpak command has been executed
  type: str
  sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo"
msg:
  description: Module error message
  returned: failure
  type: str
  sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
rc:
  description: Return code from flatpak binary
  returned: When a flatpak command has been executed
  type: int
  sample: 0
stderr:
  description: Error output from flatpak binary
  returned: When a flatpak command has been executed
  type: str
  sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n"
stdout:
  description: Output from flatpak binary
  returned: When a flatpak command has been executed
  type: str
  sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n"
'''

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes, to_native


def add_remote(module, binary, name, flatpakrepo_url, method):
    """Add a new remote."""
    global result  # pylint: disable=global-variable-not-assigned
    command = [binary, "remote-add", "--{0}".format(method), name, flatpakrepo_url]
    _flatpak_command(module, module.check_mode, command)
    result['changed'] = True


def remove_remote(module, binary, name, method):
    """Remove an existing remote."""
    global result  # pylint: disable=global-variable-not-assigned
    command = [binary, "remote-delete", "--{0}".format(method), "--force", name]
    _flatpak_command(module, module.check_mode, command)
    result['changed'] = True


def remote_exists(module, binary, name, method):
    """Check if the remote exists."""
    command = [binary, "remote-list", "-d", "--{0}".format(method)]
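    # 'remote-list -d' prints one remote per line, with the remote name in the
    # first whitespace-separated column.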
    # The query operation for the remote needs to be run even in check mode
    output = _flatpak_command(module, False, command)
    for line in output.splitlines():
        listed_remote = line.split()
        if len(listed_remote) == 0:
            continue
        if listed_remote[0] == to_native(name):
            return True
    return False


def _flatpak_command(module, noop, command):
    global result  # pylint: disable=global-variable-not-assigned
    result['command'] = ' '.join(command)
    if noop:
        result['rc'] = 0
        return ""

    result['rc'], result['stdout'], result['stderr'] = module.run_command(
        command, check_rc=True
    )
    return result['stdout']


def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            flatpakrepo_url=dict(type='str'),
            method=dict(type='str', default='system',
                        choices=['user', 'system']),
            state=dict(type='str', default="present",
                       choices=['absent', 'present']),
            executable=dict(type='str', default="flatpak")
        ),
        # This module supports check mode
        supports_check_mode=True,
    )

    name = module.params['name']
    flatpakrepo_url = module.params['flatpakrepo_url']
    method = module.params['method']
    state = module.params['state']
    executable = module.params['executable']
    binary = module.get_bin_path(executable, None)

    if flatpakrepo_url is None:
        flatpakrepo_url = ''

    global result
    result = dict(
        changed=False
    )

    # If the binary was not found, fail the operation
    if not binary:
        module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result)

    remote_already_exists = remote_exists(module, binary, to_bytes(name), method)

    if state == 'present' and not remote_already_exists:
        add_remote(module, binary, name, flatpakrepo_url, method)
    elif state == 'absent' and remote_already_exists:
        remove_remote(module, binary, name, method)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
@@ -0,0 +1,198 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright 2013 Matt Coddington <coddington@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: flowdock
author: "Matt Coddington (@mcodd)"
short_description: Send a message to a flowdock
description:
  - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat)
options:
  token:
    type: str
    description:
      - API token.
    required: true
  type:
    type: str
    description:
      - Whether to post to 'inbox' or 'chat'
    required: true
    choices: [ "inbox", "chat" ]
  msg:
    type: str
    description:
      - Content of the message
    required: true
  tags:
    type: str
    description:
      - Tags of the message, separated by commas.
    required: false
  external_user_name:
    type: str
    description:
      - (chat only - required) Name of the "user" sending the message
    required: false
  from_address:
    type: str
    description:
      - (inbox only - required) Email address of the message sender
    required: false
  source:
    type: str
    description:
      - (inbox only - required) Human readable identifier of the application that uses the Flowdock API
    required: false
  subject:
    type: str
    description:
      - (inbox only - required) Subject line of the message
    required: false
  from_name:
    type: str
    description:
      - (inbox only) Name of the message sender
    required: false
  reply_to:
    type: str
    description:
      - (inbox only) Email address for replies
    required: false
  project:
    type: str
    description:
      - (inbox only) Human readable identifier for more detailed message categorization
    required: false
  link:
    type: str
    description:
      - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
    required: false
  validate_certs:
    description:
      - If C(false), SSL certificates will not be validated. This should only be used
        on personally controlled sites using self-signed certificates.
    required: false
    default: true
    type: bool

requirements: [ ]
'''

EXAMPLES = '''
- name: Send a message to a flowdock
  community.general.flowdock:
    type: inbox
    token: AAAAAA
    from_address: user@example.com
    source: my cool app
    msg: test from ansible
    subject: test subject

- name: Send a message to a flowdock
  community.general.flowdock:
    type: chat
    token: AAAAAA
    external_user_name: testuser
    msg: test from ansible
    tags: tag1,tag2,tag3
'''

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import fetch_url


# ===========================================
# Module execution.
#

def main():

    module = AnsibleModule(
        argument_spec=dict(
            token=dict(required=True, no_log=True),
            msg=dict(required=True),
            type=dict(required=True, choices=["inbox", "chat"]),
            external_user_name=dict(required=False),
            from_address=dict(required=False),
            source=dict(required=False),
            subject=dict(required=False),
            from_name=dict(required=False),
            reply_to=dict(required=False),
            project=dict(required=False),
            tags=dict(required=False),
            link=dict(required=False),
            validate_certs=dict(default=True, type='bool'),
        ),
        supports_check_mode=True
    )

    type = module.params["type"]
    token = module.params["token"]
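    # Choose the push API endpoint based on the message type.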
    if type == 'inbox':
        url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token)
    else:
        url = "https://api.flowdock.com/v1/messages/chat/%s" % (token)

    params = {}

    # required params
    params['content'] = module.params["msg"]

    # required params for the 'chat' type
    if module.params['external_user_name']:
        if type == 'inbox':
            module.fail_json(msg="external_user_name is not valid for the 'inbox' type")
        else:
            params['external_user_name'] = module.params["external_user_name"]
    elif type == 'chat':
        module.fail_json(msg="external_user_name is required for the 'chat' type")

    # required params for the 'inbox' type
    for item in ['from_address', 'source', 'subject']:
        if module.params[item]:
            if type == 'chat':
                module.fail_json(msg="%s is not valid for the 'chat' type" % item)
            else:
                params[item] = module.params[item]
        elif type == 'inbox':
            module.fail_json(msg="%s is required for the 'inbox' type" % item)

    # optional params
    if module.params["tags"]:
        params['tags'] = module.params["tags"]

    # optional params for the 'inbox' type
    for item in ['from_name', 'reply_to', 'project', 'link']:
        if module.params[item]:
            if type == 'chat':
                module.fail_json(msg="%s is not valid for the 'chat' type" % item)
            else:
                params[item] = module.params[item]

    # If we're in check mode, just exit pretending like we succeeded
    if module.check_mode:
        module.exit_json(changed=False)

    # Send the data to Flowdock
    data = urlencode(params)
    response, info = fetch_url(module, url, data=data)
    if info['status'] != 200:
        module.fail_json(msg="unable to send msg: %s" % info['msg'])

    module.exit_json(changed=True, msg=module.params["msg"])


if __name__ == '__main__':
    main()
@@ -0,0 +1,188 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2019 Gregory Thiemonge <gregory.thiemonge@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = r'''
---
module: gandi_livedns
author:
  - Gregory Thiemonge (@gthiemonge)
version_added: "2.3.0"
short_description: Manage Gandi LiveDNS records
description:
  - "Manages DNS records by the Gandi LiveDNS API, see the docs: U(https://doc.livedns.gandi.net/)."
options:
  api_key:
    description:
      - Account API token.
    type: str
    required: true
  record:
    description:
      - Record to add.
    type: str
    required: true
  state:
    description:
      - Whether the record(s) should exist or not.
    type: str
    choices: [ absent, present ]
    default: present
  ttl:
    description:
      - The TTL to give the new record.
      - Required when I(state=present).
    type: int
  type:
    description:
      - The type of DNS record to create.
    type: str
    required: true
  values:
    description:
      - The record values.
      - Required when I(state=present).
    type: list
    elements: str
  domain:
    description:
      - The name of the Domain to work with (for example, "example.com").
    required: true
    type: str
notes:
  - Supports C(check_mode).
'''

EXAMPLES = r'''
- name: Create a test A record to point to 127.0.0.1 in the my.com domain
  community.general.gandi_livedns:
    domain: my.com
    record: test
    type: A
    values:
      - 127.0.0.1
    ttl: 7200
    api_key: dummyapitoken
  register: record

- name: Create a mail CNAME record to www.my.com domain
  community.general.gandi_livedns:
    domain: my.com
    type: CNAME
    record: mail
    values:
      - www
    ttl: 7200
    api_key: dummyapitoken
    state: present

- name: Change its TTL
  community.general.gandi_livedns:
    domain: my.com
    type: CNAME
    record: mail
    values:
      - www
    ttl: 10800
    api_key: dummyapitoken
    state: present

- name: Delete the record
  community.general.gandi_livedns:
    domain: my.com
    type: CNAME
    record: mail
    api_key: dummyapitoken
    state: absent
'''

RETURN = r'''
record:
  description: A dictionary containing the record data.
  returned: success, except on record deletion
  type: dict
  contains:
    values:
      description: The record content (details depend on record type).
      returned: success
      type: list
      elements: str
      sample:
        - 192.0.2.91
        - 192.0.2.92
    record:
      description: The record name.
      returned: success
      type: str
      sample: www
    ttl:
      description: The time-to-live for the record.
      returned: success
      type: int
      sample: 300
    type:
      description: The record type.
      returned: success
      type: str
      sample: A
    domain:
      description: The domain associated with the record.
      returned: success
      type: str
      sample: my.com
'''


from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.gandi_livedns_api import GandiLiveDNSAPI


def main():
    module = AnsibleModule(
        argument_spec=dict(
            api_key=dict(type='str', required=True, no_log=True),
            record=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            ttl=dict(type='int'),
            type=dict(type='str', required=True),
            values=dict(type='list', elements='str'),
            domain=dict(type='str', required=True),
        ),
        supports_check_mode=True,
        required_if=[
            ('state', 'present', ['values', 'ttl']),
        ],
    )

    gandi_api = GandiLiveDNSAPI(module)

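    # required_if above guarantees values and ttl are present for state=present;
    # deletion works from the remaining identifying fields.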
    if module.params['state'] == 'present':
        ret, changed = gandi_api.ensure_dns_record(module.params['record'],
                                                   module.params['type'],
                                                   module.params['ttl'],
                                                   module.params['values'],
                                                   module.params['domain'])
    else:
        ret, changed = gandi_api.delete_dns_record(module.params['record'],
                                                   module.params['type'],
                                                   module.params['values'],
                                                   module.params['domain'])

    result = dict(
        changed=changed,
    )
    if ret:
        result['record'] = gandi_api.build_result(ret,
                                                  module.params['domain'])

    module.exit_json(**result)


if __name__ == '__main__':
    main()
@@ -0,0 +1,235 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2016, Kenneth D. Evensen <kevensen@redhat.com>
# Copyright (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
module: gconftool2
author:
  - Kenneth D. Evensen (@kevensen)
short_description: Edit GNOME Configurations
description:
  - This module allows for the manipulation of GNOME 2 Configuration via
    gconftool-2. Please see the gconftool-2(1) man pages for more details.
options:
  key:
    type: str
    description:
      - A GConf preference key is an element in the GConf repository
        that corresponds to an application preference. See man gconftool-2(1).
    required: true
  value:
    type: str
    description:
      - Preference keys typically have simple values such as strings,
        integers, or lists of strings and integers. This is ignored if the state
        is "get". See man gconftool-2(1).
  value_type:
    type: str
    description:
      - The type of value being set. This is ignored if the state is "get".
    choices: [ bool, float, int, string ]
  state:
    type: str
    description:
      - The action to take upon the key/value.
      - State C(get) is deprecated and will be removed in community.general 8.0.0. Please use the module M(community.general.gconftool2_info) instead.
    required: true
    choices: [ absent, get, present ]
  config_source:
    type: str
    description:
      - Specify a configuration source to use rather than the default path.
        See man gconftool-2(1).
  direct:
    description:
      - Access the config database directly, bypassing server. If direct is
        specified then the config_source must be specified as well.
        See man gconftool-2(1).
    type: bool
    default: false
'''

EXAMPLES = """
- name: Change the widget font to "Serif 12"
  community.general.gconftool2:
    key: "/desktop/gnome/interface/font_name"
    value_type: "string"
    value: "Serif 12"
"""

RETURN = '''
key:
  description: The key specified in the module parameters
  returned: success
  type: str
  sample: /desktop/gnome/interface/font_name
value_type:
  description: The type of the value that was changed
  returned: success
  type: str
  sample: string
value:
  description: The value of the preference key after executing the module
  returned: success
  type: str
  sample: "Serif 12"
...
'''

from ansible.module_utils.basic import AnsibleModule


class GConf2Preference(object):
    def __init__(self, ansible, key, value_type, value,
                 direct=False, config_source=""):
        self.ansible = ansible
        self.key = key
        self.value_type = value_type
        self.value = value
        self.config_source = config_source
        self.direct = direct

    def value_already_set(self):
        return False

    def call(self, call_type, fail_onerr=True):
        """ Helper function to perform gconftool-2 operations """
        config_source = []
        direct = []
        changed = False
        out = ''

        # If the configuration source is different from the default, create
        # the argument
        if self.config_source is not None and len(self.config_source) > 0:
            config_source = ["--config-source", self.config_source]

        # If direct is true, create the argument
        if self.direct:
            direct = ["--direct"]

        # Execute the call
        cmd = ["gconftool-2"]
        try:
            # If the call is "get", then we don't need as many parameters and
            # we can ignore some
            if call_type == 'get':
                self.ansible.deprecate(
                    msg="State 'get' is deprecated. Please use the module community.general.gconftool2_info instead",
                    version="8.0.0", collection_name="community.general"
                )
                cmd.extend(["--get", self.key])
            # Otherwise, we will use all relevant parameters
            elif call_type == 'set':
                cmd.extend(direct)
                cmd.extend(config_source)
                cmd.extend(["--type", self.value_type, "--{0}".format(call_type), self.key, self.value])
            elif call_type == 'unset':
                cmd.extend(["--unset", self.key])

            # Start external command
            rc, out, err = self.ansible.run_command(cmd)

            if err and fail_onerr:
                self.ansible.fail_json(msg='gconftool-2 failed with '
                                           'error: %s' % (str(err)))
            else:
                changed = True

        except OSError as exception:
            self.ansible.fail_json(msg='gconftool-2 failed with exception: '
                                       '%s' % exception)
        return changed, out.rstrip()


def main():
    # Setup the Ansible module
    module = AnsibleModule(
        argument_spec=dict(
            key=dict(type='str', required=True, no_log=False),
            value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']),
            value=dict(type='str'),
            state=dict(type='str', required=True, choices=['absent', 'get', 'present']),
            direct=dict(type='bool', default=False),
            config_source=dict(type='str'),
        ),
        supports_check_mode=True
    )

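    # Map module states to the corresponding gconftool-2 operations.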
    state_values = {"present": "set", "absent": "unset", "get": "get"}

    # Assign module values to dictionary values
    key = module.params['key']
    value_type = module.params['value_type']
    # Normalize boolean-like strings case-insensitively; 'value' may be None
    # (for example with state=get).
    value = module.params['value']
    if value is not None and value.lower() in ("true", "false"):
        value = value.lower()

    state = state_values[module.params['state']]
    direct = module.params['direct']
    config_source = module.params['config_source']

    # Initialize some variables for later
    change = False
    new_value = ''

    if state != "get":
        if value is None or value == "":
            module.fail_json(msg='State %s requires "value" to be set'
                                 % str(state))
        elif value_type is None or value_type == "":
            module.fail_json(msg='State %s requires "value_type" to be set'
                                 % str(state))

        if direct and config_source is None:
            module.fail_json(msg='If "direct" is "true" then the ' +
                                 '"config_source" must be specified')
        elif not direct and config_source is not None:
            module.fail_json(msg='If the "config_source" is specified ' +
                                 'then "direct" must be "true"')

    # Create a gconf2 preference
    gconf_pref = GConf2Preference(module, key, value_type,
                                  value, direct, config_source)
    # Now we get the current value, if not found don't fail
    dummy, current_value = gconf_pref.call("get", fail_onerr=False)

    # Check if the current value equals the value we want to set. If not, make
    # a change
    if current_value != value:
        # If check mode, we know a change would have occurred.
        if module.check_mode:
            # So we will set the change to True
            change = True
            # And set the new_value to the value that would have been set
            new_value = value
        # If not check mode make the change.
        else:
            change, new_value = gconf_pref.call(state)
    # If the value we want to set is the same as the current_value, we will
    # set the new_value to the current_value for reporting
    else:
        new_value = current_value

    facts = dict(gconftool2={'changed': change,
                             'key': key,
                             'value_type': value_type,
                             'new_value': new_value,
                             'previous_value': current_value,
                             'playbook_value': module.params['value']})

    module.exit_json(changed=change, ansible_facts=facts)


if __name__ == '__main__':
    main()
@@ -0,0 +1,78 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
module: gconftool2_info
author:
  - "Alexei Znamensky (@russoz)"
short_description: Retrieve GConf configurations
version_added: 5.1.0
description:
  - This module allows retrieving application preferences from the GConf database, with the help of C(gconftool-2).
extends_documentation_fragment:
  - community.general.attributes
  - community.general.attributes.info_module
options:
  key:
    description:
      - The key name for an element in the GConf database.
    type: str
    required: true
notes:
  - See man gconftool-2(1) for more details.
seealso:
  - name: gconf repository (archived)
    description: Git repository for the project. It is an archived project, so the repository is read-only.
    link: https://gitlab.gnome.org/Archive/gconf
'''

EXAMPLES = """
- name: Get value for a certain key in the database.
  community.general.gconftool2_info:
    key: /desktop/gnome/background/picture_filename
  register: result
"""

RETURN = '''
value:
  description:
    - The value of the property.
  returned: success
  type: str
  sample: Monospace 10
'''

from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
from ansible_collections.community.general.plugins.module_utils.gconftool2 import gconftool2_runner


class GConftoolInfo(ModuleHelper):
    output_params = ['key']
    module = dict(
        argument_spec=dict(
            key=dict(type='str', required=True, no_log=False),
        ),
        supports_check_mode=True,
    )

    def __init_module__(self):
        self.runner = gconftool2_runner(self.module, check_rc=True)

    def __run__(self):
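        # The runner renders 'get' and 'key' (in args_order) into gconftool-2
        # arguments; 'get' is expected to map to the --get flag.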
        with self.runner.context(args_order=["get", "key"]) as ctx:
            rc, out, err = ctx.run(get=True)
            self.vars.value = None if err and not out else out.rstrip()


def main():
    GConftoolInfo.execute()


if __name__ == '__main__':
    main()
@@ -0,0 +1,344 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: gem
short_description: Manage Ruby gems
description:
  - Manage installation and uninstallation of Ruby gems.
options:
  name:
    type: str
    description:
      - The name of the gem to be managed.
    required: true
  state:
    type: str
    description:
      - The desired state of the gem. C(latest) ensures that the latest version is installed.
    required: false
    choices: [present, absent, latest]
    default: present
  gem_source:
    type: path
    description:
      - The path to a local gem used as installation source.
    required: false
  include_dependencies:
    description:
      - Whether to include dependencies or not.
    required: false
    type: bool
    default: true
  repository:
    type: str
    description:
      - The repository from which the gem will be installed
    required: false
    aliases: [source]
  user_install:
    description:
      - Install gem in user's local gems cache or for all users
    required: false
    type: bool
    default: true
  executable:
    type: path
    description:
      - Override the path to the gem executable
    required: false
  install_dir:
    type: path
    description:
      - Install the gems into a specific directory.
        These gems will be independent from the global installed ones.
        Specifying this requires user_install to be false.
    required: false
  bindir:
    type: path
    description:
      - Install executables into a specific directory.
    version_added: 3.3.0
  norc:
    type: bool
    default: true
    description:
      - Avoid loading any C(.gemrc) file. Ignored for RubyGems prior to 2.5.2.
      - The default changed from C(false) to C(true) in community.general 6.0.0.
    version_added: 3.3.0
  env_shebang:
    description:
      - Rewrite the shebang line on installed scripts to use /usr/bin/env.
    required: false
    default: false
    type: bool
  version:
    type: str
    description:
      - Version of the gem to be installed/removed.
    required: false
  pre_release:
    description:
      - Allow installation of pre-release versions of the gem.
    required: false
    default: false
    type: bool
  include_doc:
    description:
      - Install with or without docs.
    required: false
    default: false
    type: bool
  build_flags:
    type: str
    description:
      - Allow adding build flags for gem compilation
    required: false
  force:
    description:
      - Force gem to install, bypassing dependency checks.
    required: false
    default: false
    type: bool
author:
  - "Ansible Core Team"
  - "Johan Wiren (@johanwiren)"
'''

EXAMPLES = '''
- name: Install version 1.0 of vagrant
  community.general.gem:
    name: vagrant
    version: 1.0
    state: present

- name: Install latest available version of rake
  community.general.gem:
    name: rake
    state: latest

- name: Install rake version 1.0 from a local gem on disk
  community.general.gem:
    name: rake
    gem_source: /path/to/gems/rake-1.0.gem
    state: present
'''

import re

from ansible.module_utils.basic import AnsibleModule


def get_rubygems_path(module):
    if module.params['executable']:
        result = module.params['executable'].split(' ')
    else:
        result = [module.get_bin_path('gem', True)]
    return result


def get_rubygems_version(module):
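    # Cache the parsed version on the function object so 'gem --version' only runs once.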
    if hasattr(get_rubygems_version, "ver"):
        return get_rubygems_version.ver

    cmd = get_rubygems_path(module) + ['--version']
    (rc, out, err) = module.run_command(cmd, check_rc=True)

    match = re.match(r'^(\d+)\.(\d+)\.(\d+)', out)
    if not match:
        return None

    ver = tuple(int(x) for x in match.groups())
    get_rubygems_version.ver = ver

    return ver


def get_rubygems_environ(module):
    if module.params['install_dir']:
        return {'GEM_HOME': module.params['install_dir']}
    return None


def get_installed_versions(module, remote=False):

    cmd = get_rubygems_path(module)
    cmd.append('query')
    cmd.extend(common_opts(module))
    if remote:
        cmd.append('--remote')
        if module.params['repository']:
            cmd.extend(['--source', module.params['repository']])
    cmd.append('-n')
    cmd.append('^%s$' % module.params['name'])

    environ = get_rubygems_environ(module)
    (rc, out, err) = module.run_command(cmd, environ_update=environ, check_rc=True)
    installed_versions = []
    for line in out.splitlines():
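        # 'gem query' lines look like 'rake (13.0.6, 10.5.0)' or 'json (default: 2.6.3)'.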
        match = re.match(r"\S+\s+\((?:default: )?(.+)\)", line)
        if match:
            versions = match.group(1)
            for version in versions.split(', '):
                installed_versions.append(version.split()[0])
    return installed_versions


def exists(module):
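    # For state=latest, take the newest remote version as the target so a gem that
    # is installed but outdated does not count as already present.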
    if module.params['state'] == 'latest':
        remoteversions = get_installed_versions(module, remote=True)
        if remoteversions:
            module.params['version'] = remoteversions[0]
    installed_versions = get_installed_versions(module)
    if module.params['version']:
        if module.params['version'] in installed_versions:
            return True
    else:
        if installed_versions:
            return True
    return False


def common_opts(module):
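    # --norc is only understood by RubyGems >= 2.5.2, so skip it on older versions.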
opts = []
|
||||
ver = get_rubygems_version(module)
|
||||
if module.params['norc'] and ver and ver >= (2, 5, 2):
|
||||
opts.append('--norc')
|
||||
return opts
|
||||
|
||||
|
||||
def uninstall(module):
|
||||
|
||||
if module.check_mode:
|
||||
return
|
||||
cmd = get_rubygems_path(module)
|
||||
environ = get_rubygems_environ(module)
|
||||
cmd.append('uninstall')
|
||||
cmd.extend(common_opts(module))
|
||||
if module.params['install_dir']:
|
||||
cmd.extend(['--install-dir', module.params['install_dir']])
|
||||
|
||||
if module.params['bindir']:
|
||||
cmd.extend(['--bindir', module.params['bindir']])
|
||||
|
||||
if module.params['version']:
|
||||
cmd.extend(['--version', module.params['version']])
|
||||
else:
|
||||
cmd.append('--all')
|
||||
cmd.append('--executable')
|
||||
cmd.append(module.params['name'])
|
||||
module.run_command(cmd, environ_update=environ, check_rc=True)
|
||||
|
||||
|
||||
def install(module):
|
||||
|
||||
if module.check_mode:
|
||||
return
|
||||
|
||||
ver = get_rubygems_version(module)
|
||||
|
||||
cmd = get_rubygems_path(module)
|
||||
cmd.append('install')
|
||||
cmd.extend(common_opts(module))
|
||||
if module.params['version']:
|
||||
cmd.extend(['--version', module.params['version']])
|
||||
if module.params['repository']:
|
||||
cmd.extend(['--source', module.params['repository']])
|
||||
if not module.params['include_dependencies']:
|
||||
cmd.append('--ignore-dependencies')
|
||||
else:
|
||||
if ver and ver < (2, 0, 0):
|
||||
cmd.append('--include-dependencies')
|
||||
if module.params['user_install']:
|
||||
cmd.append('--user-install')
|
||||
else:
|
||||
cmd.append('--no-user-install')
|
||||
if module.params['install_dir']:
|
||||
cmd.extend(['--install-dir', module.params['install_dir']])
|
||||
if module.params['bindir']:
|
||||
cmd.extend(['--bindir', module.params['bindir']])
|
||||
if module.params['pre_release']:
|
||||
cmd.append('--pre')
|
||||
if not module.params['include_doc']:
|
||||
if ver and ver < (2, 0, 0):
|
||||
cmd.append('--no-rdoc')
|
||||
cmd.append('--no-ri')
|
||||
else:
|
||||
cmd.append('--no-document')
|
||||
if module.params['env_shebang']:
|
||||
cmd.append('--env-shebang')
|
||||
cmd.append(module.params['gem_source'])
|
||||
if module.params['build_flags']:
|
||||
cmd.extend(['--', module.params['build_flags']])
|
||||
if module.params['force']:
|
||||
cmd.append('--force')
|
||||
module.run_command(cmd, check_rc=True)
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
executable=dict(required=False, type='path'),
|
||||
gem_source=dict(required=False, type='path'),
|
||||
include_dependencies=dict(required=False, default=True, type='bool'),
|
||||
name=dict(required=True, type='str'),
|
||||
repository=dict(required=False, aliases=['source'], type='str'),
|
||||
state=dict(required=False, default='present', choices=['present', 'absent', 'latest'], type='str'),
|
||||
user_install=dict(required=False, default=True, type='bool'),
|
||||
install_dir=dict(required=False, type='path'),
|
||||
bindir=dict(type='path'),
|
||||
norc=dict(type='bool', default=True),
|
||||
pre_release=dict(required=False, default=False, type='bool'),
|
||||
include_doc=dict(required=False, default=False, type='bool'),
|
||||
env_shebang=dict(required=False, default=False, type='bool'),
|
||||
version=dict(required=False, type='str'),
|
||||
build_flags=dict(required=False, type='str'),
|
||||
force=dict(required=False, default=False, type='bool'),
|
||||
),
|
||||
supports_check_mode=True,
|
||||
mutually_exclusive=[['gem_source', 'repository'], ['gem_source', 'version']],
|
||||
)
|
||||
|
||||
if module.params['version'] and module.params['state'] == 'latest':
|
||||
module.fail_json(msg="Cannot specify version when state=latest")
|
||||
if module.params['gem_source'] and module.params['state'] == 'latest':
|
||||
module.fail_json(msg="Cannot maintain state=latest when installing from local source")
|
||||
if module.params['user_install'] and module.params['install_dir']:
|
||||
module.fail_json(msg="install_dir requires user_install=false")
|
||||
|
||||
if not module.params['gem_source']:
|
||||
module.params['gem_source'] = module.params['name']
|
||||
|
||||
changed = False
|
||||
|
||||
if module.params['state'] in ['present', 'latest']:
|
||||
if not exists(module):
|
||||
install(module)
|
||||
changed = True
|
||||
elif module.params['state'] == 'absent':
|
||||
if exists(module):
|
||||
uninstall(module)
|
||||
changed = True
|
||||
|
||||
result = {}
|
||||
result['name'] = module.params['name']
|
||||
result['state'] = module.params['state']
|
||||
if module.params['version']:
|
||||
result['version'] = module.params['version']
|
||||
result['changed'] = changed
|
||||
|
||||
module.exit_json(**result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
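
get_installed_versions() above recovers version numbers from `gem query` output with a single regex. A small sketch of that parsing in isolation; the sample output lines are an assumption of the typical "name (versions)" shape, while the regex is the one the module uses:

import re

# Sample lines in the shape `gem query` typically prints (illustrative values).
sample_output = """\
rake (13.0.6, 12.3.3)
json (default: 2.6.3)
"""

versions = []
for line in sample_output.splitlines():
    match = re.match(r"\S+\s+\((?:default: )?(.+)\)", line)
    if match:
        for version in match.group(1).split(', '):
            # Keep only the version number, dropping any platform suffix.
            versions.append(version.split()[0])

print(versions)  # ['13.0.6', '12.3.3', '2.6.3']
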
@@ -0,0 +1,283 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2015, Marius Gedminas <marius@pov.lt>
# Copyright (c) 2016, Matthew Gamble <git@matthewgamble.net>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: git_config
author:
  - Matthew Gamble (@djmattyg007)
  - Marius Gedminas (@mgedmin)
requirements: ['git']
short_description: Read and write git configuration
description:
  - The C(git_config) module changes git configuration by invoking 'git config'.
    This is needed if you don't want to use M(ansible.builtin.template) for the entire git
    config file (e.g. because you need to change just C(user.email) in
    /etc/.git/config). Solutions involving M(ansible.builtin.command) are cumbersome or
    don't work correctly in check mode.
options:
  list_all:
    description:
      - List all settings (optionally limited to a given I(scope)).
    type: bool
    default: false
  name:
    description:
      - The name of the setting. If no value is supplied, the value will
        be read from the config if it has been set.
    type: str
  repo:
    description:
      - Path to a git repository for reading and writing values from a
        specific repo.
    type: path
  file:
    description:
      - Path to an adhoc git configuration file to be managed using the C(file) scope.
    type: path
    version_added: 2.0.0
  scope:
    description:
      - Specify which scope to read/set values from.
      - This is required when setting config values.
      - If this is set to C(local), you must also specify the C(repo) parameter.
      - If this is set to C(file), you must also specify the C(file) parameter.
      - It defaults to C(system) when I(list_all) is not set to C(true).
    choices: [ "file", "local", "global", "system" ]
    type: str
  state:
    description:
      - "Indicates the setting should be set/unset.
        This parameter has higher precedence than I(value) parameter:
        when I(state)=absent and I(value) is defined, I(value) is discarded."
    choices: [ 'present', 'absent' ]
    default: 'present'
    type: str
  value:
    description:
      - When specifying the name of a single setting, supply a value to
        set that setting to the given value.
    type: str
'''

EXAMPLES = '''
- name: Add a setting to ~/.gitconfig
  community.general.git_config:
    name: alias.ci
    scope: global
    value: commit

- name: Add a setting to ~/.gitconfig
  community.general.git_config:
    name: alias.st
    scope: global
    value: status

- name: Remove a setting from ~/.gitconfig
  community.general.git_config:
    name: alias.ci
    scope: global
    state: absent

- name: Add a setting to ~/.gitconfig
  community.general.git_config:
    name: core.editor
    scope: global
    value: vim

- name: Add a setting system-wide
  community.general.git_config:
    name: alias.remotev
    scope: system
    value: remote -v

- name: Add a setting to a system scope (default)
  community.general.git_config:
    name: alias.diffc
    value: diff --cached

- name: Add a setting to a system scope (default)
  community.general.git_config:
    name: color.ui
    value: auto

- name: Make etckeeper not complain when it is invoked by cron
  community.general.git_config:
    name: user.email
    repo: /etc
    scope: local
    value: 'root@{{ ansible_fqdn }}'

- name: Read individual values from git config
  community.general.git_config:
    name: alias.ci
    scope: global

- name: Scope system is also assumed when reading values, unless list_all=true
  community.general.git_config:
    name: alias.diffc

- name: Read all values from git config
  community.general.git_config:
    list_all: true
    scope: global

- name: When list_all=true and no scope is specified, you get configuration from all scopes
  community.general.git_config:
    list_all: true

- name: Specify a repository to include local settings
  community.general.git_config:
    list_all: true
    repo: /path/to/repo.git
'''

RETURN = '''
---
config_value:
  description: When I(list_all=false) and value is not set, a string containing the value of the setting in name
  returned: success
  type: str
  sample: "vim"

config_values:
  description: When I(list_all=true), a dict containing key/value pairs of multiple configuration settings
  returned: success
  type: dict
  sample:
    core.editor: "vim"
    color.ui: "auto"
    alias.diffc: "diff --cached"
    alias.remotev: "remote -v"
'''

from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(
        argument_spec=dict(
            list_all=dict(required=False, type='bool', default=False),
            name=dict(type='str'),
            repo=dict(type='path'),
            file=dict(type='path'),
            scope=dict(required=False, type='str', choices=['file', 'local', 'global', 'system']),
            state=dict(required=False, type='str', default='present', choices=['present', 'absent']),
            value=dict(required=False),
        ),
        mutually_exclusive=[['list_all', 'name'], ['list_all', 'value'], ['list_all', 'state']],
        required_if=[
            ('scope', 'local', ['repo']),
            ('scope', 'file', ['file'])
        ],
        required_one_of=[['list_all', 'name']],
        supports_check_mode=True,
    )
    git_path = module.get_bin_path('git', True)

    params = module.params
    # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting.
    # Set the locale to C to ensure consistent messages.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    if params['name']:
        name = params['name']
    else:
        name = None

    if params['scope']:
        scope = params['scope']
    elif params['list_all']:
        scope = None
    else:
        scope = 'system'

    if params['state'] == 'absent':
        unset = 'unset'
        params['value'] = None
    else:
        unset = None

    if params['value']:
        new_value = params['value']
    else:
        new_value = None

    args = [git_path, "config", "--includes"]
    if params['list_all']:
        args.append('-l')
    if scope == 'file':
        args.append('-f')
        args.append(params['file'])
    elif scope:
        args.append("--" + scope)
    if name:
        args.append(name)

    if scope == 'local':
        dir = params['repo']
    elif params['list_all'] and params['repo']:
        # Include local settings from a specific repo when listing all available settings
        dir = params['repo']
    else:
        # Run from root directory to avoid accidentally picking up any local config settings
        dir = "/"

    (rc, out, err) = module.run_command(args, cwd=dir, expand_user_and_vars=False)
    if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err:
        # This just means nothing has been set at the given scope
        module.exit_json(changed=False, msg='', config_values={})
    elif rc >= 2:
        # If the return code is 1, it just means the option hasn't been set yet, which is fine.
        module.fail_json(rc=rc, msg=err, cmd=' '.join(args))

    if params['list_all']:
        values = out.rstrip().splitlines()
        config_values = {}
        for value in values:
            k, v = value.split('=', 1)
            config_values[k] = v
        module.exit_json(changed=False, msg='', config_values=config_values)
    elif not new_value and not unset:
        module.exit_json(changed=False, msg='', config_value=out.rstrip())
    elif unset and not out:
        module.exit_json(changed=False, msg='no setting to unset')
    else:
        old_value = out.rstrip()
        if old_value == new_value:
            module.exit_json(changed=False, msg="")

    if not module.check_mode:
        if unset:
            args.insert(len(args) - 1, "--" + unset)
            cmd = args
        else:
            cmd = args + [new_value]
        (rc, out, err) = module.run_command(cmd, cwd=dir, ignore_invalid_cwd=False, expand_user_and_vars=False)
        if err:
            module.fail_json(rc=rc, msg=err, cmd=cmd)

    module.exit_json(
        msg='setting changed',
        diff=dict(
            before_header=' '.join(args),
            before=old_value + "\n",
            after_header=' '.join(args),
            after=(new_value or '') + "\n"
        ),
        changed=True
    )


if __name__ == '__main__':
    main()
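
The list_all branch above turns `git config -l` output into the config_values dict with a single split('=', 1), so values that themselves contain '=' survive intact. A small sketch of just that parsing, with canned output (the sample lines are illustrative):

# Canned `git config -l` style output; the last value contains an '=' itself.
out = """\
user.email=root@example.com
alias.ci=commit
alias.pushf=push --force-with-lease --force-if-includes
"""

config_values = {}
for line in out.rstrip().splitlines():
    # split('=', 1) keeps any '=' inside the value intact.
    k, v = line.split('=', 1)
    config_values[k] = v

print(config_values['alias.ci'])  # commit
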
@@ -0,0 +1,340 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) Ansible project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: github_deploy_key
author: "Ali (@bincyber)"
short_description: Manages deploy keys for GitHub repositories
description:
  - "Adds or removes deploy keys for GitHub repositories. Supports authentication using username and password,
    username and password and 2-factor authentication code (OTP), OAuth2 token, or personal access token. Admin
    rights on the repository are required."
options:
  github_url:
    description:
      - The base URL of the GitHub API.
    required: false
    type: str
    version_added: '0.2.0'
    default: https://api.github.com
  owner:
    description:
      - The name of the individual account or organization that owns the GitHub repository.
    required: true
    aliases: [ 'account', 'organization' ]
    type: str
  repo:
    description:
      - The name of the GitHub repository.
    required: true
    aliases: [ 'repository' ]
    type: str
  name:
    description:
      - The name for the deploy key.
    required: true
    aliases: [ 'title', 'label' ]
    type: str
  key:
    description:
      - The SSH public key to add to the repository as a deploy key.
    required: true
    type: str
  read_only:
    description:
      - If C(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to read and write.
    type: bool
    default: true
  state:
    description:
      - The state of the deploy key.
    default: "present"
    choices: [ "present", "absent" ]
    type: str
  force:
    description:
      - If C(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title.
    type: bool
    default: false
  username:
    description:
      - The username to authenticate with. Should not be set when using a personal access token.
    type: str
  password:
    description:
      - The password to authenticate with. Alternatively, a personal access token can be used instead of I(username) and I(password) combination.
    type: str
  token:
    description:
      - The OAuth2 token or personal access token to authenticate with. Mutually exclusive with I(password).
    type: str
  otp:
    description:
      - The 6 digit One Time Password for 2-Factor Authentication. Required together with I(username) and I(password).
    type: int
notes:
  - "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/."
'''

EXAMPLES = '''
- name: Add a new read-only deploy key to a GitHub repository using basic authentication
  community.general.github_deploy_key:
    owner: "johndoe"
    repo: "example"
    name: "new-deploy-key"
    key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
    read_only: true
    username: "johndoe"
    password: "supersecretpassword"

- name: Remove an existing deploy key from a GitHub repository
  community.general.github_deploy_key:
    owner: "johndoe"
    repository: "example"
    name: "new-deploy-key"
    key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
    force: true
    username: "johndoe"
    password: "supersecretpassword"
    state: absent

- name: Add a new deploy key to a GitHub repository, replace an existing key, use an OAuth2 token to authenticate
  community.general.github_deploy_key:
    owner: "johndoe"
    repository: "example"
    name: "new-deploy-key"
    key: "{{ lookup('file', '~/.ssh/github.pub') }}"
    force: true
    token: "ABAQDAwXxn7kIMNWzcDfo..."

- name: Re-add a deploy key to a GitHub repository but with a different name
  community.general.github_deploy_key:
    owner: "johndoe"
    repository: "example"
    name: "replace-deploy-key"
    key: "{{ lookup('file', '~/.ssh/github.pub') }}"
    username: "johndoe"
    password: "supersecretpassword"

- name: Add a new deploy key to a GitHub repository using 2FA
  community.general.github_deploy_key:
    owner: "johndoe"
    repo: "example"
    name: "new-deploy-key-2"
    key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
    username: "johndoe"
    password: "supersecretpassword"
    otp: 123456

- name: Add a read-only deploy key to a repository hosted on GitHub Enterprise
  community.general.github_deploy_key:
    github_url: "https://api.example.com"
    owner: "janedoe"
    repo: "example"
    name: "new-deploy-key"
    key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
    read_only: true
    username: "janedoe"
    password: "supersecretpassword"
'''

RETURN = '''
msg:
  description: the status message describing what occurred
  returned: always
  type: str
  sample: "Deploy key added successfully"

http_status_code:
  description: the HTTP status code returned by the GitHub API
  returned: failed
  type: int
  sample: 400

error:
  description: the error message returned by the GitHub API
  returned: failed
  type: str
  sample: "key is already in use"

id:
  description: the key identifier assigned by GitHub for the deploy key
  returned: changed
  type: int
  sample: 24381901
'''

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from re import findall


class GithubDeployKey(object):
    def __init__(self, module):
        self.module = module

        self.github_url = self.module.params['github_url']
        self.name = module.params['name']
        self.key = module.params['key']
        self.state = module.params['state']
        self.read_only = module.params.get('read_only', True)
        self.force = module.params.get('force', False)
        self.username = module.params.get('username', None)
        self.password = module.params.get('password', None)
        self.token = module.params.get('token', None)
        self.otp = module.params.get('otp', None)

    @property
    def url(self):
        owner = self.module.params['owner']
        repo = self.module.params['repo']
        return "{0}/repos/{1}/{2}/keys".format(self.github_url, owner, repo)

    @property
    def headers(self):
        if self.username is not None and self.password is not None:
            self.module.params['url_username'] = self.username
            self.module.params['url_password'] = self.password
            self.module.params['force_basic_auth'] = True
            if self.otp is not None:
                return {"X-GitHub-OTP": self.otp}
        elif self.token is not None:
            return {"Authorization": "token {0}".format(self.token)}
        else:
            return None

    def paginate(self, url):
        while url:
            resp, info = fetch_url(self.module, url, headers=self.headers, method="GET")

            if info["status"] == 200:
                yield self.module.from_json(resp.read())

                links = {}
                # A Link header is only present when the results are paginated.
                for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info.get("link", "")):
                    links[y] = x

                url = links.get('next')
            else:
                self.handle_error(method="GET", info=info)

    def get_existing_key(self):
        for keys in self.paginate(self.url):
            if keys:
                for i in keys:
                    existing_key_id = str(i["id"])
                    if i["key"].split() == self.key.split()[:2]:
                        return existing_key_id
                    elif i['title'] == self.name and self.force:
                        return existing_key_id
            else:
                return None

    def add_new_key(self):
        request_body = {"title": self.name, "key": self.key, "read_only": self.read_only}

        resp, info = fetch_url(self.module, self.url, data=self.module.jsonify(request_body), headers=self.headers, method="POST", timeout=30)

        status_code = info["status"]

        if status_code == 201:
            response_body = self.module.from_json(resp.read())
            key_id = response_body["id"]
            self.module.exit_json(changed=True, msg="Deploy key successfully added", id=key_id)
        elif status_code == 422:
            self.module.exit_json(changed=False, msg="Deploy key already exists")
        else:
            self.handle_error(method="POST", info=info)

    def remove_existing_key(self, key_id):
        resp, info = fetch_url(self.module, "{0}/{1}".format(self.url, key_id), headers=self.headers, method="DELETE")

        status_code = info["status"]

        if status_code == 204:
            if self.state == 'absent':
                self.module.exit_json(changed=True, msg="Deploy key successfully deleted", id=key_id)
        else:
            self.handle_error(method="DELETE", info=info, key_id=key_id)

    def handle_error(self, method, info, key_id=None):
        status_code = info['status']
        body = info.get('body')
        err = None
        if body:
            err = self.module.from_json(body)['message']

        if status_code == 401:
            self.module.fail_json(msg="Failed to connect to {0} due to invalid credentials".format(self.github_url), http_status_code=status_code, error=err)
        elif status_code == 404:
            self.module.fail_json(msg="GitHub repository does not exist", http_status_code=status_code, error=err)
        else:
            if method == "GET":
                self.module.fail_json(msg="Failed to retrieve existing deploy keys", http_status_code=status_code, error=err)
            elif method == "POST":
                self.module.fail_json(msg="Failed to add deploy key", http_status_code=status_code, error=err)
            elif method == "DELETE":
                self.module.fail_json(msg="Failed to delete existing deploy key", id=key_id, http_status_code=status_code, error=err)


def main():
    module = AnsibleModule(
        argument_spec=dict(
            github_url=dict(required=False, type='str', default="https://api.github.com"),
            owner=dict(required=True, type='str', aliases=['account', 'organization']),
            repo=dict(required=True, type='str', aliases=['repository']),
            name=dict(required=True, type='str', aliases=['title', 'label']),
            key=dict(required=True, type='str', no_log=False),
            read_only=dict(required=False, type='bool', default=True),
            state=dict(default='present', choices=['present', 'absent']),
            force=dict(required=False, type='bool', default=False),
            username=dict(required=False, type='str'),
            password=dict(required=False, type='str', no_log=True),
            otp=dict(required=False, type='int', no_log=True),
            token=dict(required=False, type='str', no_log=True)
        ),
        mutually_exclusive=[
            ['password', 'token']
        ],
        required_together=[
            ['username', 'password'],
            ['otp', 'username', 'password']
        ],
        required_one_of=[
            ['username', 'token']
        ],
        supports_check_mode=True,
    )

    deploy_key = GithubDeployKey(module)

    if module.check_mode:
        key_id = deploy_key.get_existing_key()
        if deploy_key.state == "present" and key_id is None:
            module.exit_json(changed=True)
        elif deploy_key.state == "present" and key_id is not None:
            module.exit_json(changed=False)

    # to forcefully modify an existing key, the existing key must be deleted first
    if deploy_key.state == 'absent' or deploy_key.force:
        key_id = deploy_key.get_existing_key()

        if key_id is not None:
            deploy_key.remove_existing_key(key_id)
        elif deploy_key.state == 'absent':
            module.exit_json(changed=False, msg="Deploy key does not exist")

    if deploy_key.state == "present":
        deploy_key.add_new_key()


if __name__ == '__main__':
    main()
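
paginate() above follows GitHub's RFC 5988 Link header until no rel="next" remains. The extraction in isolation, using the same regex as the module (the header value here is an illustrative sample, not a live response):

from re import findall

# A GitHub-style Link header (illustrative value).
link_header = ('<https://api.github.com/repositories/1/keys?page=2>; rel="next", '
               '<https://api.github.com/repositories/1/keys?page=5>; rel="last"')

links = {}
for url, rel in findall(r'<([^>]+)>;\s*rel="(\w+)"', link_header):
    links[rel] = url

# paginate() keeps fetching links['next'] until the header stops providing one.
print(links.get('next'))  # https://api.github.com/repositories/1/keys?page=2
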
@@ -0,0 +1,117 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2017-18, Abhijeet Kasurde <akasurde@redhat.com>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
module: github_issue
short_description: View GitHub issue
description:
  - View GitHub issue for a given repository and organization.
options:
  repo:
    description:
      - Name of repository from which issue needs to be retrieved.
    required: true
    type: str
  organization:
    description:
      - Name of the GitHub organization in which the repository is hosted.
    required: true
    type: str
  issue:
    description:
      - Issue number for which information is required.
    required: true
    type: int
  action:
    description:
      - Get various details about issue depending upon action specified.
    default: 'get_status'
    choices:
      - 'get_status'
    type: str
author:
  - Abhijeet Kasurde (@Akasurde)
'''

RETURN = '''
issue_status:
  description: State of the GitHub issue
  type: str
  returned: success
  sample: open, closed
'''

EXAMPLES = '''
- name: Check if GitHub issue is closed or not
  community.general.github_issue:
    organization: ansible
    repo: ansible
    issue: 23642
    action: get_status
  register: r

- name: Take action depending upon issue status
  ansible.builtin.debug:
    msg: Do something when issue 23642 is open
  when: r.issue_status == 'open'
'''

import json

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url


def main():
    module = AnsibleModule(
        argument_spec=dict(
            organization=dict(required=True),
            repo=dict(required=True),
            issue=dict(type='int', required=True),
            action=dict(choices=['get_status'], default='get_status'),
        ),
        supports_check_mode=True,
    )

    organization = module.params['organization']
    repo = module.params['repo']
    issue = module.params['issue']
    action = module.params['action']

    result = dict()

    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/vnd.github.v3+json',
    }

    url = "https://api.github.com/repos/%s/%s/issues/%s" % (organization, repo, issue)

    response, info = fetch_url(module, url, headers=headers)
    if not (200 <= info['status'] < 400):
        if info['status'] == 404:
            module.fail_json(msg="Failed to find issue %s" % issue)
        module.fail_json(msg="Failed to send request to %s: %s" % (url, info['msg']))

    gh_obj = json.loads(response.read())

    if action == 'get_status' or action is None:
        if module.check_mode:
            result.update(changed=True)
        else:
            result.update(changed=True, issue_status=gh_obj['state'])

    module.exit_json(**result)


if __name__ == '__main__':
    main()
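
The module is a thin wrapper over one GET to the issues endpoint, reading the 'state' field of the response. A standard-library-only sketch of the same lookup, under the assumption of network access; the endpoint shape matches the module, while the organization/repo/issue values are just examples:

import json
from urllib.request import Request, urlopen

organization, repo, issue = 'ansible', 'ansible', 23642
url = "https://api.github.com/repos/%s/%s/issues/%s" % (organization, repo, issue)

# Same Accept header the module sends; anonymous access works for public repos.
req = Request(url, headers={'Accept': 'application/vnd.github.v3+json'})
with urlopen(req) as response:
    gh_obj = json.loads(response.read())

# The module reports this field as issue_status.
print(gh_obj['state'])  # e.g. 'closed'
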
@@ -0,0 +1,243 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright Ansible Team
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
module: github_key
short_description: Manage GitHub access keys
description:
  - Creates, removes, or updates GitHub access keys.
options:
  token:
    description:
      - GitHub Access Token with permission to list and create public keys.
    required: true
    type: str
  name:
    description:
      - SSH key name
    required: true
    type: str
  pubkey:
    description:
      - SSH public key value. Required when I(state=present).
    type: str
  state:
    description:
      - Whether to remove a key, ensure that it exists, or update its value.
    choices: ['present', 'absent']
    default: 'present'
    type: str
  force:
    description:
      - The default is C(true), which will replace the existing remote key
        if it's different from C(pubkey). If C(false), the key will only be
        set if no key with the given I(name) exists.
    type: bool
    default: true

author: Robert Estelle (@erydo)
'''

RETURN = '''
deleted_keys:
  description: An array of key objects that were deleted. Only present on state=absent
  type: list
  returned: When state=absent
  sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': false}]
matching_keys:
  description: An array of keys matching the specified name. Only present on state=present
  type: list
  returned: When state=present
  sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': false}]
key:
  description: Metadata about the key just created. Only present on state=present
  type: dict
  returned: success
  sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': false}
'''

EXAMPLES = '''
- name: Read SSH public key to authorize
  ansible.builtin.shell: cat /home/foo/.ssh/id_rsa.pub
  register: ssh_pub_key

- name: Authorize key with GitHub
  local_action:
    module: github_key
    name: Access Key for Some Machine
    token: '{{ github_access_token }}'
    pubkey: '{{ ssh_pub_key.stdout }}'
'''


import json
import re

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url


API_BASE = 'https://api.github.com'


class GitHubResponse(object):
    def __init__(self, response, info):
        self.content = response.read()
        self.info = info

    def json(self):
        return json.loads(self.content)

    def links(self):
        links = {}
        if 'link' in self.info:
            link_header = self.info['link']
            matches = re.findall('<([^>]+)>; rel="([^"]+)"', link_header)
            for url, rel in matches:
                links[rel] = url
        return links


class GitHubSession(object):
    def __init__(self, module, token):
        self.module = module
        self.token = token

    def request(self, method, url, data=None):
        headers = {
            'Authorization': 'token %s' % self.token,
            'Content-Type': 'application/json',
            'Accept': 'application/vnd.github.v3+json',
        }
        response, info = fetch_url(
            self.module, url, method=method, data=data, headers=headers)
        if not (200 <= info['status'] < 400):
            self.module.fail_json(
                msg=(" failed to send request %s to %s: %s"
                     % (method, url, info['msg'])))
        return GitHubResponse(response, info)


def get_all_keys(session):
    url = API_BASE + '/user/keys'
    result = []
    while url:
        r = session.request('GET', url)
        result.extend(r.json())
        url = r.links().get('next')
    return result


def create_key(session, name, pubkey, check_mode):
    if check_mode:
        from datetime import datetime
        now = datetime.utcnow()
        return {
            'id': 0,
            'key': pubkey,
            'title': name,
            'url': 'http://example.com/CHECK_MODE_GITHUB_KEY',
            'created_at': datetime.strftime(now, '%Y-%m-%dT%H:%M:%SZ'),
            'read_only': False,
            'verified': False
        }
    else:
        return session.request(
            'POST',
            API_BASE + '/user/keys',
            data=json.dumps({'title': name, 'key': pubkey})).json()


def delete_keys(session, to_delete, check_mode):
    if check_mode:
        return

    for key in to_delete:
        session.request('DELETE', API_BASE + '/user/keys/%s' % key["id"])


def ensure_key_absent(session, name, check_mode):
    to_delete = [key for key in get_all_keys(session) if key['title'] == name]
    delete_keys(session, to_delete, check_mode=check_mode)

    return {'changed': bool(to_delete),
            'deleted_keys': to_delete}


def ensure_key_present(module, session, name, pubkey, force, check_mode):
    all_keys = get_all_keys(session)
    matching_keys = [k for k in all_keys if k['title'] == name]
    deleted_keys = []

    new_signature = pubkey.split(' ')[1]
    for key in all_keys:
        existing_signature = key['key'].split(' ')[1]
        if new_signature == existing_signature and key['title'] != name:
            module.fail_json(msg=(
                "another key with the same content is already registered "
                "under the name |{0}|").format(key['title']))

    if matching_keys and force and matching_keys[0]['key'].split(' ')[1] != new_signature:
        delete_keys(session, matching_keys, check_mode=check_mode)
        (deleted_keys, matching_keys) = (matching_keys, [])

    if not matching_keys:
        key = create_key(session, name, pubkey, check_mode=check_mode)
    else:
        key = matching_keys[0]

    return {
        'changed': bool(deleted_keys or not matching_keys),
        'deleted_keys': deleted_keys,
        'matching_keys': matching_keys,
        'key': key
    }


def main():
    argument_spec = {
        'token': {'required': True, 'no_log': True},
        'name': {'required': True},
        'pubkey': {},
        'state': {'choices': ['present', 'absent'], 'default': 'present'},
        'force': {'default': True, 'type': 'bool'},
    }
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    token = module.params['token']
    name = module.params['name']
    state = module.params['state']
    force = module.params['force']
    pubkey = module.params.get('pubkey')

    if pubkey:
        pubkey_parts = pubkey.split(' ')
        # Keys consist of a protocol, the key data, and an optional comment.
        if len(pubkey_parts) < 2:
            module.fail_json(msg='"pubkey" parameter has an invalid format')
    elif state == 'present':
        module.fail_json(msg='"pubkey" is required when state=present')

    session = GitHubSession(module, token)
    if state == 'present':
        result = ensure_key_present(module, session, name, pubkey, force=force,
                                    check_mode=module.check_mode)
    elif state == 'absent':
        result = ensure_key_absent(session, name, check_mode=module.check_mode)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
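
ensure_key_present() compares keys by the base64 body alone (pubkey.split(' ')[1]), so two keys that differ only in their trailing comment count as the same key. The comparison in isolation (key material shortened and invented for readability):

def same_key(a, b):
    # Field 0 is the key type, field 1 the base64 body, field 2 an optional comment.
    return a.split(' ')[1] == b.split(' ')[1]

existing = 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5EXAMPLE old-laptop'
candidate = 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5EXAMPLE new-laptop'
print(same_key(existing, candidate))  # True: only the comment differs
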
@@ -0,0 +1,223 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright Ansible Team
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type


DOCUMENTATION = '''
---
module: github_release
short_description: Interact with GitHub Releases
description:
  - Fetch metadata about GitHub Releases
options:
  token:
    description:
      - GitHub Personal Access Token for authenticating. Mutually exclusive with C(password).
    type: str
  user:
    description:
      - The GitHub account that owns the repository
    type: str
    required: true
  password:
    description:
      - The GitHub account password for the user. Mutually exclusive with C(token).
    type: str
  repo:
    description:
      - Repository name
    type: str
    required: true
  action:
    description:
      - Action to perform
    type: str
    required: true
    choices: [ 'latest_release', 'create_release' ]
  tag:
    description:
      - Tag name when creating a release. Required when I(action) is set to C(create_release).
    type: str
  target:
    description:
      - Target of release when creating a release
    type: str
  name:
    description:
      - Name of release when creating a release
    type: str
  body:
    description:
      - Description of the release when creating a release
    type: str
  draft:
    description:
      - Sets if the release is a draft or not. (boolean)
    type: bool
    default: false
  prerelease:
    description:
      - Sets if the release is a prerelease or not. (boolean)
    type: bool
    default: false

author:
  - "Adrian Moisey (@adrianmoisey)"
requirements:
  - "github3.py >= 1.0.0a3"
'''

EXAMPLES = '''
- name: Get latest release of a public repository
  community.general.github_release:
    user: ansible
    repo: ansible
    action: latest_release

- name: Get latest release of testuser/testrepo
  community.general.github_release:
    token: tokenabc1234567890
    user: testuser
    repo: testrepo
    action: latest_release

- name: Get latest release of test repo using username and password. Ansible 2.4.
  community.general.github_release:
    user: testuser
    password: secret123
    repo: testrepo
    action: latest_release

- name: Create a new release
  community.general.github_release:
    token: tokenabc1234567890
    user: testuser
    repo: testrepo
    action: create_release
    tag: test
    target: master
    name: My Release
    body: Some description

'''

RETURN = '''
create_release:
  description:
    - Version of the created release
    - "For Ansible version 2.5 and later, if specified release version already exists, then State is unchanged"
    - "For Ansible versions prior to 2.5, if specified release version already exists, then State is skipped"
  type: str
  returned: success
  sample: 1.1.0

latest_release:
  description: Version of the latest release
  type: str
  returned: success
  sample: 1.1.0
'''

import traceback

GITHUB_IMP_ERR = None
try:
    import github3

    HAS_GITHUB_API = True
except ImportError:
    GITHUB_IMP_ERR = traceback.format_exc()
    HAS_GITHUB_API = False

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native


def main():
    module = AnsibleModule(
        argument_spec=dict(
            repo=dict(required=True),
            user=dict(required=True),
            password=dict(no_log=True),
            token=dict(no_log=True),
            action=dict(
                required=True, choices=['latest_release', 'create_release']),
            tag=dict(type='str'),
            target=dict(type='str'),
            name=dict(type='str'),
            body=dict(type='str'),
            draft=dict(type='bool', default=False),
            prerelease=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        mutually_exclusive=(('password', 'token'),),
        required_if=[('action', 'create_release', ['tag']),
                     ('action', 'create_release', ['password', 'token'], True)],
    )

    if not HAS_GITHUB_API:
        module.fail_json(msg=missing_required_lib('github3.py >= 1.0.0a3'),
                         exception=GITHUB_IMP_ERR)

    repo = module.params['repo']
    user = module.params['user']
    password = module.params['password']
    login_token = module.params['token']
    action = module.params['action']
    tag = module.params.get('tag')
    target = module.params.get('target')
    name = module.params.get('name')
    body = module.params.get('body')
    draft = module.params.get('draft')
    prerelease = module.params.get('prerelease')

    # login to github
    try:
        if password:
            gh_obj = github3.login(user, password=password)
        elif login_token:
            gh_obj = github3.login(token=login_token)
        else:
            gh_obj = github3.GitHub()

        # test if we're actually logged in
        if password or login_token:
            gh_obj.me()
    except github3.exceptions.AuthenticationFailed as e:
        module.fail_json(msg='Failed to connect to GitHub: %s' % to_native(e),
                         details="Please check username and password or token "
                                 "for repository %s" % repo)

    repository = gh_obj.repository(user, repo)

    if not repository:
        module.fail_json(msg="Repository %s/%s doesn't exist" % (user, repo))

    if action == 'latest_release':
        release = repository.latest_release()
        if release:
            module.exit_json(tag=release.tag_name)
        else:
            module.exit_json(tag=None)

    if action == 'create_release':
        release_exists = repository.release_from_tag(tag)
        if release_exists:
            module.exit_json(changed=False, msg="Release for tag %s already exists." % tag)

        release = repository.create_release(
            tag, target, name, body, draft, prerelease)
        if release:
            module.exit_json(changed=True, tag=release.tag_name)
        else:
            module.exit_json(changed=False, tag=None)


if __name__ == '__main__':
    main()
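
The github3.py calls the module depends on can be exercised in isolation. A minimal sketch mirroring the latest_release path of main() above, assuming github3.py is installed; the token and repository names are placeholders, and anonymous access suffices for public repositories:

import github3

# Placeholder token; github3.GitHub() without login also works for public repos.
gh = github3.login(token='tokenabc1234567890')
repository = gh.repository('testuser', 'testrepo')

release = repository.latest_release()
if release:
    print(release.tag_name)
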
@@ -0,0 +1,273 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2021, Álvaro Torres Cogollo
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
---
module: github_repo
short_description: Manage your repositories on GitHub
version_added: 2.2.0
description:
  - Manages GitHub repositories using the PyGithub library.
  - Authentication can be done with I(access_token) or with I(username) and I(password).
options:
  username:
    description:
      - Username used for authentication.
      - This is only needed when not using I(access_token).
    type: str
    required: false
  password:
    description:
      - Password used for authentication.
      - This is only needed when not using I(access_token).
    type: str
    required: false
  access_token:
    description:
      - Token parameter for authentication.
      - This is only needed when not using I(username) and I(password).
    type: str
    required: false
  name:
    description:
      - Repository name.
    type: str
    required: true
  description:
    description:
      - Description for the repository.
      - Defaults to empty if I(force_defaults=true), which is the default in this module.
      - Defaults to empty if I(force_defaults=false) when creating a new repository.
      - This is only used when I(state) is C(present).
    type: str
    required: false
  private:
    description:
      - Whether the repository should be private or not.
      - Defaults to C(false) if I(force_defaults=true), which is the default in this module.
      - Defaults to C(false) if I(force_defaults=false) when creating a new repository.
      - This is only used when I(state) is C(present).
    type: bool
    required: false
  state:
    description:
      - Whether the repository should exist or not.
    type: str
    default: present
    choices: [ absent, present ]
    required: false
  organization:
    description:
      - Organization for the repository.
      - If not provided and I(state) is C(present), the repository will be created in the current user profile.
    type: str
    required: false
  api_url:
    description:
      - URL to the GitHub API if not using github.com but your own instance.
    type: str
    default: 'https://api.github.com'
    version_added: "3.5.0"
  force_defaults:
    description:
      - Overwrite current I(description) and I(private) attributes with defaults if set to C(true), which currently is the default.
      - The default for this option will be deprecated in a future version of this collection, and eventually change to C(false).
    type: bool
    default: true
    required: false
    version_added: 4.1.0
requirements:
  - PyGithub>=1.54
notes:
  - For Python 3, PyGithub>=1.54 should be used.
  - "For Python 3.5, PyGithub==1.54 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-54-november-30-2020)."
  - "For Python 2.7, PyGithub==1.45 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-45-december-29-2019)."
  - Supports C(check_mode).
author:
  - Álvaro Torres Cogollo (@atorrescogollo)
'''

EXAMPLES = '''
- name: Create a GitHub repository
  community.general.github_repo:
    access_token: mytoken
    organization: MyOrganization
    name: myrepo
    description: "Just for fun"
    private: true
    state: present
    force_defaults: false
  register: result

- name: Delete the repository
  community.general.github_repo:
    username: octocat
    password: password
    organization: MyOrganization
    name: myrepo
    state: absent
  register: result
'''

RETURN = '''
repo:
  description: Repository information as JSON. See U(https://docs.github.com/en/rest/reference/repos#get-a-repository).
  returned: success and I(state) is C(present)
  type: dict
'''

import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib

GITHUB_IMP_ERR = None
try:
    from github import Github, GithubException, GithubObject
    from github.GithubException import UnknownObjectException
    HAS_GITHUB_PACKAGE = True
except Exception:
    GITHUB_IMP_ERR = traceback.format_exc()
    HAS_GITHUB_PACKAGE = False


def authenticate(username=None, password=None, access_token=None, api_url=None):
    if not api_url:
        return None

    if access_token:
        return Github(base_url=api_url, login_or_token=access_token)
    else:
        return Github(base_url=api_url, login_or_token=username, password=password)


def create_repo(gh, name, organization=None, private=None, description=None, check_mode=False):
    result = dict(
        changed=False,
        repo=dict())
    if organization:
        target = gh.get_organization(organization)
    else:
        target = gh.get_user()

    repo = None
    try:
        repo = target.get_repo(name=name)
        result['repo'] = repo.raw_data
    except UnknownObjectException:
        if not check_mode:
            repo = target.create_repo(
                name=name,
                private=GithubObject.NotSet if private is None else private,
                description=GithubObject.NotSet if description is None else description,
            )
            result['repo'] = repo.raw_data

        result['changed'] = True

    changes = {}
    if private is not None:
        if repo is None or repo.raw_data['private'] != private:
            changes['private'] = private
    if description is not None:
        if repo is None or repo.raw_data['description'] not in (description, description or None):
            changes['description'] = description

    if changes:
        if not check_mode:
            repo.edit(**changes)

        result['repo'].update({
            'private': repo._private.value if not check_mode else private,
            'description': repo._description.value if not check_mode else description,
        })
        result['changed'] = True

    return result


def delete_repo(gh, name, organization=None, check_mode=False):
    result = dict(changed=False)
    if organization:
        target = gh.get_organization(organization)
    else:
        target = gh.get_user()
    try:
        repo = target.get_repo(name=name)
        if not check_mode:
            repo.delete()
        result['changed'] = True
    except UnknownObjectException:
        pass

    return result


def run_module(params, check_mode=False):
    if params['force_defaults']:
        params['description'] = params['description'] or ''
        params['private'] = params['private'] or False

    gh = authenticate(
        username=params['username'], password=params['password'], access_token=params['access_token'],
        api_url=params['api_url'])
    if params['state'] == "absent":
        return delete_repo(
            gh=gh,
            name=params['name'],
            organization=params['organization'],
            check_mode=check_mode
        )
    else:
        return create_repo(
            gh=gh,
            name=params['name'],
            organization=params['organization'],
            private=params['private'],
            description=params['description'],
            check_mode=check_mode
        )


def main():
    module_args = dict(
        username=dict(type='str'),
        password=dict(type='str', no_log=True),
        access_token=dict(type='str', no_log=True),
        name=dict(type='str', required=True),
        state=dict(type='str', required=False, default="present",
                   choices=["present", "absent"]),
        organization=dict(type='str', required=False, default=None),
        private=dict(type='bool'),
        description=dict(type='str'),
        api_url=dict(type='str', required=False, default='https://api.github.com'),
        force_defaults=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True,
        required_together=[('username', 'password')],
        required_one_of=[('username', 'access_token')],
        mutually_exclusive=[('username', 'access_token')]
    )

    if not HAS_GITHUB_PACKAGE:
        module.fail_json(msg=missing_required_lib(
            "PyGithub"), exception=GITHUB_IMP_ERR)

    try:
        result = run_module(module.params, module.check_mode)
        module.exit_json(**result)
    except GithubException as e:
        module.fail_json(msg="Github error. {0}".format(repr(e)))
    except Exception as e:
        module.fail_json(msg="Unexpected error. {0}".format(repr(e)))


if __name__ == '__main__':
    main()
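
create_repo() above forwards private/description through GithubObject.NotSet when they are None, which tells PyGithub to omit those fields from the API call entirely rather than sending explicit values. The pattern in isolation, assuming PyGithub is installed (the helper name is illustrative):

from github import GithubObject

def notset_if_none(value):
    # PyGithub treats NotSet parameters as "do not send this field at all".
    return GithubObject.NotSet if value is None else value

kwargs = dict(
    private=notset_if_none(None),        # omitted, server default applies
    description=notset_if_none('demo'),  # sent explicitly
)
print(kwargs)
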
@@ -0,0 +1,291 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2018, Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: github_webhook
|
||||
short_description: Manage GitHub webhooks
|
||||
description:
|
||||
- "Create and delete GitHub webhooks"
|
||||
requirements:
|
||||
- "PyGithub >= 1.3.5"
|
||||
options:
|
||||
repository:
|
||||
description:
|
||||
- Full name of the repository to configure a hook for
|
||||
type: str
|
||||
required: true
|
||||
aliases:
|
||||
- repo
|
||||
url:
|
||||
description:
|
||||
- URL to which payloads will be delivered
|
||||
type: str
|
||||
required: true
|
||||
content_type:
|
||||
description:
|
||||
- The media type used to serialize the payloads
|
||||
type: str
|
||||
required: false
|
||||
choices: [ form, json ]
|
||||
default: form
|
||||
secret:
|
||||
description:
|
||||
- The shared secret between GitHub and the payload URL.
|
||||
type: str
|
||||
required: false
|
||||
insecure_ssl:
|
||||
description:
|
||||
- >
|
||||
Flag to indicate that GitHub should skip SSL verification when calling
|
||||
the hook.
|
||||
required: false
|
||||
type: bool
|
||||
default: false
|
||||
events:
|
||||
description:
|
||||
- >
|
||||
A list of GitHub events the hook is triggered for. Events are listed at
|
||||
U(https://developer.github.com/v3/activity/events/types/). Required
|
||||
unless C(state) is C(absent)
|
||||
required: false
|
||||
type: list
|
||||
elements: str
|
||||
active:
|
||||
description:
|
||||
- Whether or not the hook is active
|
||||
required: false
|
||||
type: bool
|
||||
default: true
|
||||
state:
|
||||
description:
|
||||
- Whether the hook should be present or absent
|
||||
type: str
|
||||
required: false
|
||||
choices: [ absent, present ]
|
||||
default: present
|
||||
user:
|
||||
description:
|
||||
- User to authenticate to GitHub as
|
||||
type: str
|
||||
required: true
|
||||
password:
|
||||
description:
|
||||
- Password to authenticate to GitHub with
|
||||
type: str
|
||||
required: false
|
||||
token:
|
||||
description:
|
||||
- Token to authenticate to GitHub with
|
||||
type: str
|
||||
required: false
|
||||
github_url:
|
||||
description:
|
||||
- Base URL of the GitHub API
|
||||
type: str
|
||||
required: false
|
||||
default: https://api.github.com
|
||||
|
||||
author:
|
||||
- "Chris St. Pierre (@stpierre)"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Create a new webhook that triggers on push (password auth)
|
||||
community.general.github_webhook:
|
||||
repository: ansible/ansible
|
||||
url: https://www.example.com/hooks/
|
||||
events:
|
||||
- push
|
||||
user: "{{ github_user }}"
|
||||
password: "{{ github_password }}"
|
||||
|
||||
- name: Create a new webhook in a github enterprise installation with multiple event triggers (token auth)
|
||||
community.general.github_webhook:
|
||||
repository: myorg/myrepo
|
||||
url: https://jenkins.example.com/ghprbhook/
|
||||
content_type: json
|
||||
secret: "{{ github_shared_secret }}"
|
||||
insecure_ssl: true
|
||||
events:
|
||||
- issue_comment
|
||||
- pull_request
|
||||
user: "{{ github_user }}"
|
||||
token: "{{ github_user_api_token }}"
|
||||
github_url: https://github.example.com
|
||||
|
||||
- name: Delete a webhook (password auth)
|
||||
community.general.github_webhook:
|
||||
repository: ansible/ansible
|
||||
url: https://www.example.com/hooks/
|
||||
state: absent
|
||||
user: "{{ github_user }}"
|
||||
password: "{{ github_password }}"
|
||||
'''
|
||||
|
||||
RETURN = '''
---
hook_id:
  description: The GitHub ID of the hook created/updated
  returned: when state is 'present'
  type: int
  sample: 6206
'''

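# Illustrative follow-up (hypothetical playbook snippet, not part of the
# module): the returned hook_id can be registered and inspected, e.g.
#   - community.general.github_webhook:
#       repository: myorg/myrepo
#       url: https://example.com/hook
#       events: [push]
#       user: "{{ github_user }}"
#       token: "{{ github_token }}"
#     register: webhook
#   - ansible.builtin.debug:
#       var: webhook.hook_id
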
import traceback

GITHUB_IMP_ERR = None
try:
    import github
    HAS_GITHUB = True
except ImportError:
    GITHUB_IMP_ERR = traceback.format_exc()
    HAS_GITHUB = False

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native


def _create_hook_config(module):
    return {
        "url": module.params["url"],
        "content_type": module.params["content_type"],
        "secret": module.params.get("secret"),
        "insecure_ssl": "1" if module.params["insecure_ssl"] else "0"
    }


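# A minimal sketch (not part of the module) of what _create_hook_config
# returns; note that GitHub expects insecure_ssl as the string "0" or "1",
# not a boolean:
#   {"url": "https://example.com/hook", "content_type": "json",
#    "secret": None, "insecure_ssl": "0"}
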
def create_hook(repo, module):
    config = _create_hook_config(module)
    try:
        hook = repo.create_hook(
            name="web",
            config=config,
            events=module.params["events"],
            active=module.params["active"])
    except github.GithubException as err:
        module.fail_json(msg="Unable to create hook for repository %s: %s" % (
            repo.full_name, to_native(err)))

    data = {"hook_id": hook.id}
    return True, data


def update_hook(repo, hook, module):
    config = _create_hook_config(module)
    try:
        # Refresh the hook's attributes from the API before editing it.
        hook.update()
        hook.edit(
            name="web",
            config=config,
            events=module.params["events"],
            active=module.params["active"])

        # PyGithub's update() performs a conditional request and returns True
        # only when the object's data changed, so it doubles as the
        # changed-state check after the edit.
        changed = hook.update()
    except github.GithubException as err:
        module.fail_json(msg="Unable to modify hook for repository %s: %s" % (
            repo.full_name, to_native(err)))

    data = {"hook_id": hook.id}
    return changed, data


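# For orientation (hypothetical values; the same PyGithub calls used above):
#   gh = github.Github("user", "token", base_url="https://api.github.com")
#   repo = gh.get_repo("myorg/myrepo")
#   hook = repo.create_hook(name="web", config={...}, events=["push"], active=True)
#   hook.edit(name="web", config={...}, events=["push"], active=False)
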
def main():
    module = AnsibleModule(
        argument_spec=dict(
            repository=dict(type='str', required=True, aliases=['repo']),
            url=dict(type='str', required=True),
            content_type=dict(
                type='str',
                choices=('json', 'form'),
                required=False,
                default='form'),
            secret=dict(type='str', required=False, no_log=True),
            insecure_ssl=dict(type='bool', required=False, default=False),
            events=dict(type='list', elements='str', required=False),
            active=dict(type='bool', required=False, default=True),
            state=dict(
                type='str',
                required=False,
                choices=('absent', 'present'),
                default='present'),
            user=dict(type='str', required=True),
            password=dict(type='str', required=False, no_log=True),
            token=dict(type='str', required=False, no_log=True),
            github_url=dict(
                type='str', required=False, default="https://api.github.com")),
        mutually_exclusive=(('password', 'token'),),
        required_one_of=(("password", "token"),),
        required_if=(("state", "present", ("events",)),),
    )

    if not HAS_GITHUB:
        module.fail_json(msg=missing_required_lib('PyGithub'),
                         exception=GITHUB_IMP_ERR)

    try:
        github_conn = github.Github(
            module.params["user"],
            module.params.get("password") or module.params.get("token"),
            base_url=module.params["github_url"])
    except github.GithubException as err:
        module.fail_json(msg="Could not connect to GitHub at %s: %s" % (
            module.params["github_url"], to_native(err)))

    try:
        repo = github_conn.get_repo(module.params["repository"])
    except github.BadCredentialsException as err:
        module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % (
            module.params["github_url"], to_native(err)))
    except github.UnknownObjectException as err:
        module.fail_json(
            msg="Could not find repository %s in GitHub at %s: %s" % (
                module.params["repository"], module.params["github_url"],
                to_native(err)))
    except Exception as err:
        module.fail_json(
            msg="Could not fetch repository %s from GitHub at %s: %s" %
            (module.params["repository"], module.params["github_url"],
             to_native(err)),
            exception=traceback.format_exc())

    hook = None
    try:
        # Match an existing hook by its payload URL; the for/else leaves
        # hook set to None when nothing matched (no break occurred).
        for hook in repo.get_hooks():
            if hook.config.get("url") == module.params["url"]:
                break
        else:
            hook = None
    except github.GithubException as err:
        module.fail_json(msg="Unable to get hooks from repository %s: %s" % (
            module.params["repository"], to_native(err)))

    changed = False
    data = {}
    if hook is None and module.params["state"] == "present":
        changed, data = create_hook(repo, module)
    elif hook is not None and module.params["state"] == "absent":
        try:
            hook.delete()
        except github.GithubException as err:
            module.fail_json(
                msg="Unable to delete hook from repository %s: %s" % (
                    repo.full_name, to_native(err)))
        else:
            changed = True
    elif hook is not None and module.params["state"] == "present":
        changed, data = update_hook(repo, hook, module)
    # else, there is no hook and we want there to be no hook

    module.exit_json(changed=changed, **data)


if __name__ == '__main__':
    main()
@@ -0,0 +1,179 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
---
module: github_webhook_info
short_description: Query information about GitHub webhooks
description:
  - "Query information about GitHub webhooks"
  - This module was called C(github_webhook_facts) before Ansible 2.9. The usage did not change.
requirements:
  - "PyGithub >= 1.3.5"
extends_documentation_fragment:
  - community.general.attributes
  - community.general.attributes.info_module
options:
  repository:
    description:
      - Full name of the repository to list webhooks for
    type: str
    required: true
    aliases:
      - repo
  user:
    description:
      - User to authenticate to GitHub as
    type: str
    required: true
  password:
    description:
      - Password to authenticate to GitHub with
    type: str
    required: false
  token:
    description:
      - Token to authenticate to GitHub with
    type: str
    required: false
  github_url:
    description:
      - Base URL of the GitHub API
    type: str
    required: false
    default: https://api.github.com

author:
  - "Chris St. Pierre (@stpierre)"
'''

EXAMPLES = '''
- name: List hooks for a repository (password auth)
  community.general.github_webhook_info:
    repository: ansible/ansible
    user: "{{ github_user }}"
    password: "{{ github_password }}"
  register: ansible_webhooks

- name: List hooks for a repository on GitHub Enterprise (token auth)
  community.general.github_webhook_info:
    repository: myorg/myrepo
    user: "{{ github_user }}"
    token: "{{ github_user_api_token }}"
    github_url: https://github.example.com/api/v3/
  register: myrepo_webhooks
'''

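# Illustrative follow-up (hypothetical): the registered result exposes the
# hooks list returned by this module, e.g.
#   - ansible.builtin.debug:
#       msg: "{{ myrepo_webhooks.hooks | map(attribute='url') | list }}"
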
RETURN = '''
---
hooks:
  description: A list of hooks that exist for the repo
  returned: always
  type: list
  elements: dict
  sample:
    - {
        "has_shared_secret": true,
        "url": "https://jenkins.example.com/ghprbhook/",
        "events": ["issue_comment", "pull_request"],
        "insecure_ssl": "1",
        "content_type": "json",
        "active": true,
        "id": 6206,
        "last_response": {"status": "active", "message": "OK", "code": 200}
      }
'''

import traceback

GITHUB_IMP_ERR = None
try:
    import github
    HAS_GITHUB = True
except ImportError:
    GITHUB_IMP_ERR = traceback.format_exc()
    HAS_GITHUB = False

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.text.converters import to_native


def _munge_hook(hook_obj):
    retval = {
        "active": hook_obj.active,
        "events": hook_obj.events,
        "id": hook_obj.id,
        "url": hook_obj.url,
    }
    retval.update(hook_obj.config)
    # Never return the shared secret itself; only report whether one is set.
    retval["has_shared_secret"] = "secret" in retval
    if "secret" in retval:
        del retval["secret"]

    retval["last_response"] = hook_obj.last_response.raw_data
    return retval


def main():
    module = AnsibleModule(
        argument_spec=dict(
            repository=dict(type='str', required=True, aliases=["repo"]),
            user=dict(type='str', required=True),
            password=dict(type='str', required=False, no_log=True),
            token=dict(type='str', required=False, no_log=True),
            github_url=dict(
                type='str', required=False, default="https://api.github.com")),
        mutually_exclusive=(('password', 'token'), ),
        required_one_of=(("password", "token"), ),
        supports_check_mode=True)

    if not HAS_GITHUB:
        module.fail_json(msg=missing_required_lib('PyGithub'),
                         exception=GITHUB_IMP_ERR)

    try:
        github_conn = github.Github(
            module.params["user"],
            module.params.get("password") or module.params.get("token"),
            base_url=module.params["github_url"])
    except github.GithubException as err:
        module.fail_json(msg="Could not connect to GitHub at %s: %s" % (
            module.params["github_url"], to_native(err)))

    try:
        repo = github_conn.get_repo(module.params["repository"])
    except github.BadCredentialsException as err:
        module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % (
            module.params["github_url"], to_native(err)))
    except github.UnknownObjectException as err:
        module.fail_json(
            msg="Could not find repository %s in GitHub at %s: %s" % (
                module.params["repository"], module.params["github_url"],
                to_native(err)))
    except Exception as err:
        module.fail_json(
            msg="Could not fetch repository %s from GitHub at %s: %s" %
            (module.params["repository"], module.params["github_url"],
             to_native(err)),
            exception=traceback.format_exc())

    try:
        hooks = [_munge_hook(h) for h in repo.get_hooks()]
    except github.GithubException as err:
        module.fail_json(
            msg="Unable to get hooks from repository %s: %s" %
            (module.params["repository"], to_native(err)),
            exception=traceback.format_exc())

    module.exit_json(changed=False, hooks=hooks)


if __name__ == '__main__':
    main()
@@ -0,0 +1,176 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
module: gitlab_branch
short_description: Create or delete a branch
version_added: 4.2.0
description:
  - This module allows creating or deleting branches.
author:
  - paytroff (@paytroff)
requirements:
  - python >= 2.7
  - python-gitlab >= 2.3.0
extends_documentation_fragment:
  - community.general.auth_basic
  - community.general.gitlab

options:
  state:
    description:
      - Create or delete branch.
    default: present
    type: str
    choices: ["present", "absent"]
  project:
    description:
      - The path or name of the project.
    required: true
    type: str
  branch:
    description:
      - The name of the branch that needs to be created.
    required: true
    type: str
  ref_branch:
    description:
      - Reference branch to create from.
      - This must be specified if I(state=present).
    type: str
'''


EXAMPLES = '''
- name: Create branch branch2 from main
  community.general.gitlab_branch:
    api_url: https://gitlab.com
    api_token: secret_access_token
    project: "group1/project1"
    branch: branch2
    ref_branch: main
    state: present

- name: Delete branch branch2
  community.general.gitlab_branch:
    api_url: https://gitlab.com
    api_token: secret_access_token
    project: "group1/project1"
    branch: branch2
    state: absent

'''

RETURN = '''
'''

import traceback

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.api import basic_auth_argument_spec

from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
from ansible_collections.community.general.plugins.module_utils.gitlab import (
    auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package
)


class GitlabBranch(object):

    def __init__(self, module, project, gitlab_instance):
        self.repo = gitlab_instance
        self._module = module
        self.project = self.get_project(project)

    def get_project(self, project):
        try:
            return self.repo.projects.get(project)
        except Exception:
            return False

    def get_branch(self, branch):
        try:
            return self.project.branches.get(branch)
        except Exception:
            return False

    def create_branch(self, branch, ref_branch):
        return self.project.branches.create({'branch': branch, 'ref': ref_branch})

    def delete_branch(self, branch):
        return branch.delete()


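# A minimal python-gitlab sketch (hypothetical values) of the calls wrapped
# above; branches.create() takes the new branch name and the ref to fork from:
#   gl = gitlab.Gitlab('https://gitlab.com', private_token='secret_access_token')
#   project = gl.projects.get('group1/project1')
#   project.branches.create({'branch': 'branch2', 'ref': 'main'})
#   project.branches.get('branch2').delete()
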
def main():
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(auth_argument_spec())
    argument_spec.update(
        project=dict(type='str', required=True),
        branch=dict(type='str', required=True),
        ref_branch=dict(type='str', required=False),
        state=dict(type='str', default="present", choices=["absent", "present"]),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['api_username', 'api_token'],
            ['api_username', 'api_oauth_token'],
            ['api_username', 'api_job_token'],
            ['api_token', 'api_oauth_token'],
            ['api_token', 'api_job_token'],
        ],
        required_together=[
            ['api_username', 'api_password'],
        ],
        required_one_of=[
            ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
        ],
        required_if=[
            ['state', 'present', ['ref_branch'], True],
        ],
        supports_check_mode=False
    )
    ensure_gitlab_package(module)

    project = module.params['project']
    branch = module.params['branch']
    ref_branch = module.params['ref_branch']
    state = module.params['state']

    gitlab_version = gitlab.__version__
    if LooseVersion(gitlab_version) < LooseVersion('2.3.0'):
        module.fail_json(msg="community.general.gitlab_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])."
                             " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version)

    gitlab_instance = gitlab_authentication(module)
    this_gitlab = GitlabBranch(module=module, project=project, gitlab_instance=gitlab_instance)

    this_branch = this_gitlab.get_branch(branch)

    if not this_branch and state == "present":
        r_branch = this_gitlab.get_branch(ref_branch)
        if not r_branch:
            module.fail_json(msg="Ref branch {b} does not exist.".format(b=ref_branch))
        this_gitlab.create_branch(branch, ref_branch)
        module.exit_json(changed=True, msg="Created the branch {b}.".format(b=branch))
    elif this_branch and state == "present":
        module.exit_json(changed=False, msg="Branch {b} already exists.".format(b=branch))
    elif this_branch and state == "absent":
        try:
            this_gitlab.delete_branch(this_branch)
            module.exit_json(changed=True, msg="Branch {b} deleted.".format(b=branch))
        except Exception:
            module.fail_json(msg="Error deleting branch.", exception=traceback.format_exc())
    else:
        module.exit_json(changed=False, msg="No changes are needed.")


if __name__ == '__main__':
    main()
@@ -0,0 +1,293 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
# Copyright (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
# Based on code:
# Copyright (c) 2013, Phillip Gentry <phillip@cx.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import absolute_import, division, print_function
__metaclass__ = type

DOCUMENTATION = '''
module: gitlab_deploy_key
short_description: Manages GitLab project deploy keys
description:
  - Adds, updates and removes project deploy keys
author:
  - Marcus Watkins (@marwatk)
  - Guillaume Martinez (@Lunik)
requirements:
  - python >= 2.7
  - python-gitlab python module
extends_documentation_fragment:
  - community.general.auth_basic
  - community.general.gitlab

options:
  project:
    description:
      - ID or full path of the project in the form of group/name.
    required: true
    type: str
  title:
    description:
      - Deploy key's title.
    required: true
    type: str
  key:
    description:
      - Deploy key
    required: true
    type: str
  can_push:
    description:
      - Whether this key can push to the project.
    type: bool
    default: false
  state:
    description:
      - When C(present), the deploy key is added to the project if it doesn't exist.
      - When C(absent), it will be removed from the project if it exists.
    default: present
    type: str
    choices: [ "present", "absent" ]
'''

EXAMPLES = '''
- name: "Adding a project deploy key"
  community.general.gitlab_deploy_key:
    api_url: https://gitlab.example.com/
    api_token: "{{ api_token }}"
    project: "my_group/my_project"
    title: "Jenkins CI"
    state: present
    key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."

- name: "Update the above deploy key to add push access"
  community.general.gitlab_deploy_key:
    api_url: https://gitlab.example.com/
    api_token: "{{ api_token }}"
    project: "my_group/my_project"
    title: "Jenkins CI"
    state: present
    can_push: true

- name: "Remove the previous deploy key from the project"
  community.general.gitlab_deploy_key:
    api_url: https://gitlab.example.com/
    api_token: "{{ api_token }}"
    project: "my_group/my_project"
    state: absent
    key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."

'''

RETURN = '''
msg:
  description: Success or failure message
  returned: always
  type: str
  sample: "Success"

result:
  description: JSON-parsed response from the server
  returned: always
  type: dict

error:
  description: The error message returned by the GitLab API
  returned: failed
  type: str
  sample: "400: key is already in use"

deploy_key:
  description: API object
  returned: always
  type: dict
'''

from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native

from ansible_collections.community.general.plugins.module_utils.gitlab import (
    auth_argument_spec, find_project, gitlab_authentication, gitlab, ensure_gitlab_package
)


class GitLabDeployKey(object):
    def __init__(self, module, gitlab_instance):
        self._module = module
        self._gitlab = gitlab_instance
        self.deploy_key_object = None

    '''
    @param project Project object
    @param key_title Title of the key
    @param key_key String of the key
    @param options Deploy key options (for example can_push)
    '''
    def create_or_update_deploy_key(self, project, key_title, key_key, options):
        changed = False

        # note: unfortunately the public key cannot be updated directly by
        # the GitLab REST API, so in that case we need to delete and
        # then recreate the key
        if self.deploy_key_object and self.deploy_key_object.key != key_key:
            if not self._module.check_mode:
                self.deploy_key_object.delete()
            self.deploy_key_object = None

        # exists_deploy_key() has already been called in main(), so
        # deploy_key_object is only None when the key has to be (re)created.
        if self.deploy_key_object is None:
            deploy_key = self.create_deploy_key(project, {
                'title': key_title,
                'key': key_key,
                'can_push': options['can_push']})
            changed = True
        else:
            changed, deploy_key = self.update_deploy_key(self.deploy_key_object, {
                'can_push': options['can_push']})

        self.deploy_key_object = deploy_key
        if changed:
            if self._module.check_mode:
                self._module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title)

            try:
                deploy_key.save()
            except Exception as e:
                self._module.fail_json(msg="Failed to update deploy key: %s " % e)
            return True
        else:
            return False

    '''
    @param project Project Object
    @param arguments Attributes of the deploy_key
    '''
    def create_deploy_key(self, project, arguments):
        if self._module.check_mode:
            return True

        try:
            deploy_key = project.keys.create(arguments)
        except (gitlab.exceptions.GitlabCreateError) as e:
            self._module.fail_json(msg="Failed to create deploy key: %s " % to_native(e))

        return deploy_key

    '''
    @param deploy_key Deploy Key Object
    @param arguments Attributes of the deploy_key
    '''
    def update_deploy_key(self, deploy_key, arguments):
        changed = False

        for arg_key, arg_value in arguments.items():
            if arg_value is not None:
                if getattr(deploy_key, arg_key) != arg_value:
                    setattr(deploy_key, arg_key, arg_value)
                    changed = True

        return (changed, deploy_key)

    '''
    @param project Project object
    @param key_title Title of the key
    '''
    def find_deploy_key(self, project, key_title):
        deploy_keys = project.keys.list(all=True)
        for deploy_key in deploy_keys:
            if deploy_key.title == key_title:
                return deploy_key

    '''
    @param project Project object
    @param key_title Title of the key
    '''
    def exists_deploy_key(self, project, key_title):
        # When the deploy key exists, it is stored in self.deploy_key_object.
        deploy_key = self.find_deploy_key(project, key_title)
        if deploy_key:
            self.deploy_key_object = deploy_key
            return True
        return False

    def delete_deploy_key(self):
        if self._module.check_mode:
            return True

        return self.deploy_key_object.delete()


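# A minimal python-gitlab sketch (hypothetical values) of the deploy key API
# wrapped above:
#   gl = gitlab.Gitlab('https://gitlab.example.com', private_token='token')
#   project = gl.projects.get('my_group/my_project')
#   key = project.keys.create({'title': 'Jenkins CI', 'key': 'ssh-rsa ...',
#                              'can_push': False})
#   titles = [k.title for k in project.keys.list(all=True)]
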
def main():
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(auth_argument_spec())
    argument_spec.update(dict(
        state=dict(type='str', default="present", choices=["absent", "present"]),
        project=dict(type='str', required=True),
        key=dict(type='str', required=True, no_log=False),
        can_push=dict(type='bool', default=False),
        title=dict(type='str', required=True)
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['api_username', 'api_token'],
            ['api_username', 'api_oauth_token'],
            ['api_username', 'api_job_token'],
            ['api_token', 'api_oauth_token'],
            ['api_token', 'api_job_token'],
        ],
        required_together=[
            ['api_username', 'api_password']
        ],
        required_one_of=[
            ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
        ],
        supports_check_mode=True,
    )
    ensure_gitlab_package(module)

    state = module.params['state']
    project_identifier = module.params['project']
    key_title = module.params['title']
    key_keyfile = module.params['key']
    key_can_push = module.params['can_push']

    gitlab_instance = gitlab_authentication(module)

    gitlab_deploy_key = GitLabDeployKey(module, gitlab_instance)

    project = find_project(gitlab_instance, project_identifier)

    if project is None:
        module.fail_json(msg="Failed to create deploy key: project %s does not exist" % project_identifier)

    deploy_key_exists = gitlab_deploy_key.exists_deploy_key(project, key_title)

    if state == 'absent':
        if deploy_key_exists:
            gitlab_deploy_key.delete_deploy_key()
            module.exit_json(changed=True, msg="Successfully deleted deploy key %s" % key_title)
        else:
            module.exit_json(changed=False, msg="Deploy key deleted or does not exist")

    if state == 'present':
        if gitlab_deploy_key.create_or_update_deploy_key(project, key_title, key_keyfile, {'can_push': key_can_push}):
            module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title,
                             deploy_key=gitlab_deploy_key.deploy_key_object._attrs)
        else:
            module.exit_json(changed=False, msg="No need to update the deploy key %s" % key_title,
                             deploy_key=gitlab_deploy_key.deploy_key_object._attrs)


if __name__ == '__main__':
    main()